diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..70f9eae --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[registries.crates-io] +protocol = "sparse" diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..2957cfd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,6 @@ +target/ +.git +README.md +TODO.md +manual_test.nu +config.toml diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..943896d --- /dev/null +++ b/.env.example @@ -0,0 +1 @@ +RUST_LOG=info,tx_sitter=debug,fake_rpc=debug,tower_http=debug diff --git a/.github/workflows/build-image-and-publish.yml b/.github/workflows/build-image-and-publish.yml new file mode 100644 index 0000000..0fb4553 --- /dev/null +++ b/.github/workflows/build-image-and-publish.yml @@ -0,0 +1,53 @@ +name: Build and Publish Docker Image + +on: + push: + branches: [ main, dev ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build-and-push: + runs-on: ubuntu-latest + steps: + - name: Check Out Repo + uses: actions/checkout@v4 + + - name: Log in to Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha + type=raw,value=latest + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + file: Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + annotations: ${{ steps.meta.outputs.annotations }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..65c0030 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,33 @@ +name: "Run tests" +on: + push: + pull_request: + +jobs: + test: + name: cargo test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Install latest nextest release + uses: taiki-e/install-action@nextest + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly + - run: cargo nextest run --workspace + env: + RUST_LOG: info,tx_sitter=debug + - run: cargo clippy --workspace --tests + + formatting: + name: cargo fmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + components: rustfmt + - name: Rustfmt Check + uses: actions-rust-lang/rustfmt@v1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5f32e70 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +target/ +.env\ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 984c18f..ace9c7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -206,9 +206,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "0.57.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2bf00cb9416daab4ce4927c54ebe63c08b9caf4d7b9314b6d7a4a2c5a1afb09" +checksum =
"80c950a809d39bc9480207cb1cfc879ace88ea7e3a4392a8e9999e45d6e5692e" dependencies = [ "aws-credential-types", "aws-http", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "0.57.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9073c88dbf12f68ce7d0e149f989627a1d1ae3d2b680459f04ccc29d1cbd0f" +checksum = "8c1317e1a3514b103cf7d5828bbab3b4d30f56bd22d684f8568bc51b6cfbbb1c" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -249,11 +249,10 @@ dependencies = [ [[package]] name = "aws-http" -version = "0.57.2" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24067106d09620cf02d088166cdaedeaca7146d4d499c41b37accecbea11b246" +checksum = "361c4310fdce94328cc2d1ca0c8a48c13f43009c61d3367585685a50ca8c66b6" dependencies = [ - "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", @@ -266,9 +265,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "0.57.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6ee0152c06d073602236a4e94a8c52a327d310c1ecd596570ce795af8777ff" +checksum = "1ed7ef604a15fd0d4d9e43701295161ea6b504b63c44990ead352afea2bc15e9" dependencies = [ "aws-credential-types", "aws-http", @@ -287,9 +286,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "0.36.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "674c06944cbef8df0c5ab43226f85ac28a15e6f4498d74aa200e00588b9b75e4" +checksum = "e5a1b6e2e95bc32f3b88d00de3b48156fbece5d8112dc76d975b9c2d2837dc8d" dependencies = [ "aws-credential-types", "aws-http", @@ -309,9 +308,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "0.36.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb8158015232b4596ccef74a205600398e152d704b40b7ec9f486092474d7fa" +checksum = "0619ab97a5ca8982e7de073cdc66f93e5f6a1b05afc09e696bec1cb3607cd4df" dependencies = [ "aws-credential-types", "aws-http", @@ -331,9 +330,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "0.36.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a1493e1c57f173e53621935bfb5b6217376168dbdb4cd459aebcf645924a48" +checksum = "f04b9f5474cc0f35d829510b2ec8c21e352309b46bf9633c5a81fb9321e9b1c7" dependencies = [ "aws-credential-types", "aws-http", @@ -353,9 +352,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "0.36.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e032b77f5cd1dd3669d777a38ac08cbf8ec68e29460d4ef5d3e50cffa74ec75a" +checksum = "798c8d82203af9e15a8b406574e0b36da91dd6db533028b74676489a1bc8bc7d" dependencies = [ "aws-credential-types", "aws-http", @@ -376,13 +375,14 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "0.57.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f81a6abc4daab06b53cabf27c54189928893283093e37164ca53aa47488a5b" +checksum = "380adcc8134ad8bbdfeb2ace7626a869914ee266322965276cbc54066186d236" dependencies = [ "aws-credential-types", "aws-smithy-http", "aws-smithy-runtime-api", + "aws-smithy-types", "bytes", "form_urlencoded", "hex", @@ -398,9 +398,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "0.57.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dbe53fccd3b10414b9cae63767a15a2789b34e6c6727b6e32b33e8c7998a3e80" +checksum = "3e37ca17d25fe1e210b6d4bdf59b81caebfe99f986201a1228cb5061233b4b13" dependencies = [ "futures-util", "pin-project-lite", @@ -409,9 +409,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.57.2" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7972373213d1d6e619c0edc9dda2d6634154e4ed75c5e0b2bf065cd5ec9f0d1" +checksum = "5b1de8aee22f67de467b2e3d0dd0fb30859dc53f579a63bd5381766b987db644" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -429,18 +429,18 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.57.2" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d64d5af16dd585de9ff6c606423c1aaad47c6baa38de41c2beb32ef21c6645" +checksum = "6a46dd338dc9576d6a6a5b5a19bd678dcad018ececee11cf28ecd7588bd1a55c" dependencies = [ "aws-smithy-types", ] [[package]] name = "aws-smithy-query" -version = "0.57.2" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7527bf5335154ba1b285479c50b630e44e93d1b4a759eaceb8d0bf9fbc82caa5" +checksum = "feb5b8c7a86d4b6399169670723b7e6f21a39fc833a30f5c5a2f997608178129" dependencies = [ "aws-smithy-types", "urlencoding", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "0.57.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "839b363adf3b2bdab2742a1f540fec23039ea8bc9ec0f9f61df48470cfe5527b" +checksum = "273479291efc55e7b0bce985b139d86b6031adb8e50f65c1f712f20ba38f6388" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -458,6 +458,7 @@ dependencies = [ "aws-smithy-types", "bytes", "fastrand", + "h2", "http", "http-body", "hyper", @@ -472,9 +473,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "0.57.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f24ecc446e62c3924539e7c18dec8038dba4fdf8718d5c2de62f9d2fecca8ba9" +checksum = "c6cebff0d977b6b6feed2fd07db52aac58ba3ccaf26cdd49f1af4add5061bef9" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -488,9 +489,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "0.57.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "051de910296522a21178a2ea402ea59027eef4b63f1cef04a0be2bb5e25dea03" +checksum = "d7f48b3f27ddb40ab19892a5abda331f403e3cb877965e4e51171447807104af" dependencies = [ "base64-simd", "bytes", @@ -511,18 +512,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.57.2" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb1e3ac22c652662096c8e37a6f9af80c6f3520cab5610b2fe76c725bce18eac" +checksum = "0ec40d74a67fd395bc3f6b4ccbdf1543672622d905ef3f979689aea5b730cb95" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "0.57.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048bbf1c24cdf4eb1efcdc243388a93a90ebf63979e25fc1c7b8cbd9cb6beb38" +checksum = "8403fc56b1f3761e8efe45771ddc1165e47ec3417c68e68a4519b5cb030159ca" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -638,6 +639,30 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = 
"bigdecimal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "bigdecimal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06619be423ea5bb86c95f087d5707942791a08a85530df0db2209a3ecfb8bc9" +dependencies = [ + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -757,6 +782,15 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "cadence" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab51a759f502097abe855100b81b421d3a104b62a2c3209f751d90ce6dd2ea1" +dependencies = [ + "crossbeam-channel", +] + [[package]] name = "camino" version = "1.1.6" @@ -1020,6 +1054,16 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.3" @@ -1569,6 +1613,7 @@ dependencies = [ "const-hex", "enr", "ethers-core", + "futures-channel", "futures-core", "futures-timer", "futures-util", @@ -1660,43 +1705,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fake-rpc" -version = "0.1.0" -dependencies = [ - "async-trait", - "axum", - "chrono", - "clap", - "config", - "dotenv", - "ethers", - "ethers-signers", - "eyre", - "futures", - "headers", - "hex", - "hex-literal", - "humantime", - "humantime-serde", - "hyper", - "rand", - "reqwest", - "serde", - "serde_json", - "sha3", - "spki", - "sqlx", - "strum", - "thiserror", - "tokio", - "toml 0.8.8", - "tower-http", - "tracing", - "tracing-subscriber", - "uuid 0.8.2", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -1764,6 +1772,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.0" @@ -1987,7 +2010,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 2.1.0", "slab", "tokio", "tokio-util", @@ -2003,6 +2026,15 @@ dependencies = [ "ahash 0.7.7", ] +[[package]] +name = "hashbrown" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" +dependencies = [ + "ahash 0.8.6", +] + [[package]] name = "hashbrown" version = "0.14.2" @@ -2205,6 +2237,19 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + 
"tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.58" @@ -2282,6 +2327,16 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.1.0" @@ -2351,6 +2406,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -2562,6 +2626,61 @@ dependencies = [ "autocfg", ] +[[package]] +name = "metrics" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd71d9db2e4287c3407fa04378b8c2ee570aebe0854431562cdd89ca091854f4" +dependencies = [ + "ahash 0.8.6", + "portable-atomic", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a4c4718a371ddfb7806378f23617876eea8b82e5ff1324516bcd283249d9ea" +dependencies = [ + "base64 0.21.5", + "hyper", + "hyper-tls", + "indexmap 1.9.3", + "ipnet", + "metrics", + "metrics-util", + "quanta", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "metrics-exporter-statsd" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82bd7bb16e431f15d56a61b18ee34881cd9d427da7b4450d1a588c911c1d9ac3" +dependencies = [ + "cadence", + "metrics", + "thiserror", +] + +[[package]] +name = "metrics-util" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2670b8badcc285d486261e2e9f1615b506baff91427b61bd336a472b65bbf5ed" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.13.1", + "metrics", + "num_cpus", + "quanta", + "sketches-ddsketch", +] + [[package]] name = "mime" version = "0.3.17" @@ -2594,6 +2713,24 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "new_debug_unreachable" version = "1.0.4" @@ -2750,18 +2887,146 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "openssl" +version = "0.10.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" +dependencies = [ + "futures-core", + "futures-sink", + "indexmap 2.1.0", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry-datadog" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e09667367cb509f10d7cf5960a83f9c4d96e93715f750b164b4b98d46c3cbf4" +dependencies = [ + "futures-core", + "http", + "indexmap 2.1.0", + "itertools 0.11.0", + "once_cell", + "opentelemetry", + "opentelemetry-http", + "opentelemetry-semantic-conventions", + "opentelemetry_sdk", + "reqwest", + "rmp", + "thiserror", + "url", +] + +[[package]] +name = "opentelemetry-http" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f51189ce8be654f9b5f7e70e49967ed894e84a06fc35c6c042e64ac1fc5399e" +dependencies = [ + "async-trait", + "bytes", + "http", + "opentelemetry", + "reqwest", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry", + "ordered-float", + "percent-encoding", + "rand", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "option-ext" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "ordered-float" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +dependencies = [ + "num-traits", +] + [[package]] name = "ordered-multimap" version = "0.4.3" @@ -2960,7 +3225,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.1.0", ] [[package]] @@ -3083,6 +3348,12 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + [[package]] name = "postgres-docker-utils" version = "0.1.0" @@ -3202,6 +3473,21 @@ dependencies = [ "unarray", ] +[[package]] +name = "quanta" +version = "0.12.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quote" version = "1.0.33" @@ -3256,6 +3542,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "raw-cpuid" +version = "11.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" +dependencies = [ + "bitflags 2.4.1", +] + [[package]] name = "rayon" version = "1.8.0" @@ -3348,9 +3643,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64 0.21.5", "bytes", @@ -3362,10 +3657,12 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", + "hyper-tls", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -3374,8 +3671,10 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", + "tokio-native-tls", "tokio-rustls", "tower-service", "url", @@ -3456,6 +3755,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rmp" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" +dependencies = [ + "byteorder", + "num-traits", + "paste", +] + [[package]] name = "ron" version = "0.7.1" @@ -3785,52 +4095,6 @@ dependencies = [ "serde", ] -[[package]] -name = "service" -version = "0.1.0" -dependencies = [ - "async-trait", - "aws-config", - "aws-credential-types", - "aws-sdk-kms", - "aws-smithy-types", - "aws-types", - "axum", - "chrono", - "clap", - "config", - "dotenv", - "ethers", - "ethers-signers", - "eyre", - "fake-rpc", - "futures", - "headers", - "hex", - "hex-literal", - "humantime", - "humantime-serde", - "hyper", - "indoc", - "postgres-docker-utils", - "rand", - "reqwest", - "serde", - "serde_json", - "sha3", - "spki", - "sqlx", - "strum", - "test-case", - "thiserror", - "tokio", - "toml 0.8.8", - "tower-http", - "tracing", - "tracing-subscriber", - "uuid 0.8.2", -] - [[package]] name = "sha1" version = "0.10.6" @@ -3909,6 +4173,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +[[package]] +name = "sketches-ddsketch" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" + [[package]] name = "slab" version = "0.4.9" @@ -4015,8 +4285,10 @@ checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d" dependencies = [ "ahash 0.8.6", "atoi", + "bigdecimal 0.3.1", "byteorder", "bytes", + "chrono", "crc", "crossbeam-queue", "dotenvy", @@ -4029,7 +4301,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap", + "indexmap 2.1.0", "log", "memchr", "once_cell", @@ -4043,6 +4315,7 @@ dependencies = [ "smallvec", "sqlformat", "thiserror", + "time", "tokio", "tokio-stream", "tracing", @@ -4097,9 +4370,11 @@ checksum = 
"864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db" dependencies = [ "atoi", "base64 0.21.5", + "bigdecimal 0.3.1", "bitflags 2.4.1", "byteorder", "bytes", + "chrono", "crc", "digest", "dotenvy", @@ -4127,6 +4402,7 @@ dependencies = [ "sqlx-core", "stringprep", "thiserror", + "time", "tracing", "whoami", ] @@ -4139,8 +4415,10 @@ checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624" dependencies = [ "atoi", "base64 0.21.5", + "bigdecimal 0.3.1", "bitflags 2.4.1", "byteorder", + "chrono", "crc", "dotenvy", "etcetera", @@ -4156,6 +4434,7 @@ dependencies = [ "log", "md-5", "memchr", + "num-bigint", "once_cell", "rand", "serde", @@ -4166,6 +4445,7 @@ dependencies = [ "sqlx-core", "stringprep", "thiserror", + "time", "tracing", "whoami", ] @@ -4177,6 +4457,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f" dependencies = [ "atoi", + "chrono", "flume", "futures-channel", "futures-core", @@ -4188,6 +4469,7 @@ dependencies = [ "percent-encoding", "serde", "sqlx-core", + "time", "tracing", "url", ] @@ -4331,6 +4613,33 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "telemetry-batteries" +version = "0.1.0" +source = "git+https://github.com/worldcoin/telemetry-batteries?rev=e0891328b29d9f85df037633feccca2f74a291a6#e0891328b29d9f85df037633feccca2f74a291a6" +dependencies = [ + "chrono", + "dirs", + "http", + "metrics", + "metrics-exporter-prometheus", + "metrics-exporter-statsd", + "opentelemetry", + "opentelemetry-datadog", + "opentelemetry-http", + "opentelemetry_sdk", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-appender", + "tracing-opentelemetry", + "tracing-serde", + "tracing-subscriber", +] + [[package]] name = "tempfile" version = "3.8.1" @@ -4503,6 +4812,16 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -4589,7 +4908,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap", + "indexmap 2.1.0", "toml_datetime", "winnow", ] @@ -4600,7 +4919,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap", + "indexmap 2.1.0", "toml_datetime", "winnow", ] @@ -4611,7 +4930,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -4640,6 +4959,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ + "base64 0.21.5", "bitflags 2.4.1", "bytes", "futures-core", @@ -4647,6 +4967,7 @@ dependencies = [ "http", "http-body", "http-range-header", + "mime", "pin-project-lite", "tower-layer", "tower-service", 
@@ -4677,6 +4998,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" version = "0.1.27" @@ -4708,6 +5041,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", + "web-time", +] + [[package]] name = "tracing-serde" version = "0.1.3" @@ -4731,9 +5093,11 @@ dependencies = [ "serde", "serde_json", "sharded-slab", + "smallvec", "thread_local", "tracing", "tracing-core", + "tracing-log", "tracing-serde", ] @@ -4763,6 +5127,58 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tx-sitter" +version = "0.1.0" +dependencies = [ + "async-trait", + "aws-config", + "aws-credential-types", + "aws-sdk-kms", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "axum", + "base64 0.21.5", + "bigdecimal 0.4.2", + "chrono", + "clap", + "config", + "dotenv", + "ethers", + "eyre", + "futures", + "headers", + "hex", + "hex-literal", + "humantime", + "humantime-serde", + "hyper", + "indoc", + "itertools 0.12.0", + "metrics", + "num-bigint", + "postgres-docker-utils", + "rand", + "reqwest", + "serde", + "serde_json", + "sha3", + "spki", + "sqlx", + "strum", + "telemetry-batteries", + "test-case", + "thiserror", + "tokio", + "toml 0.8.8", + "tower-http", + "tracing", + "tracing-subscriber", + "url", + "uuid 0.8.2", +] + [[package]] name = "typenum" version = "1.17.0" @@ -5014,6 +5430,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.24.0" diff --git a/Cargo.toml b/Cargo.toml index ffcd4c4..0ab0993 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,43 +1,72 @@ [package] -name = "service" +name = "tx-sitter" version = "0.1.0" edition = "2021" +default-run = "tx-sitter" [workspace] members = ["crates/*"] [dependencies] -# Third Party ## AWS -aws-config = { version = "0.57.2" } -aws-sdk-kms = "0.36.0" -aws-smithy-types = "0.57.2" -aws-types = "0.57.2" -aws-credential-types = { version = "0.57.2", features = [ +aws-config = { version = "1.0.1" } +aws-credential-types = { version = "1.0.1", features = [ "hardcoded-credentials", ] } +aws-sdk-kms = "1.3.0" +aws-smithy-runtime-api = "1.0.2" +aws-smithy-types = "1.0.2" +aws-types = "1.0.1" -## Other -serde = "1.0.136" +# Internal +postgres-docker-utils = { path = "crates/postgres-docker-utils" } + +# Company +telemetry-batteries = { git = "https://github.com/worldcoin/telemetry-batteries", rev = "e0891328b29d9f85df037633feccca2f74a291a6" } + +## 
External +async-trait = "0.1.74" axum = { version = "0.6.20", features = ["headers"] } -thiserror = "1.0.50" -headers = "0.3.9" -humantime = "2.1.0" -humantime-serde = "1.1.1" -hyper = "0.14.27" -dotenv = "0.15.0" +base64 = "0.21.5" +bigdecimal = "0.4.2" +chrono = "0.4" clap = { version = "4.3.0", features = ["env", "derive"] } -ethers = { version = "2.0.11" } -ethers-signers = { version = "2.0.11" } +config = "0.13.3" +dotenv = "0.15.0" +ethers = { version = "2.0.11", features = ["ws"] } eyre = "0.6.5" +futures = "0.3" +headers = "0.3.9" hex = "0.4.3" hex-literal = "0.4.1" -reqwest = { version = "0.11.13", default-features = false, features = [ +humantime = "2.1.0" +humantime-serde = "1.1.1" +hyper = "0.14.27" +itertools = "0.12.0" +metrics = "0.22.1" +num-bigint = "0.4.4" +rand = "0.8.5" +reqwest = { version = "0.11.24", default-features = false, features = [ "rustls-tls", ] } +serde = "1.0.136" serde_json = "1.0.91" +sha3 = "0.10.8" +spki = "0.7.2" +sqlx = { version = "0.7.2", features = [ + "time", + "chrono", + "runtime-tokio", + "tls-rustls", + "postgres", + "migrate", + "bigdecimal", +] } strum = { version = "0.25.0", features = ["derive"] } +thiserror = "1.0.50" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } +toml = "0.8.8" +tower-http = { version = "0.4.4", features = ["trace", "auth"] } tracing = { version = "0.1", features = ["log"] } tracing-subscriber = { version = "0.3", default-features = false, features = [ "env-filter", @@ -46,31 +75,16 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ "json", "ansi", ] } -tower-http = { version = "0.4.4", features = ["trace"] } +url = "2.4.1" uuid = { version = "0.8", features = ["v4"] } -futures = "0.3" -chrono = "0.4" -rand = "0.8.5" -sha3 = "0.10.8" -config = "0.13.3" -toml = "0.8.8" -sqlx = { version = "0.7.2", features = [ - "runtime-tokio", - "tls-rustls", - "postgres", - "migrate", -] } -spki = "0.7.2" -async-trait = "0.1.74" - -# Internal -postgres-docker-utils = { path = "crates/postgres-docker-utils" } [dev-dependencies] -test-case = "3.1.0" indoc = "2.0.3" -fake-rpc = { path = "crates/fake-rpc" } +test-case = "3.1.0" + +[features] +default = ["default-config"] +default-config = [] -[[bin]] -name = "bootstrap" -path = "src/main.rs" +[profile.release] +panic = "abort" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..add7e48 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,58 @@ +FROM debian:12 as build-env + +WORKDIR /src + +# Install dependencies +RUN apt-get update && \ + apt-get install -y curl build-essential libssl-dev texinfo libcap2-bin pkg-config + +# TODO: Use a specific version of rustup +# Install rustup +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y + +# Copy only rust-toolchain.toml for better caching +COPY ./rust-toolchain.toml ./rust-toolchain.toml + +# Set environment variables +ENV PATH="/root/.cargo/bin:${PATH}" +ENV RUSTUP_HOME="/root/.rustup" +ENV CARGO_HOME="/root/.cargo" + +# Install the toolchain +RUN rustup component add cargo + +# TODO: Hacky but it works +RUN mkdir -p ./src +RUN mkdir -p ./crates/postgres-docker-utils/src + +# Copy only Cargo.toml for better caching +COPY .cargo/config.toml .cargo/config.toml +COPY ./Cargo.toml ./Cargo.toml +COPY ./Cargo.lock ./Cargo.lock +COPY ./crates/postgres-docker-utils/Cargo.toml ./crates/postgres-docker-utils/Cargo.toml + +RUN echo "fn main() {}" > ./src/main.rs +RUN echo "fn main() {}" > ./crates/postgres-docker-utils/src/main.rs + +# Prebuild dependencies +RUN cargo fetch +RUN cargo build 
--release --no-default-features + +# Copy all the source files +# .dockerignore ignores the target dir +COPY . . + +# Build the binary +RUN cargo fetch +RUN cargo build --release --no-default-features + +# Make sure it runs +RUN /src/target/release/tx-sitter --version + +# cc variant because we need libgcc and others +FROM gcr.io/distroless/cc-debian12:nonroot + +# Copy the tx-sitter binary +COPY --from=build-env --chown=0:10001 --chmod=010 /src/target/release/tx-sitter /bin/tx-sitter + +ENTRYPOINT [ "/bin/tx-sitter" ] diff --git a/README.md b/README.md index 27ceead..960855d 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,36 @@ A monolithized version of the [tx-sitter](https://github.com/worldcoin/tx-sitter-aws/). -## Testing locally +## Configuration +The Tx Sitter can be configured in 2 ways: +1. Using the config file, refer to `config.rs` and `config.toml` for more info +2. Using env vars. Every field in the config can also be set via an env var. + For example the following config + ```toml + [service] + escalation_interval = "1m" + + [server] + host = "127.0.0.1:3000" + disable_auth = true + + [database] + connection_string = "postgres://postgres:postgres@127.0.0.1:5432/database" + + [keys] + kind = "local" + ``` + + Can also be expressed with env vars + ``` + TX_SITTER__SERVICE__ESCALATION_INTERVAL="1m" + TX_SITTER__SERVER__HOST="127.0.0.1:3000" + TX_SITTER__SERVER__DISABLE_AUTH="true" + TX_SITTER__DATABASE__CONNECTION_STRING="postgres://postgres:postgres@127.0.0.1:5432/database" + TX_SITTER__KEYS__KIND="local" + ``` +## Testing locally Copy `.env.example` to `.env` or set `RUST_LOG=info,service=debug` to have logging. 1. Spin up the database `docker run --rm -e POSTGRES_HOST_AUTH_METHOD=trust -p 5432:5432 postgres` @@ -15,6 +43,14 @@ This will use the `config.toml` configuration. If you have [nushell](https://www.nushell.sh/) installed, `nu manual_test.nu` can be run to execute a basic test. ## Running tests -While you obviously can run tests with `cargo test --workspace` some tests take quite a long time (due to spinning up an anvil node, sending txs, etc.). - -Therefore I recommend [cargo-nextest](https://nexte.st/) as it runs all the tests in parallel. Once installed `cargo nextest run --workspace` can be used instead. +While you obviously can run tests with +``` +cargo test --workspace +``` +some tests take quite a long time (due to spinning up an anvil node, sending txs, etc.). + +Therefore I recommend [cargo-nextest](https://nexte.st/) as it runs all the tests in parallel. Once installed +``` +cargo nextest run --workspace +``` +can be used instead. diff --git a/TODO.md b/TODO.md index 2258c9d..16f0eeb 100644 --- a/TODO.md +++ b/TODO.md @@ -1,15 +1,30 @@ # TODO - -1. [ ] Handling reorgs -2. [ ] Per network settings (i.e. max inflight txs, max gas price) -3. [ ] Multiple RPCs support -4. [ ] Cross-network dependencies (i.e. Optimism depends on L1 gas cost) -5. [ ] Transaction priority -6. [ ] Metrics -7. [ ] Tracing (add telemetry-batteries) -8. [ ] Automated testing +1. [ ] Per network settings (max blocks age/number - for pruning) +4. [ ] Multiple RPCs support +5. [ ] Telemetry (add telemetry-batteries) + 1. [ ] Metrics + 2. [ ] Tracing +6. [ ] Automated testing 1. [x] Basic 2. [ ] Basic with contracts 3. [ ] Escalation testing 4. [ ] Reorg testing (how?!?) -9. [ ] Parallelization in a few places +7. [ ] Plug block holes - we can periodically fetch the list of known blocks for a given chain and find and fetch any missing ones from the RPC +8.
[ ] Find missing txs - sometimes a transaction can be sent but not saved in the DB. On every block we should fetch all the txs (not just hashes) and find txs coming from our relayer addresses. This way we can find missing transactions. +9. [ ] Smarter broadcast error handling - we shouldn't constantly attempt to broadcast the same tx if it's failing (e.g. because relayer address is out of funds). + +# IN PROGRESS + +# DONE +2. [x] Parallelization: + 1. [x] Parallelize block indexing - depends on per network settings + 2. [x] Parallelize nonce updating + 3. [x] Parallelize broadcast per ~chain id~ relayer id +3. [x] No need to insert all block txs into DB +4. [x] Prune block info +5. [x] RPC Access +6. [x] Cross-network dependencies (i.e. Optimism depends on L1 gas cost) +7. [x] Transaction priority +8. [x] Handling reorgs +9. [x] Per relayer settings (i.e. max inflight txs, max gas price) +10. [x] Authentication diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..7db4896 --- /dev/null +++ b/compose.yml @@ -0,0 +1,44 @@ +version: '3' +services: + tx-sitter: + build: + context: . + dockerfile: Dockerfile + depends_on: + - db + - blockchain + restart: always + ports: + - "3000:3000" + environment: + - TX_SITTER__SERVICE__ESCALATION_INTERVAL=1m + - TX_SITTER__DATABASE__KIND=connection_string + - TX_SITTER__DATABASE__CONNECTION_STRING=postgres://postgres:postgres@db:5432/tx-sitter?sslmode=disable + - TX_SITTER__KEYS__KIND=local + - TX_SITTER__SERVICE__PREDEFINED__NETWORK__CHAIN_ID=31337 + - TX_SITTER__SERVICE__PREDEFINED__NETWORK__NAME=Anvil + - TX_SITTER__SERVICE__PREDEFINED__NETWORK__HTTP_RPC=http://blockchain:8545 + - TX_SITTER__SERVICE__PREDEFINED__NETWORK__WS_RPC=ws://blockchain:8545 + - TX_SITTER__SERVICE__PREDEFINED__RELAYER__ID=1b908a34-5dc1-4d2d-a146-5eb46e975830 + - TX_SITTER__SERVICE__PREDEFINED__RELAYER__NAME=Relayer + - TX_SITTER__SERVICE__PREDEFINED__RELAYER__CHAIN_ID=31337 + - TX_SITTER__SERVICE__PREDEFINED__RELAYER__KEY_ID=d10607662a85424f02a33fb1e6d095bd0ac7154396ff09762e41f82ff2233aaa + - TX_SITTER__SERVICE__PREDEFINED__RELAYER__API_KEY=G5CKNF3BTS2hRl60bpdYMNPqXvXsP-QZd2lrtmgctsnllwU9D3Z4D8gOt04M0QNH + - TX_SITTER__SERVER__HOST=0.0.0.0:3000 + - TX_SITTER__SERVER__DISABLE_AUTH=true + - RUST_LOG=info + + db: + hostname: db + image: postgres + ports: + - "5432:5432" + environment: + POSTGRES_HOST_AUTH_METHOD: trust + + blockchain: + hostname: blockchain + image: ghcr.io/foundry-rs/foundry:latest + ports: + - "8545:8545" + command: ["anvil --block-time 2"] diff --git a/config.toml b/config.toml index e4ca7dc..a5c194f 100644 --- a/config.toml +++ b/config.toml @@ -1,20 +1,40 @@ [service] escalation_interval = "1m" +datadog_enabled = false +statsd_enabled = false + +[service.predefined.network] +chain_id = 31337 +name = "predefined" +http_rpc = "http://127.0.0.1:8545" +ws_rpc = "ws://127.0.0.1:8545" + +[service.predefined.relayer] +id = "1b908a34-5dc1-4d2d-a146-5eb46e975830" +name = "predefined" +chain_id = 31337 +key_id = "d10607662a85424f02a33fb1e6d095bd0ac7154396ff09762e41f82ff2233aaa" +api_key = "G5CKNF3BTS2hRl60bpdYMNPqXvXsP-QZd2lrtmgctsk=" [server] host = "127.0.0.1:3000" -disable_auth = true - -[rpc] -rpcs = ["http://127.0.0.1:8545"] +disable_auth = false [database] +kind = "connection_string" connection_string = "postgres://postgres:postgres@127.0.0.1:5432/database" +# [database] +# kind = "parts" +# host = "127.0.0.1" +# port = "5432" +# username = "postgres" +# password = "postgres" +# database = "database" + [keys] kind = "local" # Example KMS 
configuration # [keys] # kind = "kms" -# region = "us-east-1" diff --git a/crates/fake-rpc/Cargo.toml b/crates/fake-rpc/Cargo.toml deleted file mode 100644 index da94d7a..0000000 --- a/crates/fake-rpc/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "fake-rpc" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -serde = "1.0.136" -axum = { version = "0.6.20", features = ["headers"] } -thiserror = "1.0.50" -headers = "0.3.9" -humantime = "2.1.0" -humantime-serde = "1.1.1" -hyper = "0.14.27" -dotenv = "0.15.0" -clap = { version = "4.3.0", features = ["env", "derive"] } -ethers = { version = "2.0.11" } -ethers-signers = { version = "2.0.11" } -eyre = "0.6.5" -hex = "0.4.3" -hex-literal = "0.4.1" -reqwest = { version = "0.11.13", default-features = false, features = [ - "rustls-tls", -] } -serde_json = "1.0.91" -strum = { version = "0.25.0", features = ["derive"] } -tokio = { version = "1", features = ["macros", "rt-multi-thread"] } -tracing = { version = "0.1", features = ["log"] } -tracing-subscriber = { version = "0.3", default-features = false, features = [ - "env-filter", - "std", - "fmt", - "json", - "ansi", -] } -tower-http = { version = "0.4.4", features = ["trace"] } -uuid = { version = "0.8", features = ["v4"] } -futures = "0.3" -chrono = "0.4" -rand = "0.8.5" -sha3 = "0.10.8" -config = "0.13.3" -toml = "0.8.8" -sqlx = { version = "0.7.2", features = [ - "runtime-tokio", - "tls-rustls", - "postgres", - "migrate", -] } -spki = "0.7.2" -async-trait = "0.1.74" diff --git a/crates/fake-rpc/src/lib.rs b/crates/fake-rpc/src/lib.rs deleted file mode 100644 index 2a9dbc0..0000000 --- a/crates/fake-rpc/src/lib.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::net::{Ipv4Addr, SocketAddr}; -use std::sync::atomic::AtomicBool; -use std::sync::Arc; - -use axum::extract::State; -use axum::routing::{post, IntoMakeService}; -use axum::{Json, Router}; -use ethers::utils::{Anvil, AnvilInstance}; -use hyper::server::conn::AddrIncoming; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use tokio::sync::Mutex; - -pub struct DoubleAnvil { - main_anvil: Mutex<AnvilInstance>, - reference_anvil: Mutex<AnvilInstance>, - held_back_txs: Mutex<Vec<JsonRpcReq>>, - - auto_advance: AtomicBool, -} - -impl DoubleAnvil { - pub async fn drop_txs(&self) -> eyre::Result<()> { - let mut held_back_txs = self.held_back_txs.lock().await; - held_back_txs.clear(); - Ok(()) - } - - pub async fn advance(&self) -> eyre::Result<()> { - let mut held_back_txs = self.held_back_txs.lock().await; - - for req in held_back_txs.drain(..) { - tracing::info!(?req, "eth_sendRawTransaction"); - - let response = reqwest::Client::new() - .post(&self.main_anvil.lock().await.endpoint()) - .json(&req) - .send() - .await - .unwrap(); - - let resp = response.json::<Value>().await.unwrap(); - - tracing::info!(?resp, "eth_sendRawTransaction.response"); - } - - Ok(()) - } - - pub fn set_auto_advance(&self, auto_advance: bool) { - self.auto_advance - .store(auto_advance, std::sync::atomic::Ordering::SeqCst); - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -struct JsonRpcReq { - pub id: u64, - pub jsonrpc: String, - pub method: String, - #[serde(default)] - pub params: Vec<Value>, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct JsonRpcResponse { - pub id: u64, - pub jsonrpc: String, - pub result: Value, -} - -async fn advance(State(anvil): State<Arc<DoubleAnvil>>) { - anvil.advance().await.unwrap(); -} - -async fn rpc( - State(anvil): State<Arc<DoubleAnvil>>, - Json(req): Json<JsonRpcReq>, -) -> Json<JsonRpcResponse> { - let method = req.method.as_str(); - let anvil_instance = match method { - "eth_sendRawTransaction" => { - anvil.held_back_txs.lock().await.push(req.clone()); - - if anvil.auto_advance.load(std::sync::atomic::Ordering::SeqCst) { - anvil.advance().await.unwrap(); - } - - anvil.reference_anvil.lock().await - } - "eth_getTransactionReceipt" => anvil.main_anvil.lock().await, - "eth_getTransactionByHash" => anvil.reference_anvil.lock().await, - _ => anvil.main_anvil.lock().await, - }; - - tracing::info!(?req, "{}", method); - - let response = reqwest::Client::new() - .post(&anvil_instance.endpoint()) - .json(&req) - .send() - .await - .unwrap(); - - let resp = response.json::<JsonRpcResponse>().await.unwrap(); - - tracing::info!(?resp, "{}.response", method); - - Json(resp) -} - -pub async fn serve( - port: u16, -) -> ( - Arc<DoubleAnvil>, - axum::Server<AddrIncoming, IntoMakeService<Router>>, -) { - let main_anvil = Anvil::new().spawn(); - let reference_anvil = Anvil::new().spawn(); - - tracing::info!("Main anvil instance: {}", main_anvil.endpoint()); - tracing::info!("Reference anvil instance: {}", reference_anvil.endpoint()); - - let state = Arc::new(DoubleAnvil { - main_anvil: Mutex::new(main_anvil), - reference_anvil: Mutex::new(reference_anvil), - held_back_txs: Mutex::new(Vec::new()), - auto_advance: AtomicBool::new(true), - }); - - let router = Router::new() - .route("/", post(rpc)) - .route("/advance", post(advance)) - .with_state(state.clone()) - .layer(tower_http::trace::TraceLayer::new_for_http()); - - let host = Ipv4Addr::new(127, 0, 0, 1); - let socket_addr = SocketAddr::new(host.into(), port); - - let server = - axum::Server::bind(&socket_addr).serve(router.into_make_service()); - - (state, server) -} diff --git a/crates/fake-rpc/src/main.rs b/crates/fake-rpc/src/main.rs deleted file mode 100644 index a20b425..0000000 --- a/crates/fake-rpc/src/main.rs +++ /dev/null @@ -1,30 +0,0 @@ -use clap::Parser; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::EnvFilter; - -#[derive(Debug, Clone, Parser)] -struct Args { - #[clap(short, long, default_value = "8545")] - port: u16, -} - -#[tokio::main] -async fn main() -> eyre::Result<()> { - dotenv::dotenv().ok(); - - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().pretty().compact()) - .with(EnvFilter::from_default_env()) - .init(); - - let args = Args::parse(); - - let (_app, server) = fake_rpc::serve(args.port).await; - - tracing::info!("Serving fake RPC at {}", server.local_addr()); - - server.await?; - - Ok(()) -} diff --git a/db/migrations/001_init.sql b/db/migrations/001_init.sql index
c043647..62a7209 100644 --- a/db/migrations/001_init.sql +++ b/db/migrations/001_init.sql @@ -1,88 +1,106 @@ -CREATE TYPE block_tx_status AS ENUM ( - 'pending', - 'mined', - 'finalized' -); +CREATE TYPE tx_status AS ENUM ('pending', 'mined', 'finalized'); + +CREATE TYPE rpc_kind AS ENUM ('http', 'ws'); + +CREATE TYPE transaction_priority AS ENUM ('slowest', 'slow', 'regular', 'fast', 'fastest'); --- create table networks ( --- id BIGSERIAL PRIMARY KEY, --- name VARCHAR(255) NOT NULL, --- chain_id BIGINT NOT NULL --- ); +CREATE TABLE networks ( + chain_id BIGINT PRIMARY KEY, + name VARCHAR(255) NOT NULL +); --- create table rpcs ( --- id BIGSERIAL PRIMARY KEY, --- network_id BIGINT NOT NULL REFERENCES networks(id), --- url VARCHAR(255) NOT NULL --- ); +CREATE TABLE rpcs ( + id BIGSERIAL PRIMARY KEY, + chain_id BIGINT NOT NULL REFERENCES networks(chain_id), + url VARCHAR(255) NOT NULL, + kind rpc_kind NOT NULL +); CREATE TABLE relayers ( - id VARCHAR(255) PRIMARY KEY, - name VARCHAR(255) NOT NULL, - chain_id BIGINT NOT NULL, - key_id VARCHAR(255) NOT NULL, - address BYTEA NOT NULL, + -- The relayer's ID is UUID v4 - always 36 characters (including 4 dashes) + id CHAR(36) PRIMARY KEY, + name VARCHAR(255) NOT NULL, + chain_id BIGINT NOT NULL, + key_id VARCHAR(255) NOT NULL, + address BYTEA NOT NULL, -- The local nonce value - nonce BIGINT NOT NULL, + nonce BIGINT NOT NULL DEFAULT 0, -- The confirmed nonce value - current_nonce BIGINT NOT NULL, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -); + current_nonce BIGINT NOT NULL DEFAULT 0, --- CREATE TABLE relayer_deps ( --- relayer_id VARCHAR(255) NOT NULL REFERENCES relayers(id), --- ); + -- Settings + max_inflight_txs BIGINT NOT NULL DEFAULT 5, + -- e.g. 
[ { "chainId": 123, "value": "0x123"} ] + gas_limits JSON NOT NULL DEFAULT '[]', --- Constant tx data - once a tx is created, this data should never change -CREATE TABLE transactions ( - id VARCHAR(255) PRIMARY KEY, - tx_to BYTEA NOT NULL, - data BYTEA NOT NULL, - value BYTEA NOT NULL, - gas_limit BYTEA NOT NULL, - nonce BIGINT NOT NULL, - relayer_id VARCHAR(255) NOT NULL REFERENCES relayers(id) + -- Time keeping fields + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); --- Dynamic tx data & data used for escalations -CREATE TABLE sent_transactions ( - tx_id VARCHAR(255) PRIMARY KEY REFERENCES transactions(id), - initial_max_fee_per_gas BYTEA NOT NULL, - initial_max_priority_fee_per_gas BYTEA NOT NULL, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - first_submitted_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - mined_at TIMESTAMP, - escalation_count BIGINT NOT NULL DEFAULT 0, - last_escalation TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - reorg BOOL NOT NULL DEFAULT FALSE +-- Constant tx data - once a tx is created, this data should never change +CREATE TABLE transactions ( + id VARCHAR(255) PRIMARY KEY, + tx_to BYTEA NOT NULL, + data BYTEA NOT NULL, + value BYTEA NOT NULL, + gas_limit BYTEA NOT NULL, + nonce BIGINT NOT NULL, + priority transaction_priority NOT NULL, + relayer_id CHAR(36) NOT NULL REFERENCES relayers(id) ); -- Sent transaction attempts CREATE TABLE tx_hashes ( - tx_hash BYTEA PRIMARY KEY, - tx_id VARCHAR(255) NOT NULL REFERENCES transactions(id), - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - max_fee_per_gas BYTEA NOT NULL, + tx_hash BYTEA PRIMARY KEY, + tx_id VARCHAR(255) NOT NULL REFERENCES transactions(id) ON DELETE CASCADE, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + max_fee_per_gas BYTEA NOT NULL, max_priority_fee_per_gas BYTEA NOT NULL, - escalated BOOL NOT NULL DEFAULT FALSE, - -- pending | mined | finalized - status block_tx_status NOT NULL DEFAULT 'pending' + escalated BOOL NOT NULL DEFAULT FALSE +); + +-- Dynamic tx data & data used for escalations +CREATE TABLE sent_transactions ( + tx_id VARCHAR(255) PRIMARY KEY REFERENCES transactions(id) ON DELETE CASCADE, + initial_max_fee_per_gas BYTEA NOT NULL, + initial_max_priority_fee_per_gas BYTEA NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + first_submitted_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + mined_at TIMESTAMP, + escalation_count BIGINT NOT NULL DEFAULT 0, + last_escalation TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + reorg BOOL NOT NULL DEFAULT FALSE, + status tx_status NOT NULL DEFAULT 'pending', + -- If the status is mined or finalized this should be the actual tx hash that is mined or finalized + valid_tx_hash BYTEA NOT NULL REFERENCES tx_hashes(tx_hash) ); CREATE TABLE blocks ( - id BIGSERIAL PRIMARY KEY, block_number BIGINT NOT NULL, - chain_id BIGINT NOT NULL, - -- mined | finalized - status block_tx_status NOT NULL, - fee_estimate JSON + chain_id BIGINT NOT NULL, + timestamp TIMESTAMPTZ NOT NULL, + PRIMARY KEY (block_number, chain_id) ); CREATE TABLE block_txs ( - block_id BIGINT REFERENCES blocks(id), - tx_hash BYTEA NOT NULL, - PRIMARY KEY (block_id, tx_hash) + block_number BIGINT NOT NULL, + chain_id BIGINT NOT NULL, + tx_hash BYTEA NOT NULL, + FOREIGN KEY (block_number, chain_id) REFERENCES blocks (block_number, chain_id) ON 
DELETE CASCADE ); +CREATE TABLE block_fees ( + block_number BIGINT NOT NULL, + chain_id BIGINT NOT NULL, + gas_price NUMERIC(78, 0) NOT NULL, + fee_estimate JSON NOT NULL, + PRIMARY KEY (block_number, chain_id), + FOREIGN KEY (block_number, chain_id) REFERENCES blocks (block_number, chain_id) ON DELETE CASCADE +); + +CREATE TABLE api_keys ( + relayer_id CHAR(36) NOT NULL REFERENCES relayers(id) ON DELETE CASCADE, + key_hash BYTEA NOT NULL +); diff --git a/db/migrations/002_relayers_table_update.sql b/db/migrations/002_relayers_table_update.sql new file mode 100644 index 0000000..24e3053 --- /dev/null +++ b/db/migrations/002_relayers_table_update.sql @@ -0,0 +1,5 @@ +ALTER TABLE relayers +RENAME COLUMN gas_limits TO gas_price_limits; + +ALTER TABLE relayers +ADD COLUMN enabled BOOL NOT NULL DEFAULT TRUE; diff --git a/db/migrations/003_relayers_tx_limits.sql b/db/migrations/003_relayers_tx_limits.sql new file mode 100644 index 0000000..2edf370 --- /dev/null +++ b/db/migrations/003_relayers_tx_limits.sql @@ -0,0 +1,6 @@ +ALTER TABLE relayers +ADD COLUMN max_queued_txs BIGINT NOT NULL DEFAULT 20, +ADD CONSTRAINT check_max_queued_txs CHECK (max_queued_txs > max_inflight_txs); + +UPDATE relayers +SET max_queued_txs = GREATEST(max_inflight_txs, 20); diff --git a/manual_test.nu b/manual_test.nu index 9fa8df3..2943744 100644 --- a/manual_test.nu +++ b/manual_test.nu @@ -1,11 +1,41 @@ +## Setup dependencies in different terminals: +## DB +# docker run --rm -e POSTGRES_HOST_AUTH_METHOD=trust -p 5432:5432 postgres +## Can connect to using psql postgres://postgres:postgres@127.0.0.1:5432/database + +## Nodes +# anvil --chain-id 31337 -p 8545 --block-time 1 +# anvil --chain-id 31338 -p 8546 --block-time 1 + +## TxSitter +# cargo watch -x run +## or just +# cargo run + +echo "Start" + +let txSitter = "http://127.0.0.1:3000" +let anvilSocket = "127.0.0.1:8545" + +http post -t application/json $"($txSitter)/1/admin/network/31337" { + name: "Anvil network", + httpRpc: $"http://($anvilSocket)", + wsRpc: $"ws://($anvilSocket)" +} + echo "Creating relayer" -let relayer = http post -t application/json http://127.0.0.1:3000/1/relayer/create { "name": "My Relayer", "chainId": 31337 } +let relayer = http post -t application/json $"($txSitter)/1/admin/relayer" { "name": "My Relayer", "chainId": 31337 } + +echo "Create api key" +let apiKey = http post $"($txSitter)/1/admin/relayer/($relayer.relayerId)/key" "" + +$env.ETH_RPC_URL = $"($txSitter)/1/api/($apiKey.apiKey)/rpc" echo "Funding relayer" cast send --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --value 100ether $relayer.address '' echo "Sending transaction" -let tx = http post -t application/json http://127.0.0.1:3000/1/tx/send { +let tx = http post -t application/json $"($txSitter)/1/api/($apiKey.apiKey)/tx" { "relayerId": $relayer.relayerId, "to": $relayer.address, "value": "10", @@ -15,12 +45,13 @@ let tx = http post -t application/json http://127.0.0.1:3000/1/tx/send { echo "Wait until tx is mined" for i in 0..100 { - let txResponse = http get http://127.0.0.1:3000/1/tx/($tx.txId) + let txResponse = http get $"($txSitter)/1/api/($apiKey.apiKey)/tx/($tx.txId)" if ($txResponse | get -i status) == "mined" { echo $txResponse break } else { + echo $txResponse sleep 1sec } } diff --git a/manual_test_kms.nu b/manual_test_kms.nu new file mode 100644 index 0000000..7ad746a --- /dev/null +++ b/manual_test_kms.nu @@ -0,0 +1,59 @@ +## Setup dependencies in different terminals: +## DB +# docker run --rm -e 
POSTGRES_HOST_AUTH_METHOD=trust -p 5432:5432 postgres +## Can connect to using psql postgres://postgres:postgres@127.0.0.1:5432/database +## TxSitter +# cargo watch -x run +## or just +# cargo run + +echo "Start" + +let txSitter = "http://127.0.0.1:3000" + +http post -t application/json $"($txSitter)/1/admin/network/11155111" { + name: "Ethereum Sepolia", + httpRpc: $env.SEPOLIA_HTTP_RPC, + wsRpc: $env.SEPOLIA_WS_RPC, +} + +echo "Creating relayer" +let relayer = http post -t application/json $"($txSitter)/1/admin/relayer" { "name": "My Relayer", "chainId": 11155111 } + +http post -t application/json $"($txSitter)/1/admin/relayer/($relayer.relayerId)" { + gasPriceLimits: [ + { chainId: 11155111, value: "0x123" } + ] +} + +echo "Create api key" +let apiKey = http post $"($txSitter)/1/admin/relayer/($relayer.relayerId)/key" "" + +$env.ETH_RPC_URL = $"($txSitter)/1/api/($apiKey.apiKey)/rpc" + +echo "Funding relayer" +cast send --private-key $env.PRIVATE_KEY --value 1ether $relayer.address '' + +echo "Sending transaction" +let tx = http post -t application/json $"($txSitter)/1/api/($apiKey.apiKey)/tx" { + "relayerId": $relayer.relayerId, + "to": $relayer.address, + "value": "10", + "data": "", + "gasLimit": "150000" +} + +echo "Wait until tx is mined" +for i in 0..100 { + let txResponse = http get $"($txSitter)/1/api/($apiKey.apiKey)/tx/($tx.txId)" + + if ($txResponse | get -i status) == "mined" { + echo $txResponse + break + } else { + echo $txResponse + sleep 1sec + } +} + +echo "Success!" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 381275b..b6e0a4d 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,3 @@ [toolchain] channel = "nightly-2023-11-15" +components = [ "clippy" ] diff --git a/src/api_key.rs b/src/api_key.rs new file mode 100644 index 0000000..06a1216 --- /dev/null +++ b/src/api_key.rs @@ -0,0 +1,245 @@ +use std::borrow::Cow; +use std::str::FromStr; + +use base64::Engine; +use rand::rngs::OsRng; +use rand::Rng; +use serde::Serialize; +use sha3::{Digest, Sha3_256}; + +const DEFAULT_SECRET_LEN: usize = 16; +const MIN_SECRET_LEN: usize = 16; +const MAX_SECRET_LEN: usize = 32; +const UUID_LEN: usize = 16; + +#[derive(Clone, Eq, PartialEq)] +struct ApiSecret(Vec<u8>); + +/// Implement Serialize manually to avoid leaking the secret. +impl Serialize for ApiSecret { + fn serialize<S: serde::Serializer>( + &self, + serializer: S, + ) -> Result<S::Ok, S::Error> { + serializer.collect_str(&"***") + } +} + +/// Implement Debug manually to avoid leaking the secret. +impl std::fmt::Debug for ApiSecret { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("ApiSecret").field(&"***").finish() + } +} + +/// Zero out the secret when dropped. +impl Drop for ApiSecret { + fn drop(&mut self) { + self.0.iter_mut().for_each(|b| *b = 0); + } +}
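Reviewer note: the plain `*b = 0` loop above is a best-effort wipe; since the buffer is freed immediately afterwards, the optimizer is in principle allowed to elide those stores. A minimal sketch of a sturdier variant, assuming no new dependency is wanted (the `zeroize` crate is the usual production answer):

use std::ptr;
use std::sync::atomic::{compiler_fence, Ordering};

struct Secret(Vec<u8>);

impl Drop for Secret {
    fn drop(&mut self) {
        for b in self.0.iter_mut() {
            // volatile stores are much harder for the optimizer to remove
            unsafe { ptr::write_volatile(b, 0) };
        }
        // keep the stores from being reordered past the end of drop
        compiler_fence(Ordering::SeqCst);
    }
}

fn main() {
    drop(Secret(vec![1, 2, 3]));
}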
+#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ApiKey { + relayer_id: String, + secret: ApiSecret, +} + +impl ApiKey { + pub fn new( + relayer_id: impl ToString, + secret: Vec<u8>, + ) -> eyre::Result<Self> { + if secret.len() < MIN_SECRET_LEN || secret.len() > MAX_SECRET_LEN { + eyre::bail!("invalid api key"); + } + let relayer_id = relayer_id.to_string(); + + Ok(Self { + relayer_id, + secret: ApiSecret(secret), + }) + } + + pub fn random(relayer_id: impl ToString) -> Self { + let relayer_id = relayer_id.to_string(); + + Self { + relayer_id, + secret: ApiSecret(OsRng.gen::<[u8; DEFAULT_SECRET_LEN]>().into()), + } + } + + pub fn api_key_secret_hash(&self) -> [u8; 32] { + Sha3_256::digest(self.secret.0.clone()).into() + } + + pub fn relayer_id(&self) -> &str { + &self.relayer_id + } +} + +impl Serialize for ApiKey { + fn serialize<S: serde::Serializer>( + &self, + serializer: S, + ) -> Result<S::Ok, S::Error> { + serializer + .serialize_str(&self.reveal().map_err(serde::ser::Error::custom)?) + } +} + +impl<'de> serde::Deserialize<'de> for ApiKey { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + <Cow<'static, str>>::deserialize(deserializer)? + .parse() + .map_err(serde::de::Error::custom) + } +} + +impl FromStr for ApiKey { + type Err = eyre::ErrReport; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let buffer = base64::prelude::BASE64_URL_SAFE.decode(s)?; + + if buffer.len() < UUID_LEN + MIN_SECRET_LEN + || buffer.len() > UUID_LEN + MAX_SECRET_LEN + { + eyre::bail!("invalid api key"); + } + + let relayer_id = uuid::Uuid::from_slice(&buffer[..UUID_LEN])?; + let relayer_id = relayer_id.to_string(); + + let secret = ApiSecret(buffer[UUID_LEN..].into()); + + Ok(Self { relayer_id, secret }) + } +} + +impl ApiKey { + pub fn reveal(&self) -> eyre::Result<String> { + let relayer_id = uuid::Uuid::parse_str(&self.relayer_id) + .map_err(|_| std::fmt::Error)?; + + let bytes = relayer_id + .as_bytes() + .iter() + .cloned() + .chain(self.secret.0.iter().cloned()) + .collect::<Vec<u8>>(); + + Ok(base64::prelude::BASE64_URL_SAFE.encode(bytes)) + } +}
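The encoded key is simply base64url(relayer UUID bytes followed by the secret), which is what makes `reveal` and `FromStr` inverses of each other. A round-trip sketch with hypothetical values (assumes the `uuid` crate's `v4` feature):

use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;

fn main() {
    let relayer_id = uuid::Uuid::new_v4();
    let secret = [0x42u8; 16]; // DEFAULT_SECRET_LEN bytes

    // encode: 16 UUID bytes followed by the 16..=32 secret bytes
    let mut buf = relayer_id.as_bytes().to_vec();
    buf.extend_from_slice(&secret);
    let key = BASE64_URL_SAFE.encode(&buf);

    // decode: split at the fixed UUID_LEN prefix
    let raw = BASE64_URL_SAFE.decode(&key).unwrap();
    assert_eq!(uuid::Uuid::from_slice(&raw[..16]).unwrap(), relayer_id);
    assert_eq!(&raw[16..], &secret);
}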
+ +#[cfg(test)] +mod tests { + use rand::rngs::OsRng; + + use super::*; + + fn random_api_key() -> ApiKey { + ApiKey::new( + uuid::Uuid::new_v4().to_string(), + OsRng.gen::<[u8; DEFAULT_SECRET_LEN]>().into(), + ) + .unwrap() + } + + fn invalid_short_api_key() -> ApiKey { + let mut buf = [0u8; MIN_SECRET_LEN - 1]; + OsRng.fill(&mut buf[..]); + ApiKey { + relayer_id: uuid::Uuid::new_v4().to_string(), + secret: ApiSecret(buf.into()), + } + } + + fn invalid_long_api_key() -> ApiKey { + let mut buf = [0u8; MAX_SECRET_LEN + 1]; + OsRng.fill(&mut buf[..]); + ApiKey { + relayer_id: uuid::Uuid::new_v4().to_string(), + secret: ApiSecret(buf.into()), + } + } + + #[test] + fn from_to_str() { + let api_key = random_api_key(); + + let api_key_str = api_key.reveal().unwrap(); + + println!("api_key_str = {api_key_str}"); + + let api_key_parsed = api_key_str.parse::<ApiKey>().unwrap(); + + assert_eq!(api_key.relayer_id, api_key_parsed.relayer_id); + assert_eq!(api_key.secret, api_key_parsed.secret); + } + + #[test] + fn assert_api_secret_debug() { + let api_secret = random_api_key().secret; + assert_eq!(&format!("{:?}", api_secret), "ApiSecret(\"***\")"); + } + + #[test] + fn assert_api_key_length_validation() { + let long_api_key = invalid_long_api_key(); + + let _ = ApiKey::new( + long_api_key.relayer_id.clone(), + long_api_key.secret.0.clone(), + ) + .expect_err("long api key should be invalid"); + + let _ = ApiKey::from_str(&long_api_key.reveal().unwrap()) + .expect_err("long api key should be invalid"); + + let short_api_key = invalid_short_api_key(); + + let _ = ApiKey::new( + short_api_key.relayer_id.clone(), + short_api_key.secret.0.clone(), + ) + .expect_err("short api key should be invalid"); + + let _ = ApiKey::from_str(&short_api_key.reveal().unwrap()) + .expect_err("short api key should be invalid"); + } + + #[test] + fn from_to_serde_json() { + let api_key = random_api_key(); + + let api_key_json = serde_json::to_string(&api_key).unwrap(); + + println!("api_key_str = {api_key_json}"); + + let api_key_parsed: ApiKey = + serde_json::from_str(&api_key_json).unwrap(); + + assert_eq!(api_key, api_key_parsed); + } + + #[test] + fn from_to_serde_json_owned() { + let api_key = random_api_key(); + + let api_key_json: serde_json::Value = + serde_json::to_value(&api_key).unwrap(); + + println!("api_key_str = {api_key_json}"); + + let api_key_parsed: ApiKey = + serde_json::from_value(api_key_json).unwrap(); + + assert_eq!(api_key, api_key_parsed); + } +} diff --git a/src/app.rs b/src/app.rs index f3fffac..f3a1c49 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,24 +1,21 @@ -use std::collections::HashMap; -use std::sync::Arc; - use ethers::middleware::SignerMiddleware; -use ethers::providers::{Http, Middleware, Provider}; -use ethers::types::{BlockNumber, U256}; -use ethers_signers::Signer; -use eyre::{Context, ContextCompat}; +use ethers::providers::{Http, Provider, Ws}; +use ethers::signers::Signer; +use eyre::Context; +use crate::api_key::ApiKey; use crate::config::{Config, KeysConfig}; -use crate::db::{BlockTxStatus, Database}; +use crate::db::data::RpcKind; +use crate::db::Database; use crate::keys::{KeysSource, KmsKeys, LocalKeys, UniversalSigner}; -use crate::tasks::index::fetch_block_with_fee_estimates; -pub type AppMiddleware = SignerMiddleware<Arc<Provider<Http>>, UniversalSigner>; +pub type AppGenericMiddleware<T> = + SignerMiddleware<Provider<T>, UniversalSigner>; +pub type AppMiddleware = AppGenericMiddleware<Http>; pub struct App { pub config: Config, - pub rpcs: HashMap<U256, Arc<Provider<Http>>>, - pub keys_source: Box<dyn KeysSource>, pub db: Database, @@ -26,32 +23,45 @@ pub struct App { impl App { pub async fn new(config: Config) -> eyre::Result<Self> { - let rpcs = init_rpcs(&config).await?; let keys_source = init_keys_source(&config).await?; let db = Database::new(&config.database).await?; - seed_initial_blocks(&rpcs, &db).await?; - Ok(Self { config, - rpcs, keys_source, db, }) } - pub async fn fetch_signer_middleware( + pub async fn http_provider( + &self, + chain_id: u64, + ) -> eyre::Result<Provider<Http>> { + let url = self.db.get_network_rpc(chain_id, RpcKind::Http).await?; + + let provider = Provider::<Http>::try_from(url.as_str())?; + + Ok(provider) + } + + pub async fn ws_provider( &self, - chain_id: impl Into<U256>, + chain_id: u64, + ) -> eyre::Result<Provider<Ws>> { + let url = self.db.get_network_rpc(chain_id, RpcKind::Ws).await?; + + let ws = Ws::connect(url.as_str()).await?; + let provider = Provider::new(ws); + + Ok(provider) + }
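With RPC URLs now stored per network in the database, providers are built on demand rather than cached in a startup HashMap. A usage sketch (hypothetical helper, assuming the chain has been registered via the admin network route):

use ethers::providers::Middleware;

async fn latest_block(app: &App, chain_id: u64) -> eyre::Result<u64> {
    // fails if no http RPC row exists for this chain id
    let provider = app.http_provider(chain_id).await?;
    Ok(provider.get_block_number().await?.as_u64())
}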
+ + pub async fn signer_middleware( + &self, + chain_id: u64, key_id: String, ) -> eyre::Result<AppMiddleware> { - let chain_id: U256 = chain_id.into(); - - let rpc = self - .rpcs - .get(&chain_id) - .context("Missing RPC for chain id")? - .clone(); + let rpc = self.http_provider(chain_id).await?; let wallet = self .keys_source @@ -59,12 +69,24 @@ impl App { .await .context("Missing signer")?; - let wallet = wallet.with_chain_id(chain_id.as_u64()); + let wallet = wallet.with_chain_id(chain_id); let middleware = SignerMiddleware::new(rpc, wallet); Ok(middleware) } + + pub async fn is_authorized( + &self, + api_token: &ApiKey, + ) -> eyre::Result<bool> { + self.db + .is_api_key_valid( + api_token.relayer_id(), + api_token.api_key_secret_hash(), + ) + .await + } } async fn init_keys_source( @@ -81,45 +103,3 @@ Ok(keys_source) } - -async fn init_rpcs( - config: &Config, -) -> eyre::Result<HashMap<U256, Arc<Provider<Http>>>> { - let mut providers = HashMap::new(); - - for rpc_url in &config.rpc.rpcs { - let provider = Provider::<Http>::try_from(rpc_url.as_str())?; - let chain_id = provider.get_chainid().await?; - - providers.insert(chain_id, Arc::new(provider)); - } - - Ok(providers) -} - -async fn seed_initial_blocks( - rpcs: &HashMap<U256, Arc<Provider<Http>>>, - db: &Database, -) -> eyre::Result<()> { - for (chain_id, rpc) in rpcs { - tracing::info!("Seeding block for chain id {chain_id}"); - - if !db.has_blocks_for_chain(chain_id.as_u64()).await? { - let (block, fee_estimates) = - fetch_block_with_fee_estimates(rpc, BlockNumber::Latest) - .await? - .context("Missing latest block")?; - - db.save_block( - block.number.context("Missing block number")?.as_u64(), - chain_id.as_u64(), - &block.transactions, - &fee_estimates, - BlockTxStatus::Mined, - ) - .await?; - } - } - - Ok(()) -} diff --git a/src/aws/ethers_signer.rs b/src/aws/ethers_signer.rs index 3296679..7c8eb89 100644 --- a/src/aws/ethers_signer.rs +++ b/src/aws/ethers_signer.rs @@ -6,7 +6,6 @@ use aws_sdk_kms::operation::get_public_key::{ }; use aws_sdk_kms::operation::sign::{SignError, SignOutput}; use aws_sdk_kms::types::{MessageType, SigningAlgorithmSpec}; -use aws_smithy_types::body::SdkBody; use aws_smithy_types::Blob; use ethers::core::k256::ecdsa::{ Error as K256Error, Signature as KSig, VerifyingKey, @@ -15,7 +14,6 @@ use ethers::core::types::transaction::eip2718::TypedTransaction; use ethers::core::types::transaction::eip712::Eip712; use ethers::core::types::{Address, Signature as EthSig, H256}; use ethers::core::utils::hash_message; -use hyper::Response; use tracing::{debug, instrument, trace}; mod utils; @@ -34,7 +32,7 @@ use utils::{apply_eip155, verifying_key_to_address}; /// use rusoto_core::Client; /// use rusoto_kms::{Kms, KmsClient}; /// -/// user ethers_signers::Signer; +/// use ethers::signers::Signer; /// /// let client = Client::new_with( /// EnvironmentProvider::default(), @@ -81,9 +79,9 @@ impl std::fmt::Display for AwsSigner { #[derive(thiserror::Error, Debug)] pub enum AwsSignerError { #[error("{0}")] - SignError(#[from] SdkError<SignError, Response<SdkBody>>), + SignError(#[from] SdkError<SignError>), #[error("{0}")] - GetPublicKeyError(#[from] SdkError<GetPublicKeyError, Response<SdkBody>>), + GetPublicKeyError(#[from] SdkError<GetPublicKeyError>), #[error("{0}")] K256(#[from] K256Error), #[error("{0}")] @@ -114,7 +112,7 @@ impl From for AwsSignerError { async fn request_get_pubkey<T>( kms: &aws_sdk_kms::Client, key_id: T, -) -> Result<GetPublicKeyOutput, SdkError<GetPublicKeyError, Response<SdkBody>>> +) -> Result<GetPublicKeyOutput, SdkError<GetPublicKeyError>> where T: AsRef<str>, { @@ -130,7 +128,7 @@ async fn request_sign_digest<T>( kms: &aws_sdk_kms::Client, key_id: T, digest: [u8; 32], -) -> Result<SignOutput, SdkError<SignError, Response<SdkBody>>> +) -> Result<SignOutput, SdkError<SignError>> where T: AsRef<str>, { @@ -143,7 +141,9 @@ where .signing_algorithm(SigningAlgorithmSpec::EcdsaSha256) .send() .await; + trace!("{:?}", &resp); + resp } @@ -310,6 +310,7 @@ impl ethers::signers::Signer for AwsSigner { #[cfg(test)] mod tests { + use aws_config::BehaviorVersion; + use
aws_credential_types::Credentials; use aws_sdk_kms::Client as KmsClient; use aws_types::region::Region; @@ -324,7 +325,7 @@ mod tests { let credentials = Credentials::from_keys(access_key, secret_access_key, None); - let config = aws_config::from_env() + let config = aws_config::defaults(BehaviorVersion::latest()) .credentials_provider(credentials) .region(Region::new("us-west-1")) .load() @@ -335,7 +336,7 @@ mod tests { #[allow(dead_code)] async fn env_client() -> KmsClient { - let config = aws_config::from_env() + let config = aws_config::defaults(BehaviorVersion::latest()) .region(Region::new("us-west-1")) .load() .await; diff --git a/src/broadcast_utils.rs b/src/broadcast_utils.rs index e4172d9..f1d791d 100644 --- a/src/broadcast_utils.rs +++ b/src/broadcast_utils.rs @@ -1,77 +1,58 @@ -use ethers::types::{Eip1559TransactionRequest, U256}; +use ethers::types::U256; +use eyre::ContextCompat; use self::gas_estimation::FeesEstimate; +use crate::app::App; +use crate::types::RelayerInfo; pub mod gas_estimation; -const BASE_FEE_PER_GAS_SURGE_FACTOR: u64 = 2; - -// TODO: Adjust -const MIN_PRIORITY_FEE: U256 = U256([10, 0, 0, 0]); -const MAX_GAS_PRICE: U256 = U256([100_000_000_000, 0, 0, 0]); - /// Returns a tuple of max and max priority fee per gas pub fn calculate_gas_fees_from_estimates( estimates: &FeesEstimate, tx_priority_index: usize, max_base_fee_per_gas: U256, -) -> eyre::Result<(U256, U256)> { +) -> (U256, U256) { let max_priority_fee_per_gas = estimates.percentile_fees[tx_priority_index]; - let max_priority_fee_per_gas = - std::cmp::max(max_priority_fee_per_gas, MIN_PRIORITY_FEE); - let max_fee_per_gas = max_base_fee_per_gas + max_priority_fee_per_gas; - let max_fee_per_gas = std::cmp::min(max_fee_per_gas, MAX_GAS_PRICE); - Ok((max_fee_per_gas, max_priority_fee_per_gas)) + (max_fee_per_gas, max_priority_fee_per_gas) } -/// Calculates the max base fee per gas -/// Returns an error if the base fee per gas is too high -/// -/// i.e. 
the base fee from estimates surged by a factor -pub fn calculate_max_base_fee_per_gas( - estimates: &FeesEstimate, -) -> eyre::Result<U256> { - let base_fee_per_gas = estimates.base_fee_per_gas; - - if base_fee_per_gas > MAX_GAS_PRICE { - tracing::warn!("Base fee per gas is too high, retrying later"); - eyre::bail!("Base fee per gas is too high"); +pub async fn should_send_relayer_transactions( + app: &App, + relayer: &RelayerInfo, +) -> eyre::Result<bool> { + if !relayer.enabled { + tracing::warn!( + relayer_id = relayer.id, + chain_id = relayer.chain_id, + "Relayer is disabled, skipping transactions broadcast" + ); + + return Ok(false); } - // Surge the base fee per gas - let max_base_fee_per_gas = base_fee_per_gas * BASE_FEE_PER_GAS_SURGE_FACTOR; - - Ok(max_base_fee_per_gas) -} - -pub fn escalate_priority_fee( - max_base_fee_per_gas: U256, - max_network_fee_per_gas: U256, - current_max_priority_fee_per_gas: U256, - escalation_count: usize, - tx: &mut Eip1559TransactionRequest, -) { - // Min increase of 20% on the priority fee required for a replacement tx - let increased_gas_price_percentage = - U256::from(100 + (10 * (1 + escalation_count))); - - let factor = U256::from(100); - - let new_max_priority_fee_per_gas = current_max_priority_fee_per_gas - * increased_gas_price_percentage - / factor; - - let new_max_priority_fee_per_gas = - std::cmp::min(new_max_priority_fee_per_gas, max_network_fee_per_gas); - - let new_max_fee_per_gas = - max_base_fee_per_gas + new_max_priority_fee_per_gas; - let new_max_fee_per_gas = - std::cmp::min(new_max_fee_per_gas, max_network_fee_per_gas); + for gas_limit in &relayer.gas_price_limits.0 { + let chain_fees = app + .db + .get_latest_block_fees_by_chain_id(relayer.chain_id) + .await? + .context("Missing block")?; + + if chain_fees.gas_price > gas_limit.value.0 { + tracing::warn!( + relayer_id = relayer.id, + chain_id = relayer.chain_id, + gas_price = ?chain_fees.gas_price, + gas_limit = ?gas_limit.value.0, + "Gas price is too high for relayer" + ); + + return Ok(false); + } + } - tx.max_fee_per_gas = Some(new_max_fee_per_gas); - tx.max_priority_fee_per_gas = Some(new_max_priority_fee_per_gas); + Ok(true) }
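The per-relayer gas price limits replace the old global MAX_GAS_PRICE constant removed above. A worked example of the comparison that decides whether a relayer's queue may broadcast (hypothetical numbers):

use ethers::types::U256;

fn main() {
    // relayer limit: 30 gwei; latest observed block gas price: 42 gwei
    let limit = U256::from(30_000_000_000u64);
    let gas_price = U256::from(42_000_000_000u64);

    // mirrors should_send_relayer_transactions: broadcasting pauses
    // until the observed gas price drops back under the limit
    let should_send = gas_price <= limit;
    assert!(!should_send);
}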
diff --git a/src/client.rs b/src/client.rs index 8b13789..c92f639 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1 +1,166 @@ +use reqwest::Response; +use thiserror::Error; +use crate::api_key::ApiKey; +use crate::server::routes::network::NewNetworkInfo; +use crate::server::routes::relayer::{ + CreateApiKeyResponse, CreateRelayerRequest, CreateRelayerResponse, +}; +use crate::server::routes::transaction::{ + GetTxResponse, SendTxRequest, SendTxResponse, +}; +use crate::server::ApiError; +use crate::types::RelayerUpdate; + +pub struct TxSitterClient { + client: reqwest::Client, + url: String, +} + +#[derive(Debug, Error)] +pub enum ClientError { + #[error("Reqwest error: {0}")] + Reqwest(#[from] reqwest::Error), + + #[error("Serialization error: {0}")] + Serde(#[from] serde_json::Error), + + #[error("API error: {0}")] + TxSitter(#[from] ApiError), + + #[error("Invalid API key: {0}")] + InvalidApiKey(eyre::Error), +} + +impl TxSitterClient { + pub fn new(url: impl ToString) -> Self { + Self { + client: reqwest::Client::new(), + url: url.to_string(), + } + } + + async fn post<R>(&self, url: &str) -> Result<R, ClientError> + where + R: serde::de::DeserializeOwned, + { + let response = self.client.post(url).send().await?; + + let response = Self::validate_response(response).await?; + + Ok(response.json().await?) + } + + async fn json_post<T, R>( + &self, + url: &str, + body: T, + ) -> Result<R, ClientError> + where + T: serde::Serialize, + R: serde::de::DeserializeOwned, + { + let response = self.client.post(url).json(&body).send().await?; + + let response = Self::validate_response(response).await?; + + Ok(response.json().await?) + } + + async fn json_get<R>(&self, url: &str) -> Result<R, ClientError> + where + R: serde::de::DeserializeOwned, + { + let response = self.client.get(url).send().await?; + + let response = Self::validate_response(response).await?; + + Ok(response.json().await?) + } + + async fn validate_response( + response: Response, + ) -> Result<Response, ClientError> { + if !response.status().is_success() { + let body: ApiError = response.json().await?; + return Err(ClientError::TxSitter(body)); + } + + Ok(response) + } + + pub async fn create_relayer( + &self, + req: &CreateRelayerRequest, + ) -> Result<CreateRelayerResponse, ClientError> { + self.json_post(&format!("{}/1/admin/relayer", self.url), req) + .await + } + + pub async fn create_relayer_api_key( + &self, + relayer_id: &str, + ) -> Result<CreateApiKeyResponse, ClientError> { + self.post(&format!("{}/1/admin/relayer/{relayer_id}/key", self.url,)) + .await + } + + pub async fn update_relayer( + &self, + relayer_id: &str, + relayer_update: RelayerUpdate, + ) -> Result<(), ClientError> { + self.json_post( + &format!("{}/1/admin/relayer/{relayer_id}", self.url), + relayer_update, + ) + .await + } + + pub async fn send_tx( + &self, + api_key: &ApiKey, + req: &SendTxRequest, + ) -> Result<SendTxResponse, ClientError> { + self.json_post( + &format!( + "{}/1/api/{}/tx", + self.url, + api_key.reveal().map_err(ClientError::InvalidApiKey)? + ), + req, + ) + .await + } + + pub async fn get_tx( + &self, + api_key: &ApiKey, + tx_id: &str, + ) -> Result<GetTxResponse, ClientError> { + self.json_get(&format!( + "{}/1/api/{}/tx/{tx_id}", + self.url, + api_key.reveal().map_err(ClientError::InvalidApiKey)?, + tx_id = tx_id + )) + .await + } + + pub async fn create_network( + &self, + chain_id: u64, + req: &NewNetworkInfo, + ) -> Result<(), ClientError> { + let response = self + .client + .post(&format!("{}/1/admin/network/{}", self.url, chain_id)) + .json(&req) + .send() + .await?; + + Self::validate_response(response).await?; + + Ok(()) + } +}
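End to end, the client follows the same sequence as manual_test.nu: register the network, create a relayer, mint an API key, then send and poll through the relayer-scoped routes. A sketch of that flow; the request/response field names (`relayer_id`, `api_key`, `tx_id`, the `CreateRelayerRequest` contents, and the `SendTxRequest` Default impl) are assumptions inferred from the JSON bodies used elsewhere in this diff:

async fn demo() -> Result<(), ClientError> {
    let client = TxSitterClient::new("http://127.0.0.1:3000");

    // assumed fields, mirroring {"name": ..., "chainId": ...}
    let relayer = client
        .create_relayer(&CreateRelayerRequest {
            name: "My Relayer".to_string(),
            chain_id: 31337,
        })
        .await?;

    let key = client.create_relayer_api_key(&relayer.relayer_id).await?;

    // assumed Default impl standing in for the full request body
    let tx = client.send_tx(&key.api_key, &SendTxRequest::default()).await?;
    let _status = client.get_tx(&key.api_key, &tx.tx_id).await?;

    Ok(())
}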
"humantime_serde", default = "default_hard_reorg_interval")] + pub hard_reorg_interval: Duration, + + #[serde(default)] + pub datadog_enabled: bool, + + #[serde(default)] + pub statsd_enabled: bool, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub predefined: Option, +} + +const fn default_soft_reorg_interval() -> Duration { + Duration::from_secs(60) +} + +const fn default_hard_reorg_interval() -> Duration { + Duration::from_secs(60 * 60) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct Predefined { + pub network: PredefinedNetwork, + pub relayer: PredefinedRelayer, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct PredefinedNetwork { + pub chain_id: u64, + pub name: String, + pub http_rpc: String, + pub ws_rpc: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct PredefinedRelayer { + pub id: String, + pub name: String, + pub key_id: String, + pub chain_id: u64, + pub api_key: ApiKey, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -25,23 +105,66 @@ pub struct TxSitterConfig { pub struct ServerConfig { pub host: SocketAddr, - #[serde(default)] - pub disable_auth: bool, + pub username: Option, + pub password: Option, +} + +impl ServerConfig { + pub fn credentials(&self) -> Option<(&str, &str)> { + let username = self.username.as_deref()?; + let password = self.password.as_deref()?; + + Some((username, password)) + } } #[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub struct RpcConfig { - #[serde(default)] - pub rpcs: Vec, +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum DatabaseConfig { + ConnectionString(DbConnectionString), + Parts(DbParts), +} + +impl DatabaseConfig { + pub fn connection_string(s: impl ToString) -> Self { + Self::ConnectionString(DbConnectionString { + connection_string: s.to_string(), + }) + } + + pub fn to_connection_string(&self) -> String { + match self { + Self::ConnectionString(s) => s.connection_string.clone(), + Self::Parts(parts) => { + format!( + "postgres://{}:{}@{}:{}/{}", + parts.username, + parts.password, + parts.host, + parts.port, + parts.database + ) + } + } + } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] -pub struct DatabaseConfig { +pub struct DbConnectionString { pub connection_string: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct DbParts { + pub host: String, + pub port: String, + pub username: String, + pub password: String, + pub database: String, +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case", tag = "kind")] pub enum KeysConfig { @@ -51,41 +174,149 @@ pub enum KeysConfig { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] -pub struct KmsKeysConfig { - pub region: String, -} +pub struct KmsKeysConfig {} -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct LocalKeysConfig {} +impl KeysConfig { + pub fn is_local(&self) -> bool { + matches!(self, Self::Local(_)) + } +} + #[cfg(test)] mod tests { + use indoc::indoc; + use super::*; + const WITH_DB_CONNECTION_STRING: &str = indoc! 
{r#" + [service] + escalation_interval = "1h" + soft_reorg_interval = "1m" + hard_reorg_interval = "1h" + datadog_enabled = false + statsd_enabled = false + + [server] + host = "127.0.0.1:3000" + + [database] + kind = "connection_string" + connection_string = "postgres://postgres:postgres@127.0.0.1:52804/database" + + [keys] + kind = "local" + "#}; + + const WITH_DB_PARTS: &str = indoc! {r#" + [service] + escalation_interval = "1h" + soft_reorg_interval = "1m" + hard_reorg_interval = "1h" + datadog_enabled = false + statsd_enabled = false + + [server] + host = "127.0.0.1:3000" + + [database] + kind = "parts" + host = "host" + port = "5432" + username = "user" + password = "pass" + database = "db" + + [keys] + kind = "local" + "#}; + #[test] - fn sample() { + fn with_db_connection_string() { let config = Config { service: TxSitterConfig { escalation_interval: Duration::from_secs(60 * 60), + soft_reorg_interval: default_soft_reorg_interval(), + hard_reorg_interval: default_hard_reorg_interval(), + datadog_enabled: false, + statsd_enabled: false, + predefined: None, }, server: ServerConfig { host: SocketAddr::from(([127, 0, 0, 1], 3000)), - disable_auth: false, + username: None, + password: None, }, - rpc: RpcConfig { - rpcs: vec!["hello".to_string()], + database: DatabaseConfig::connection_string( + "postgres://postgres:postgres@127.0.0.1:52804/database" + .to_string(), + ), + keys: KeysConfig::Local(LocalKeysConfig::default()), + }; + + let toml = toml::to_string_pretty(&config).unwrap(); + + assert_eq!(toml, WITH_DB_CONNECTION_STRING); + } + + #[test] + fn with_db_parts() { + let config = Config { + service: TxSitterConfig { + escalation_interval: Duration::from_secs(60 * 60), + soft_reorg_interval: default_soft_reorg_interval(), + hard_reorg_interval: default_hard_reorg_interval(), + datadog_enabled: false, + statsd_enabled: false, + predefined: None, }, - database: DatabaseConfig { - connection_string: - "postgres://postgres:postgres@127.0.0.1:52804/database" - .to_string(), + server: ServerConfig { + host: SocketAddr::from(([127, 0, 0, 1], 3000)), + username: None, + password: None, }, - keys: KeysConfig::Local(LocalKeysConfig {}), + database: DatabaseConfig::Parts(DbParts { + host: "host".to_string(), + port: "5432".to_string(), + username: "user".to_string(), + password: "pass".to_string(), + database: "db".to_string(), + }), + keys: KeysConfig::Local(LocalKeysConfig::default()), }; let toml = toml::to_string_pretty(&config).unwrap(); - println!("{}", toml); + assert_eq!(toml, WITH_DB_PARTS); + } + + #[test] + fn env_config_test() { + std::env::set_var("TX_SITTER__DATABASE__KIND", "parts"); + std::env::set_var("TX_SITTER__DATABASE__HOST", "dbHost"); + std::env::set_var("TX_SITTER__DATABASE__PORT", "dbPort"); + std::env::set_var("TX_SITTER__DATABASE__DATABASE", "dbName"); + std::env::set_var("TX_SITTER__DATABASE__USERNAME", "dbUsername"); + std::env::set_var("TX_SITTER__DATABASE__PASSWORD", "dbPassword"); + std::env::set_var("TX_SITTER__SERVICE__ESCALATION_INTERVAL", "1m"); + std::env::set_var("TX_SITTER__SERVICE__DATADOG_ENABLED", "true"); + std::env::set_var("TX_SITTER__SERVICE__STATSD_ENABLED", "true"); + std::env::set_var("TX_SITTER__SERVER__HOST", "0.0.0.0:8080"); + std::env::set_var("TX_SITTER__SERVER__USERNAME", "authUsername"); + std::env::set_var("TX_SITTER__SERVER__PASSWORD", "authPassword"); + std::env::set_var("TX_SITTER__KEYS__KIND", "kms"); + + let config = load_config(std::iter::empty()).unwrap(); + + assert!(config.service.statsd_enabled); + 
assert!(config.service.datadog_enabled); + assert_eq!(config.service.escalation_interval, Duration::from_secs(60)); + assert_eq!( + config.database.to_connection_string(), + "postgres://dbUsername:dbPassword@dbHost:dbPort/dbName" + ); } } diff --git a/src/db.rs b/src/db.rs index 82a1e96..ec2b221 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,16 +1,22 @@ +#![allow(clippy::too_many_arguments)] + use std::time::Duration; +use chrono::{DateTime, Utc}; use ethers::types::{Address, H256, U256}; use sqlx::migrate::{MigrateDatabase, Migrator}; +use sqlx::types::{BigDecimal, Json}; use sqlx::{Pool, Postgres, Row}; +use tracing::instrument; use crate::broadcast_utils::gas_estimation::FeesEstimate; use crate::config::DatabaseConfig; +use crate::types::{RelayerInfo, RelayerUpdate, TransactionPriority}; pub mod data; -use self::data::{AddressWrapper, ReadTxData}; -pub use self::data::{BlockTxStatus, TxForEscalation, UnsentTx}; +use self::data::{BlockFees, H256Wrapper, NetworkStats, ReadTxData, RpcKind}; +pub use self::data::{TxForEscalation, TxStatus, UnsentTx}; // Statically link in migration files static MIGRATOR: Migrator = sqlx::migrate!("db/migrations"); @@ -21,27 +27,20 @@ pub struct Database { impl Database { pub async fn new(config: &DatabaseConfig) -> eyre::Result { - let pool = loop { - if !Postgres::database_exists(&config.connection_string).await? { - Postgres::create_database(&config.connection_string).await?; - } + let connection_string = config.to_connection_string(); - let pool = Pool::connect(&config.connection_string).await?; + if !Postgres::database_exists(&connection_string).await? { + Postgres::create_database(&connection_string).await?; + } - if let Err(err) = MIGRATOR.run(&pool).await { - tracing::error!("{err:?}"); - tracing::warn!("Migration mismatch dropping previosu db"); - drop(pool); - // Drop the DB if it's out of date - ONLY FOR TESTING - Postgres::drop_database(&config.connection_string).await?; - } else { - break pool; - } - }; + let pool = Pool::connect(&connection_string).await?; + + MIGRATOR.run(&pool).await?; Ok(Self { pool }) } + #[instrument(skip(self), level = "debug")] pub async fn create_relayer( &self, id: &str, @@ -52,8 +51,8 @@ impl Database { ) -> eyre::Result<()> { sqlx::query( r#" - INSERT INTO relayers (id, name, chain_id, key_id, address, nonce, current_nonce) - VALUES ($1, $2, $3, $4, $5, 0, 0) + INSERT INTO relayers (id, name, chain_id, key_id, address) + VALUES ($1, $2, $3, $4, $5) "#, ) .bind(id) @@ -67,6 +66,195 @@ impl Database { Ok(()) } + #[instrument(skip(self), level = "debug")] + pub async fn update_relayer( + &self, + id: &str, + update: &RelayerUpdate, + ) -> eyre::Result<()> { + let mut tx = self.pool.begin().await?; + + let RelayerUpdate { + relayer_name, + max_inflight_txs, + max_queued_txs, + gas_price_limits, + enabled, + } = update; + + if let Some(name) = relayer_name { + sqlx::query( + r#" + UPDATE relayers + SET name = $2 + WHERE id = $1 + "#, + ) + .bind(id) + .bind(name) + .execute(tx.as_mut()) + .await?; + } + + if let Some(max_inflight_txs) = max_inflight_txs { + sqlx::query( + r#" + UPDATE relayers + SET max_inflight_txs = $2 + WHERE id = $1 + "#, + ) + .bind(id) + .bind(*max_inflight_txs as i64) + .execute(tx.as_mut()) + .await?; + } + + if let Some(max_queued_txs) = max_queued_txs { + sqlx::query( + r#" + UPDATE relayers + SET max_queued_txs = $2 + WHERE id = $1 + "#, + ) + .bind(id) + .bind(*max_queued_txs as i64) + .execute(tx.as_mut()) + .await?; + } + + if let Some(gas_price_limits) = gas_price_limits { + sqlx::query( + 
r#" + UPDATE relayers + SET gas_price_limits = $2 + WHERE id = $1 + "#, + ) + .bind(id) + .bind(Json(gas_price_limits)) + .execute(tx.as_mut()) + .await?; + } + + if let Some(enabled) = enabled { + sqlx::query( + r#" + UPDATE relayers + SET enabled = $2 + WHERE id = $1 + "#, + ) + .bind(id) + .bind(*enabled) + .execute(tx.as_mut()) + .await?; + } + + tx.commit().await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_relayers(&self) -> eyre::Result> { + Ok(sqlx::query_as( + r#" + SELECT + id, + name, + chain_id, + key_id, + address, + nonce, + current_nonce, + max_inflight_txs, + gas_price_limits, + enabled + FROM relayers + "#, + ) + .fetch_all(&self.pool) + .await?) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_relayers_by_chain_id( + &self, + chain_id: u64, + ) -> eyre::Result> { + Ok(sqlx::query_as( + r#" + SELECT + id, + name, + chain_id, + key_id, + address, + nonce, + current_nonce, + max_inflight_txs, + max_queued_txs, + gas_price_limits, + enabled + FROM relayers + WHERE chain_id = $1 + "#, + ) + .bind(chain_id as i64) + .fetch_all(&self.pool) + .await?) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_relayer(&self, id: &str) -> eyre::Result { + Ok(sqlx::query_as( + r#" + SELECT + id, + name, + chain_id, + key_id, + address, + nonce, + current_nonce, + max_inflight_txs, + max_queued_txs, + gas_price_limits, + enabled + FROM relayers + WHERE id = $1 + "#, + ) + .bind(id) + .fetch_one(&self.pool) + .await?) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_relayer_pending_txs( + &self, + relayer_id: &str, + ) -> eyre::Result { + let (tx_count,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM transactions t + LEFT JOIN sent_transactions s ON (t.id = s.tx_id) + WHERE t.relayer_id = $1 + AND (s.tx_id IS NULL OR s.status = $2) + "#, + ) + .bind(relayer_id) + .bind(TxStatus::Pending) + .fetch_one(&self.pool) + .await?; + + Ok(tx_count as usize) + } + + #[instrument(skip(self), level = "debug")] pub async fn create_transaction( &self, tx_id: &str, @@ -74,19 +262,34 @@ impl Database { data: &[u8], value: U256, gas_limit: U256, + priority: TransactionPriority, relayer_id: &str, ) -> eyre::Result<()> { let mut tx = self.pool.begin().await?; + let mut value_bytes = [0u8; 32]; value.to_big_endian(&mut value_bytes); let mut gas_limit_bytes = [0u8; 32]; gas_limit.to_big_endian(&mut gas_limit_bytes); + let (nonce,): (i64,) = sqlx::query_as( + r#" + UPDATE relayers + SET nonce = nonce + 1, + updated_at = now() + WHERE id = $1 + RETURNING nonce - 1 + "#, + ) + .bind(relayer_id) + .fetch_one(tx.as_mut()) + .await?; + sqlx::query( r#" - INSERT INTO transactions (id, tx_to, data, value, gas_limit, relayer_id, nonce) - VALUES ($1, $2, $3, $4, $5, $6, (SELECT nonce FROM relayers WHERE id = $6)) + INSERT INTO transactions (id, tx_to, data, value, gas_limit, priority, relayer_id, nonce) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) "#, ) .bind(tx_id) @@ -94,19 +297,9 @@ impl Database { .bind(data) .bind(value_bytes) .bind(gas_limit_bytes) + .bind(priority) .bind(relayer_id) - .execute(tx.as_mut()) - .await?; - - sqlx::query( - r#" - UPDATE relayers - SET nonce = nonce + 1, - updated_at = now() - WHERE id = $1 - "#, - ) - .bind(relayer_id) + .bind(nonce) .execute(tx.as_mut()) .await?; @@ -115,25 +308,24 @@ impl Database { Ok(()) } - pub async fn get_unsent_txs( - &self, - max_inflight_txs: usize, - ) -> eyre::Result> { + #[instrument(skip(self), level = "debug")] + pub async fn get_unsent_txs(&self) -> 
- pub async fn get_unsent_txs( - &self, - max_inflight_txs: usize, - ) -> eyre::Result<Vec<UnsentTx>> { + #[instrument(skip(self), level = "debug")] + pub async fn get_unsent_txs(&self) -> eyre::Result<Vec<UnsentTx>> { Ok(sqlx::query_as( r#" - SELECT t.id, t.tx_to, t.data, t.value, t.gas_limit, t.nonce, r.key_id, r.chain_id - FROM transactions t - LEFT JOIN sent_transactions s ON (t.id = s.tx_id) + SELECT r.id as relayer_id, t.id, t.tx_to, t.data, t.value, t.gas_limit, t.priority, t.nonce, r.key_id, r.chain_id + FROM transactions t + LEFT JOIN sent_transactions s ON (t.id = s.tx_id) INNER JOIN relayers r ON (t.relayer_id = r.id) - WHERE s.tx_id IS NULL - AND (t.nonce - r.current_nonce < $1); + WHERE s.tx_id IS NULL + AND (t.nonce - r.current_nonce < r.max_inflight_txs) + ORDER BY r.id, t.nonce ASC "#, ) - .bind(max_inflight_txs as i64) .fetch_all(&self.pool) .await?) } + #[instrument(skip(self), level = "debug")] pub async fn insert_tx_broadcast( &self, tx_id: &str, @@ -153,83 +345,116 @@ impl Database { sqlx::query( r#" - INSERT INTO sent_transactions (tx_id, initial_max_fee_per_gas, initial_max_priority_fee_per_gas) - VALUES ($1, $2, $3) - "# + INSERT INTO tx_hashes (tx_id, tx_hash, max_fee_per_gas, max_priority_fee_per_gas) + VALUES ($1, $2, $3, $4) + "#, ) .bind(tx_id) + .bind(tx_hash.as_bytes()) .bind(initial_max_fee_per_gas_bytes) .bind(initial_max_priority_fee_per_gas_bytes) - .execute(tx.as_mut()).await?; + .execute(tx.as_mut()) + .await?; sqlx::query( r#" - INSERT INTO tx_hashes (tx_id, tx_hash, max_fee_per_gas, max_priority_fee_per_gas) + INSERT INTO sent_transactions (tx_id, initial_max_fee_per_gas, initial_max_priority_fee_per_gas, valid_tx_hash) VALUES ($1, $2, $3, $4) - "#, + "# ) .bind(tx_id) - .bind(tx_hash.as_bytes()) .bind(initial_max_fee_per_gas_bytes) .bind(initial_max_priority_fee_per_gas_bytes) - .execute(tx.as_mut()) - .await?; + .bind(tx_hash.as_bytes()) + .execute(tx.as_mut()).await?; tx.commit().await?; Ok(()) } - pub async fn get_latest_block_fees_by_chain_id( + #[instrument(skip(self), level = "debug")] + pub async fn get_latest_block_number_without_fee_estimates( &self, chain_id: u64, - ) -> eyre::Result<Option<FeesEstimate>> { - let row = sqlx::query( + ) -> eyre::Result<Option<u64>> { + let block_number: Option<(i64,)> = sqlx::query_as( r#" - SELECT fee_estimate - FROM blocks - WHERE chain_id = $1 - AND status = $2 - AND fee_estimate IS NOT NULL + SELECT block_number + FROM blocks + WHERE chain_id = $1 + AND block_number NOT IN ( + SELECT block_number + FROM block_fees + WHERE chain_id = $1 + ) ORDER BY block_number DESC - LIMIT 1 + LIMIT 1 "#, ) .bind(chain_id as i64) - .bind(BlockTxStatus::Mined) .fetch_optional(&self.pool) .await?; - let item = row - .map(|row| row.try_get::<Json<FeesEstimate>, _>(0)) - .transpose()?; + Ok(block_number.map(|(n,)| n as u64)) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_latest_block_number( + &self, + chain_id: u64, + ) -> eyre::Result<Option<u64>> { + let block_number: Option<(i64,)> = sqlx::query_as( + r#" + SELECT block_number + FROM blocks + WHERE chain_id = $1 + ORDER BY block_number DESC + LIMIT 1 + "#, ) + .bind(chain_id as i64) + .fetch_optional(&self.pool) + .await?; - Ok(item.map(|json_fee_estimate| json_fee_estimate.0)) + Ok(block_number.map(|(n,)| n as u64)) } - pub async fn get_next_block_numbers( + #[instrument(skip(self), level = "debug")] + pub async fn get_latest_block_fees_by_chain_id( &self, - ) -> eyre::Result<Vec<(u64, u64)>> { - let rows: Vec<(i64, i64)> = sqlx::query_as( + chain_id: u64, + ) -> eyre::Result<Option<BlockFees>> { + let row: Option<(Json<FeesEstimate>, BigDecimal)> = sqlx::query_as( r#" - SELECT MAX(block_number) + 1, chain_id - FROM blocks - WHERE status = $1 - GROUP BY chain_id + SELECT bf.fee_estimate, bf.gas_price + FROM blocks b + JOIN block_fees bf ON
(b.block_number = bf.block_number AND b.chain_id = bf.chain_id) + WHERE b.chain_id = $1 + ORDER BY b.block_number DESC + LIMIT 1 "#, ) - .bind(BlockTxStatus::Mined) - .fetch_all(&self.pool) + .bind(chain_id as i64) + .fetch_optional(&self.pool) .await?; - Ok(rows - .into_iter() - .map(|(block_number, chain_id)| { - (block_number as u64, chain_id as u64) - }) - .collect()) + let Some((fees, gas_price)) = row else { + return Ok(None); + }; + + let fee_estimates = fees.0; + + let gas_price_str = gas_price.to_string(); + let gas_price = U256::from_dec_str(&gas_price_str)?; + + Ok(Some(BlockFees { + fee_estimates, + gas_price, + })) } + #[instrument(skip(self), level = "debug")] pub async fn has_blocks_for_chain( &self, chain_id: u64, @@ -250,156 +475,371 @@ impl Database { Ok(row.try_get::<bool, _>(0)?) } + #[instrument(skip(self), level = "debug")] pub async fn save_block( &self, block_number: u64, chain_id: u64, + timestamp: DateTime<Utc>, txs: &[H256], - fee_estimates: &FeesEstimate, - status: BlockTxStatus, ) -> eyre::Result<()> { let mut db_tx = self.pool.begin().await?; - // let fee_estimates = serde_json::to_string(&fee_estimates)?; + // Prune previously inserted block + sqlx::query( + r#" + DELETE + FROM blocks + WHERE block_number = $1 + AND chain_id = $2 + "#, + ) + .bind(block_number as i64) + .bind(chain_id as i64) + .execute(db_tx.as_mut()) + .await?; - let (block_id,): (i64,) = sqlx::query_as( + // Insert new block + // There can be no conflict since we remove the previous one + sqlx::query( r#" - INSERT INTO blocks (block_number, chain_id, fee_estimate, status) - VALUES ($1, $2, $3, $4) - RETURNING id + INSERT INTO blocks (block_number, chain_id, timestamp) + VALUES ($1, $2, $3) "#, ) .bind(block_number as i64) .bind(chain_id as i64) - .bind(sqlx::types::Json(fee_estimates)) - .bind(status) - .fetch_one(db_tx.as_mut()) + .bind(timestamp) + .execute(db_tx.as_mut()) .await?; - for tx_hash in txs { - sqlx::query( - r#" - INSERT INTO block_txs (block_id, tx_hash) - VALUES ($1, $2) - "#, + let txs: Vec<_> = txs.iter().map(|tx| H256Wrapper(*tx)).collect(); + + sqlx::query( + r#" + INSERT INTO block_txs (block_number, chain_id, tx_hash) + SELECT $1, $2, unnested.tx_hash + FROM UNNEST($3::BYTEA[]) AS unnested(tx_hash) + WHERE EXISTS ( + SELECT 1 + FROM tx_hashes + WHERE tx_hashes.tx_hash = unnested.tx_hash ) - .bind(block_id) - .bind(tx_hash.as_bytes()) - .execute(db_tx.as_mut()) - .await?; - } + "#, + ) + .bind(block_number as i64) + .bind(chain_id as i64) + .bind(&txs[..]) + .execute(db_tx.as_mut()) + .await?; db_tx.commit().await?; Ok(()) } - pub async fn update_transactions( + #[instrument(skip(self), level = "debug")] + pub async fn save_block_fees( &self, - status: BlockTxStatus, + block_number: u64, + chain_id: u64, + fee_estimates: &FeesEstimate, + gas_price: U256, ) -> eyre::Result<()> { + // TODO: Figure out how to do this without parsing + let gas_price: BigDecimal = gas_price.to_string().parse()?; + sqlx::query( r#" - UPDATE tx_hashes h - SET status = $1 - FROM transactions t, block_txs bt, blocks b, relayers r - WHERE t.id = h.tx_id - AND b.id = bt.block_id - AND h.tx_hash = bt.tx_hash - AND r.chain_id = b.chain_id - AND r.id = t.relayer_id - AND h.status = $2 - AND b.status = $1 - "#, - ) - .bind(status) - .bind(status.previous()) + INSERT INTO block_fees (block_number, chain_id, fee_estimate, gas_price) + VALUES ($1, $2, $3, $4) + "#, + ) + .bind(block_number as i64) + .bind(chain_id as i64) + .bind(Json(fee_estimates)) + .bind(gas_price) .execute(&self.pool) .await?; - Ok(()) }
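Note the delete-then-insert at the top of `save_block`: re-indexing a height (for example after a reorg) must replace the stored block, and `ON DELETE CASCADE` sweeps the old `block_txs` rows out with it, so the composite primary key never conflicts. A minimal sketch of the resulting idempotency, assuming a connected `Database` handle:

async fn demo(db: &Database) -> eyre::Result<()> {
    use chrono::Utc;
    use ethers::types::H256;

    let txs = [H256::from_low_u64_be(1)];
    db.save_block(42, 1, Utc::now(), &txs).await?;
    // same height again: replaces the previous row instead of erroring
    db.save_block(42, 1, Utc::now(), &txs).await?;
    Ok(())
}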
- pub async fn fetch_txs_for_escalation( - &self, - escalation_interval: Duration, - ) -> eyre::Result<Vec<TxForEscalation>> { - Ok(sqlx::query_as( + /// Returns a list of soft reorged txs + #[instrument(skip(self), level = "debug", ret)] + pub async fn handle_soft_reorgs(&self) -> eyre::Result<Vec<String>> { + let mut tx = self.pool.begin().await?; + + // Fetch txs which have valid tx hash different than what is actually mined + let items: Vec<(String, H256Wrapper)> = sqlx::query_as( r#" - SELECT t.id, t.tx_to, t.data, t.value, t.gas_limit, t.nonce, - r.key_id, r.chain_id, - s.initial_max_fee_per_gas, s.initial_max_priority_fee_per_gas, s.escalation_count - FROM transactions t - JOIN sent_transactions s ON t.id = s.tx_id - JOIN tx_hashes h ON t.id = h.tx_id - JOIN relayers r ON t.relayer_id = r.id - WHERE now() - h.created_at > $1 - AND h.status = $2 - AND NOT h.escalated + SELECT t.id, h.tx_hash + FROM transactions t + JOIN sent_transactions s ON t.id = s.tx_id + JOIN tx_hashes h ON t.id = h.tx_id + JOIN block_txs bt ON h.tx_hash = bt.tx_hash + WHERE h.tx_hash <> s.valid_tx_hash + AND s.status = $1 "#, ) - .bind(escalation_interval) - .bind(BlockTxStatus::Pending) - .fetch_all(&self.pool) - .await?) - } + .bind(TxStatus::Mined) + .fetch_all(tx.as_mut()) + .await?; - pub async fn escalate_tx( - &self, - tx_id: &str, - tx_hash: H256, - max_fee_per_gas: U256, - max_priority_fee_per_gas: U256, - ) -> eyre::Result<()> { - let mut tx = self.pool.begin().await?; + let (tx_ids, tx_hashes): (Vec<_>, Vec<_>) = items.into_iter().unzip(); sqlx::query( r#" - UPDATE tx_hashes - SET escalated = true - WHERE tx_id = $1 + UPDATE sent_transactions s + SET valid_tx_hash = mined.tx_hash + FROM transactions t, + UNNEST($1::TEXT[], $2::BYTEA[]) AS mined(tx_id, tx_hash) + WHERE t.id = mined.tx_id + AND t.id = s.tx_id "#, ) - .bind(tx_id) + .bind(&tx_ids) + .bind(&tx_hashes) .execute(tx.as_mut()) .await?; - sqlx::query( + tx.commit().await?; + + Ok(tx_ids) + } + + /// Returns a list of hard reorged txs + #[instrument(skip(self), level = "debug", ret)] + pub async fn handle_hard_reorgs(&self) -> eyre::Result<Vec<String>> { + let mut tx = self.pool.begin().await?; + + // Fetch txs which are marked as mined + // but none of the associated tx hashes are present in block txs + let items: Vec<(String,)> = sqlx::query_as( r#" - UPDATE sent_transactions - SET escalation_count = escalation_count + 1 - WHERE tx_id = $1 + WITH reorg_candidates AS ( + SELECT t.id, h.tx_hash, bt.chain_id + FROM transactions t + JOIN sent_transactions s ON t.id = s.tx_id + JOIN tx_hashes h ON t.id = h.tx_id + LEFT JOIN block_txs bt ON h.tx_hash = bt.tx_hash + WHERE s.status = $1 + ) + SELECT r.id + FROM reorg_candidates r + GROUP BY r.id + HAVING COUNT(r.chain_id) = 0 "#, ) - .bind(tx_id) - .execute(tx.as_mut()) + .bind(TxStatus::Mined) + .fetch_all(tx.as_mut()) .await?; - let mut max_fee_per_gas_bytes = [0u8; 32]; - max_fee_per_gas.to_big_endian(&mut max_fee_per_gas_bytes); - - let mut max_priority_fee_per_gas_bytes = [0u8; 32]; - max_priority_fee_per_gas - .to_big_endian(&mut max_priority_fee_per_gas_bytes); + let tx_ids: Vec<_> = items.into_iter().map(|(x,)| x).collect(); + // Set status to pending + // and set valid tx hash to the latest tx hash sqlx::query( r#" - INSERT INTO tx_hashes (tx_id, tx_hash, max_fee_per_gas, max_priority_fee_per_gas) - VALUES ($1, $2, $3, $4) - "# + UPDATE sent_transactions s + SET status = $1, + valid_tx_hash = ( + SELECT tx_hash + FROM tx_hashes h + WHERE h.tx_id = s.tx_id + ORDER BY created_at DESC + LIMIT 1 + ), + mined_at = NULL + FROM transactions t, UNNEST($2::TEXT[]) AS reorged(tx_id) + WHERE t.id = reorged.tx_id + AND t.id = s.tx_id + "#, ) - .bind(tx_id) + .bind(TxStatus::Pending) + .bind(&tx_ids) + .execute(tx.as_mut()) + .await?; + + tx.commit().await?; + + Ok(tx_ids) + }
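A soft reorg means the tx is still mined but under a different hash than the recorded `valid_tx_hash`; a hard reorg means none of its hashes appear in any indexed block, so the tx drops back to pending for re-broadcast. Order matters relative to `mine_txs` and `finalize_txs` below; a sketch of the per-chain maintenance pass, matching the `full_update` test helper later in this diff:

async fn maintenance_pass(
    db: &Database,
    chain_id: u64,
    finalization_timestamp: chrono::DateTime<chrono::Utc>,
) -> eyre::Result<()> {
    db.mine_txs(chain_id).await?; // attach freshly mined hashes
    db.handle_soft_reorgs().await?; // still mined, different hash
    db.handle_hard_reorgs().await?; // no longer mined -> pending again
    db.finalize_txs(finalization_timestamp).await?; // old enough -> finalized
    Ok(())
}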
+ + /// Marks txs as mined if the associated tx hash is present in a block + /// + /// returns the tx ids and hashes for all mined txs + #[instrument(skip(self), level = "debug", ret)] + pub async fn mine_txs( + &self, + chain_id: u64, + ) -> eyre::Result<Vec<(String, H256)>> { + let updated_txs: Vec<(String, H256Wrapper)> = sqlx::query_as( + r#" + WITH cte AS ( + SELECT t.id, h.tx_hash, b.timestamp + FROM transactions t + JOIN sent_transactions s ON t.id = s.tx_id + JOIN tx_hashes h ON t.id = h.tx_id + JOIN block_txs bt ON h.tx_hash = bt.tx_hash + JOIN blocks b ON + bt.block_number = b.block_number + AND bt.chain_id = b.chain_id + WHERE s.status = $1 + AND b.chain_id = $2 + ) + UPDATE sent_transactions + SET status = $3, + valid_tx_hash = cte.tx_hash, + mined_at = cte.timestamp + FROM cte + WHERE sent_transactions.tx_id = cte.id + RETURNING sent_transactions.tx_id, sent_transactions.valid_tx_hash + "#, + ) + .bind(TxStatus::Pending) + .bind(chain_id as i64) + .bind(TxStatus::Mined) + .fetch_all(&self.pool) + .await?; + + Ok(updated_txs + .into_iter() + .map(|(id, hash)| (id, hash.0)) + .collect()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn finalize_txs( + &self, + finalization_timestamp: DateTime<Utc>, + ) -> eyre::Result<()> { + let mut tx = self.pool.begin().await?; + + // Fetch txs which are marked as mined, but the associated valid tx hash + // is present in a block which is older than the given timestamp + let items: Vec<(String,)> = sqlx::query_as( + r#" + SELECT s.tx_id + FROM sent_transactions s + JOIN tx_hashes h ON s.valid_tx_hash = h.tx_hash + JOIN block_txs bt ON h.tx_hash = bt.tx_hash + JOIN blocks b ON bt.block_number = b.block_number AND bt.chain_id = b.chain_id + WHERE s.status = $1 + AND b.timestamp < $2 + "#, + ) + .bind(TxStatus::Mined) + .bind(finalization_timestamp) + .fetch_all(tx.as_mut()) + .await?; + + let tx_ids: Vec<_> = items.into_iter().map(|(x,)| x).collect(); + + // Set status to finalized + sqlx::query( + r#" + UPDATE sent_transactions s + SET status = $1 + FROM transactions t, UNNEST($2::TEXT[]) AS finalized(tx_id) + WHERE t.id = finalized.tx_id + AND t.id = s.tx_id + "#, + ) + .bind(TxStatus::Finalized) + .bind(&tx_ids) + .execute(tx.as_mut()) + .await?; + + tx.commit().await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_txs_for_escalation( + &self, + escalation_interval: Duration, + ) -> eyre::Result<Vec<TxForEscalation>> { + Ok(sqlx::query_as( + r#" + SELECT r.id as relayer_id, t.id, t.tx_to, t.data, t.value, t.gas_limit, t.nonce, + r.key_id, r.chain_id, + s.initial_max_fee_per_gas, s.initial_max_priority_fee_per_gas, s.escalation_count + FROM transactions t + JOIN sent_transactions s ON t.id = s.tx_id + JOIN tx_hashes h ON t.id = h.tx_id + JOIN relayers r ON t.relayer_id = r.id + WHERE now() - h.created_at > $1 + AND s.status = $2 + AND NOT h.escalated + "#, + ) + .bind(escalation_interval) + .bind(TxStatus::Pending) + .fetch_all(&self.pool) + .await?) + }
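`get_txs_for_escalation` picks pending txs whose newest attempt has sat unescalated for longer than the interval; the broadcast task then re-signs with bumped fees and records the fresh attempt through `escalate_tx` below. A sketch of that loop; `resign_with_higher_fees` and the `tx.id` field name are hypothetical glue:

use std::time::Duration;

async fn escalation_pass(db: &Database, interval: Duration) -> eyre::Result<()> {
    for tx in db.get_txs_for_escalation(interval).await? {
        // hypothetical: re-sign with increased fees, yielding the new hash
        // plus the max fee / max priority fee that were actually used
        let (new_hash, max_fee, max_priority) = resign_with_higher_fees(&tx).await?;
        db.escalate_tx(&tx.id, new_hash, max_fee, max_priority).await?;
    }
    Ok(())
}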
+ + #[instrument(skip(self), level = "debug")] + pub async fn escalate_tx( + &self, + tx_id: &str, + tx_hash: H256, + max_fee_per_gas: U256, + max_priority_fee_per_gas: U256, + ) -> eyre::Result<()> { + let mut tx = self.pool.begin().await?; + + sqlx::query( + r#" + UPDATE tx_hashes + SET escalated = true + WHERE tx_id = $1 + "#, + ) + .bind(tx_id) + .execute(tx.as_mut()) + .await?; + + let mut max_fee_per_gas_bytes = [0u8; 32]; + max_fee_per_gas.to_big_endian(&mut max_fee_per_gas_bytes); + + let mut max_priority_fee_per_gas_bytes = [0u8; 32]; + max_priority_fee_per_gas + .to_big_endian(&mut max_priority_fee_per_gas_bytes); + + sqlx::query( + r#" + INSERT INTO tx_hashes (tx_id, tx_hash, max_fee_per_gas, max_priority_fee_per_gas) + VALUES ($1, $2, $3, $4) + "# + ) + .bind(tx_id) .bind(tx_hash.as_bytes()) .bind(max_fee_per_gas_bytes) .bind(max_priority_fee_per_gas_bytes) .execute(tx.as_mut()) .await?; + sqlx::query( + r#" + UPDATE sent_transactions + SET escalation_count = escalation_count + 1, + valid_tx_hash = $2 + WHERE tx_id = $1 + "#, + ) + .bind(tx_id) + .bind(tx_hash.as_bytes()) + .execute(tx.as_mut()) + .await?; + tx.commit().await?; Ok(()) } + #[instrument(skip(self), level = "debug")] pub async fn read_tx( &self, tx_id: &str, @@ -407,12 +847,11 @@ impl Database { Ok(sqlx::query_as( r#" SELECT t.id as tx_id, t.tx_to as to, t.data, t.value, t.gas_limit, t.nonce, - h.tx_hash, h.status + h.tx_hash, s.status FROM transactions t - LEFT JOIN tx_hashes h ON t.id = h.tx_id + LEFT JOIN sent_transactions s ON t.id = s.tx_id + LEFT JOIN tx_hashes h ON s.valid_tx_hash = h.tx_hash WHERE t.id = $1 - ORDER BY h.created_at DESC, h.status DESC - LIMIT 1 "#, ) .bind(tx_id) @@ -420,24 +859,36 @@ impl Database { .await?) } - pub async fn fetch_relayer_addresses( + #[instrument(skip(self), level = "debug")] + pub async fn read_txs( &self, - chain_id: u64, - ) -> eyre::Result<Vec<Address>> { - let items: Vec<(AddressWrapper,)> = sqlx::query_as( + relayer_id: &str, + tx_status_filter: Option<Option<TxStatus>>, + ) -> eyre::Result<Vec<ReadTxData>> { + let (should_filter, status_filter) = match tx_status_filter { + Some(status) => (true, status), + None => (false, None), + }; + + Ok(sqlx::query_as( r#" - SELECT address - FROM relayers - WHERE chain_id = $1 + SELECT t.id as tx_id, t.tx_to as to, t.data, t.value, t.gas_limit, t.nonce, + h.tx_hash, s.status + FROM transactions t + LEFT JOIN sent_transactions s ON t.id = s.tx_id + LEFT JOIN tx_hashes h ON s.valid_tx_hash = h.tx_hash + WHERE t.relayer_id = $1 + AND (($2 = true AND s.status = $3) OR $2 = false) "#, ) - .bind(chain_id as i64) + .bind(relayer_id) + .bind(should_filter) + .bind(status_filter) .fetch_all(&self.pool) - .await?; - - Ok(items.into_iter().map(|(wrapper,)| wrapper.0).collect()) + .await?) }
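`read_txs` takes a two-level Option: `None` applies no status filter, `Some(Some(status))` matches one status, and `Some(None)` is evidently meant to match unsent rows. Note that the parentheses around the filter clause are required so the `OR` cannot bypass the relayer_id predicate, and that binding NULL makes `s.status = $3` never match, so the `Some(None)` mode would need `s.status IS NOT DISTINCT FROM $3` to behave as intended. The three call shapes, as a sketch:

async fn demo(db: &Database, relayer_id: &str) -> eyre::Result<()> {
    let _all = db.read_txs(relayer_id, None).await?;
    let _mined = db.read_txs(relayer_id, Some(Some(TxStatus::Mined))).await?;
    // intended: txs with no sent_transactions row yet
    let _unsent = db.read_txs(relayer_id, Some(None)).await?;
    Ok(())
}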
+ #[instrument(skip(self), level = "debug")] pub async fn update_relayer_nonce( &self, chain_id: u64, @@ -461,13 +912,286 @@ Ok(()) } + + #[instrument(skip(self), level = "debug")] + pub async fn prune_blocks( + &self, + timestamp: DateTime<Utc>, + ) -> eyre::Result<()> { + sqlx::query( + r#" + DELETE FROM blocks + WHERE timestamp < $1 + "#, + ) + .bind(timestamp) + .execute(&self.pool) + .await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn prune_txs( + &self, + timestamp: DateTime<Utc>, + ) -> eyre::Result<()> { + sqlx::query( + r#" + DELETE + FROM transactions t + USING sent_transactions s + WHERE t.id = s.tx_id + AND s.mined_at < $1 + AND s.status = $2 + "#, + ) + .bind(timestamp) + .bind(TxStatus::Finalized) + .execute(&self.pool) + .await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn create_network( + &self, + chain_id: u64, + name: &str, + http_rpc: &str, + ws_rpc: &str, + ) -> eyre::Result<()> { + let mut tx = self.pool.begin().await?; + + sqlx::query( + r#" + INSERT INTO networks (chain_id, name) + VALUES ($1, $2) + "#, + ) + .bind(chain_id as i64) + .bind(name) + .execute(tx.as_mut()) + .await?; + + sqlx::query( + r#" + INSERT INTO rpcs (chain_id, url, kind) + VALUES + ($1, $2, $3), + ($1, $4, $5) + "#, + ) + .bind(chain_id as i64) + .bind(http_rpc) + .bind(RpcKind::Http) + .bind(ws_rpc) + .bind(RpcKind::Ws) + .execute(tx.as_mut()) + .await?; + + tx.commit().await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_network_rpc( + &self, + chain_id: u64, + rpc_kind: RpcKind, + ) -> eyre::Result<String> { + let row: (String,) = sqlx::query_as( + r#" + SELECT url + FROM rpcs + WHERE chain_id = $1 + AND kind = $2 + "#, + ) + .bind(chain_id as i64) + .bind(rpc_kind) + .fetch_one(&self.pool) + .await?; + + Ok(row.0) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_network_chain_ids(&self) -> eyre::Result<Vec<u64>> { + let items: Vec<(i64,)> = sqlx::query_as( + r#" + SELECT chain_id + FROM networks + "#, + ) + .fetch_all(&self.pool) + .await?; + + Ok(items.into_iter().map(|(x,)| x as u64).collect()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn create_api_key( + &self, + relayer_id: &str, + api_key_hash: [u8; 32], + ) -> eyre::Result<()> { + sqlx::query( + r#" + INSERT INTO api_keys (relayer_id, key_hash) + VALUES ($1, $2) + "#, + ) + .bind(relayer_id) + .bind(api_key_hash) + .execute(&self.pool) + .await?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug")] + pub async fn is_api_key_valid( + &self, + relayer_id: &str, + api_key_hash: [u8; 32], + ) -> eyre::Result<bool> { + let (is_valid,): (bool,) = sqlx::query_as( + r#" + SELECT EXISTS ( + SELECT 1 + FROM api_keys + WHERE relayer_id = $1 + AND key_hash = $2 + ) + "#, + ) + .bind(relayer_id) + .bind(api_key_hash) + .fetch_one(&self.pool) + .await?; + + Ok(is_valid) + } + + #[instrument(skip(self), level = "debug")] + pub async fn get_stats(&self, chain_id: u64) -> eyre::Result<NetworkStats> { + let (pending_txs,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM transactions t + JOIN relayers r ON (t.relayer_id = r.id) + LEFT JOIN sent_transactions s ON (t.id = s.tx_id) + WHERE s.tx_id IS NULL + AND r.chain_id = $1 + "#, + ) + .bind(chain_id as i64) + .fetch_one(&self.pool) + .await?; + + let (mined_txs,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM transactions t + JOIN relayers r ON (t.relayer_id = r.id) + LEFT JOIN sent_transactions s ON (t.id = s.tx_id) + WHERE s.status = $1 + AND r.chain_id = $2
+ "#, + ) + .bind(TxStatus::Mined) + .bind(chain_id as i64) + .fetch_one(&self.pool) + .await?; + + let (finalized_txs,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM transactions t + JOIN relayers r ON (t.relayer_id = r.id) + LEFT JOIN sent_transactions s ON (t.id = s.tx_id) + WHERE s.status = $1 + AND r.chain_id = $2 + "#, + ) + .bind(TxStatus::Finalized) + .bind(chain_id as i64) + .fetch_one(&self.pool) + .await?; + + let (total_indexed_blocks,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM blocks + WHERE chain_id = $1 + "#, + ) + .bind(chain_id as i64) + .fetch_one(&self.pool) + .await?; + + let (block_txs,): (i64,) = sqlx::query_as( + r#" + SELECT COUNT(1) + FROM block_txs + WHERE chain_id = $1 + "#, + ) + .bind(chain_id as i64) + .fetch_one(&self.pool) + .await?; + + Ok(NetworkStats { + pending_txs: pending_txs as u64, + mined_txs: mined_txs as u64, + finalized_txs: finalized_txs as u64, + total_indexed_blocks: total_indexed_blocks as u64, + block_txs: block_txs as u64, + }) + } + + #[instrument(skip(self), level = "debug")] + pub async fn purge_unsent_txs(&self, relayer_id: &str) -> eyre::Result<()> { + sqlx::query( + r#" + UPDATE relayers + SET nonce = current_nonce + WHERE id = $1 + "#, + ) + .bind(relayer_id) + .execute(&self.pool) + .await?; + + sqlx::query( + r#" + DELETE FROM transactions + WHERE relayer_id = $1 + AND id NOT IN ( + SELECT tx_id FROM sent_transactions + ) + "#, + ) + .bind(relayer_id) + .execute(&self.pool) + .await?; + + Ok(()) + } } #[cfg(test)] mod tests { + use chrono::NaiveDate; + use eyre::ContextCompat; use postgres_docker_utils::DockerContainerGuard; use super::*; + use crate::db::data::U256Wrapper; + use crate::types::RelayerGasPriceLimit; async fn setup_db() -> eyre::Result<(Database, DockerContainerGuard)> { let db_container = postgres_docker_utils::setup().await?; @@ -475,20 +1199,417 @@ mod tests { let url = format!("postgres://postgres:postgres@{db_socket_addr}/database"); - let db = Database::new(&DatabaseConfig { - connection_string: url, - }) - .await?; + for _ in 0..5 { + match Database::new(&DatabaseConfig::connection_string(&url)).await + { + Ok(db) => return Ok((db, db_container)), + Err(err) => { + eprintln!("Failed to connect to the database: {err:?}"); + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + } + + Err(eyre::eyre!("Failed to connect to the database")) + } + + async fn full_update( + db: &Database, + chain_id: u64, + finalization_timestamp: DateTime, + ) -> eyre::Result<()> { + db.mine_txs(chain_id).await?; + + db.handle_soft_reorgs().await?; + db.handle_hard_reorgs().await?; + + db.finalize_txs(finalization_timestamp).await?; - Ok((db, db_container)) + Ok(()) } #[tokio::test] - async fn basic() -> eyre::Result<()> { + async fn migration() -> eyre::Result<()> { let (_db, _db_container) = setup_db().await?; - // db.create_relayer().await?; + Ok(()) + } + + #[tokio::test] + async fn reset_relayer() -> eyre::Result<()> { + let (db, _db_container) = setup_db().await?; + + let chain_id = 123; + let network_name = "network_name"; + let http_rpc = "http_rpc"; + let ws_rpc = "ws_rpc"; + + db.create_network(chain_id, network_name, http_rpc, ws_rpc) + .await?; + + let relayer_id = uuid(); + let relayer_id = relayer_id.as_str(); + let relayer_name = "relayer_name"; + let key_id = "key_id"; + let address = Address::from_low_u64_be(1); + + db.create_relayer(relayer_id, relayer_name, chain_id, key_id, address) + .await?; + + db.purge_unsent_txs(relayer_id).await?; Ok(()) } + + #[tokio::test] + async fn 
save_and_prune_blocks() -> eyre::Result<()> { + let (db, _db_container) = setup_db().await?; + + let block_timestamp = NaiveDate::from_ymd_opt(2023, 11, 23) + .unwrap() + .and_hms_opt(12, 32, 2) + .unwrap() + .and_utc(); + + let prune_timestamp = NaiveDate::from_ymd_opt(2023, 11, 23) + .unwrap() + .and_hms_opt(13, 32, 23) + .unwrap() + .and_utc(); + + let tx_hashes = vec![ + H256::from_low_u64_be(1), + H256::from_low_u64_be(2), + H256::from_low_u64_be(3), + ]; + + db.save_block(1, 1, block_timestamp, &tx_hashes).await?; + + assert!(db.has_blocks_for_chain(1).await?, "Should have blocks"); + + db.prune_blocks(prune_timestamp).await?; + + assert!(!db.has_blocks_for_chain(1).await?, "Should not have blocks"); + + Ok(()) + } + + #[tokio::test] + async fn relayer_methods() -> eyre::Result<()> { + let (db, _db_container) = setup_db().await?; + + let chain_id = 123; + let network_name = "network_name"; + let http_rpc = "http_rpc"; + let ws_rpc = "ws_rpc"; + + db.create_network(chain_id, network_name, http_rpc, ws_rpc) + .await?; + + let relayer_id = uuid(); + let relayer_id = relayer_id.as_str(); + + let relayer_name = "relayer_name"; + let key_id = "key_id"; + let relayer_address = Address::from_low_u64_be(1); + + db.create_relayer( + relayer_id, + relayer_name, + chain_id, + key_id, + relayer_address, + ) + .await?; + + let relayer = db.get_relayer(relayer_id).await?; + + assert_eq!(relayer.id, relayer_id); + assert_eq!(relayer.name, relayer_name); + assert_eq!(relayer.chain_id, chain_id); + assert_eq!(relayer.key_id, key_id); + assert_eq!(relayer.address.0, relayer_address); + assert_eq!(relayer.nonce, 0); + assert_eq!(relayer.current_nonce, 0); + assert_eq!(relayer.max_inflight_txs, 5); + assert_eq!(relayer.gas_price_limits.0, vec![]); + + db.update_relayer( + relayer_id, + &RelayerUpdate { + relayer_name: None, + max_inflight_txs: Some(10), + max_queued_txs: Some(20), + gas_price_limits: Some(vec![RelayerGasPriceLimit { + chain_id: 1, + value: U256Wrapper(U256::from(10_123u64)), + }]), + enabled: None, + }, + ) + .await?; + + let relayer = db.get_relayer(relayer_id).await?; + + assert_eq!(relayer.id, relayer_id); + assert_eq!(relayer.name, relayer_name); + assert_eq!(relayer.chain_id, chain_id); + assert_eq!(relayer.key_id, key_id); + assert_eq!(relayer.address.0, relayer_address); + assert_eq!(relayer.nonce, 0); + assert_eq!(relayer.current_nonce, 0); + assert_eq!(relayer.max_inflight_txs, 10); + assert_eq!(relayer.max_queued_txs, 20); + assert_eq!( + relayer.gas_price_limits.0, + vec![RelayerGasPriceLimit { + chain_id: 1, + value: U256Wrapper(U256::from(10_123u64)), + }] + ); + + Ok(()) + } + + #[tokio::test] + async fn tx_lifecycle() -> eyre::Result<()> { + let (db, _db_container) = setup_db().await?; + + let chain_id = 123; + let network_name = "network_name"; + let http_rpc = "http_rpc"; + let ws_rpc = "ws_rpc"; + + db.create_network(chain_id, network_name, http_rpc, ws_rpc) + .await?; + + let relayer_id = uuid(); + let relayer_id = relayer_id.as_str(); + + let relayer_name = "relayer_name"; + let key_id = "key_id"; + let relayer_address = Address::from_low_u64_be(1); + + db.create_relayer( + relayer_id, + relayer_name, + chain_id, + key_id, + relayer_address, + ) + .await?; + + let tx_id = "tx_id"; + let to = Address::from_low_u64_be(1); + let data: &[u8] = &[]; + let value = U256::from(0); + let gas_limit = U256::from(0); + let priority = TransactionPriority::Regular; + + let tx = db.read_tx(tx_id).await?; + assert!(tx.is_none(), "Tx has not been sent yet"); + + db.create_transaction( 
+ tx_id, to, data, value, gas_limit, priority, relayer_id, + ) + .await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_id, tx_id); + assert_eq!(tx.to.0, to); + assert_eq!(tx.data, data); + assert_eq!(tx.value.0, value); + assert_eq!(tx.gas_limit.0, gas_limit); + assert_eq!(tx.nonce, 0); + assert_eq!(tx.tx_hash, None); + + let unsent_txs = db.read_txs(relayer_id, None).await?; + assert_eq!(unsent_txs.len(), 1, "1 unsent tx"); + + let tx_hash_1 = H256::from_low_u64_be(1); + let tx_hash_2 = H256::from_low_u64_be(2); + let initial_max_fee_per_gas = U256::from(1); + let initial_max_priority_fee_per_gas = U256::from(1); + + db.insert_tx_broadcast( + tx_id, + tx_hash_1, + initial_max_fee_per_gas, + initial_max_priority_fee_per_gas, + ) + .await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_1); + assert_eq!(tx.status, Some(TxStatus::Pending)); + + let unsent_txs = db.read_txs(relayer_id, Some(None)).await?; + assert_eq!(unsent_txs.len(), 0, "0 unsent tx"); + + let pending_txs = db + .read_txs(relayer_id, Some(Some(TxStatus::Pending))) + .await?; + assert_eq!(pending_txs.len(), 1, "1 pending tx"); + + let all_txs = db.read_txs(relayer_id, None).await?; + + assert_eq!(all_txs, pending_txs); + + db.escalate_tx( + tx_id, + tx_hash_2, + initial_max_fee_per_gas, + initial_max_priority_fee_per_gas, + ) + .await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + // By default we take the latest tx + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_2); + assert_eq!(tx.status, Some(TxStatus::Pending)); + + // Do an update + let finalized_timestamp = ymd_hms(2023, 11, 23, 10, 32, 3); + full_update(&db, chain_id, finalized_timestamp).await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + // Nothing should have changed + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_2); + assert_eq!(tx.status, Some(TxStatus::Pending)); + + // Save block + let block_number = 1; + let timestamp = ymd_hms(2023, 11, 23, 12, 32, 2); + let txs = &[tx_hash_1]; + + db.save_block(block_number, chain_id, timestamp, txs) + .await?; + + full_update(&db, chain_id, finalized_timestamp).await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_1); + assert_eq!(tx.status, Some(TxStatus::Mined)); + + // Reorg + let txs = &[tx_hash_2]; + + db.save_block(block_number, chain_id, timestamp, txs) + .await?; + + full_update(&db, chain_id, finalized_timestamp).await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_2); + assert_eq!(tx.status, Some(TxStatus::Mined)); + + // Destructive reorg + let txs = &[]; + + db.save_block(block_number, chain_id, timestamp, txs) + .await?; + + full_update(&db, chain_id, finalized_timestamp).await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_2); + assert_eq!(tx.status, Some(TxStatus::Pending)); + + // Finalization + let txs = &[tx_hash_2]; + + db.save_block(block_number, chain_id, timestamp, txs) + .await?; + + let finalized_timestamp = ymd_hms(2023, 11, 23, 22, 0, 0); + full_update(&db, chain_id, finalized_timestamp).await?; + + let tx = db.read_tx(tx_id).await?.context("Missing tx")?; + + assert_eq!(tx.tx_hash.unwrap().0, tx_hash_2); + assert_eq!(tx.status, Some(TxStatus::Finalized)); + + Ok(()) + } + + #[tokio::test] + async fn blocks() -> eyre::Result<()> { + let (db, _db_container) = setup_db().await?; + + 
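+ // Two blocks are saved below with the same timestamp, but fees are + // attached only to the newer one, so both the latest-block and the + // latest-fees lookups must resolve to block 1.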
let block_numbers = [0, 1]; + let chain_id = 1; + let timestamp = ymd_hms(2023, 11, 23, 12, 32, 2); + let txs = &[ + H256::from_low_u64_be(1), + H256::from_low_u64_be(2), + H256::from_low_u64_be(3), + ]; + + db.save_block(block_numbers[0], chain_id, timestamp, txs) + .await?; + + db.save_block(block_numbers[1], chain_id, timestamp, txs) + .await?; + + let fee_estimates = FeesEstimate { + base_fee_per_gas: U256::from(13_132), + percentile_fees: vec![U256::from(0)], + }; + + let gas_price = U256::from(1_000_000_007); + + db.save_block_fees( + block_numbers[1], + chain_id, + &fee_estimates, + gas_price, + ) + .await?; + + let latest_block_number = + db.get_latest_block_number(chain_id) + .await? + .context("Could not get latest block number")?; + let block_fees = db.get_latest_block_fees_by_chain_id(chain_id).await?; + let block_fees = block_fees.context("Missing fees")?; + + assert_eq!(latest_block_number, block_numbers[1]); + assert_eq!( + block_fees.fee_estimates.base_fee_per_gas, + fee_estimates.base_fee_per_gas + ); + assert_eq!( + block_fees.fee_estimates.percentile_fees, + fee_estimates.percentile_fees + ); + assert_eq!(block_fees.gas_price, gas_price); + + Ok(()) + } + + fn ymd_hms( + year: i32, + month: u32, + day: u32, + hour: u32, + minute: u32, + second: u32, + ) -> DateTime { + NaiveDate::from_ymd_opt(year, month, day) + .unwrap() + .and_hms_opt(hour, minute, second) + .unwrap() + .and_utc() + } + + fn uuid() -> String { + uuid::Uuid::new_v4().to_string() + } } diff --git a/src/db/data.rs b/src/db/data.rs index 0513bba..b9113be 100644 --- a/src/db/data.rs +++ b/src/db/data.rs @@ -1,16 +1,22 @@ use ethers::types::{Address, H256, U256}; use serde::{Deserialize, Serialize}; -use sqlx::database::HasValueRef; +use sqlx::database::{HasArguments, HasValueRef}; +use sqlx::postgres::{PgHasArrayType, PgTypeInfo}; use sqlx::prelude::FromRow; use sqlx::Database; +use crate::broadcast_utils::gas_estimation::FeesEstimate; +use crate::types::TransactionPriority; + #[derive(Debug, Clone, FromRow)] pub struct UnsentTx { + pub relayer_id: String, pub id: String, pub tx_to: AddressWrapper, pub data: Vec, pub value: U256Wrapper, pub gas_limit: U256Wrapper, + pub priority: TransactionPriority, #[sqlx(try_from = "i64")] pub nonce: u64, pub key_id: String, @@ -20,6 +26,7 @@ pub struct UnsentTx { #[derive(Debug, Clone, FromRow)] pub struct TxForEscalation { + pub relayer_id: String, pub id: String, pub tx_to: AddressWrapper, pub data: Vec, @@ -36,7 +43,7 @@ pub struct TxForEscalation { pub escalation_count: usize, } -#[derive(Debug, Clone, FromRow)] +#[derive(Debug, Clone, FromRow, PartialEq, Eq)] pub struct ReadTxData { pub tx_id: String, pub to: AddressWrapper, @@ -48,15 +55,33 @@ pub struct ReadTxData { // Sent tx data pub tx_hash: Option, - pub status: Option, + pub status: Option, } #[derive(Debug, Clone)] -pub struct AddressWrapper(pub Address); +pub struct NetworkStats { + pub pending_txs: u64, + pub mined_txs: u64, + pub finalized_txs: u64, + pub total_indexed_blocks: u64, + pub block_txs: u64, +} + #[derive(Debug, Clone)] +pub struct BlockFees { + pub fee_estimates: FeesEstimate, + pub gas_price: U256, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct AddressWrapper(pub Address); + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] pub struct U256Wrapper(pub U256); -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct H256Wrapper(pub H256); impl<'r, DB> sqlx::Decode<'r, DB> for 
AddressWrapper @@ -88,6 +113,12 @@ where } } +impl From<Address>
 for AddressWrapper { + fn from(value: Address) -> Self { + Self(value) + } +} + impl<'r, DB> sqlx::Decode<'r, DB> for U256Wrapper where DB: Database, @@ -117,6 +148,28 @@ where } } +impl<'q, DB> sqlx::Encode<'q, DB> for U256Wrapper +where + DB: Database, + [u8; 32]: sqlx::Encode<'q, DB>, +{ + fn encode_by_ref( + &self, + buf: &mut <DB as HasArguments<'q>>::ArgumentBuffer, + ) -> sqlx::encode::IsNull { + let mut bytes = [0u8; 32]; + self.0.to_big_endian(&mut bytes); + + <[u8; 32] as sqlx::Encode>::encode_by_ref(&bytes, buf) + } +} + +impl From<U256> for U256Wrapper { + fn from(value: U256) -> Self { + Self(value) + } +} + impl<'r, DB> sqlx::Decode<'r, DB> for H256Wrapper where DB: Database, @@ -133,6 +186,25 @@ where } } +impl<'q, DB> sqlx::Encode<'q, DB> for H256Wrapper +where + DB: Database, + [u8; 32]: sqlx::Encode<'q, DB>, +{ + fn encode_by_ref( + &self, + buf: &mut <DB as HasArguments<'q>>::ArgumentBuffer, + ) -> sqlx::encode::IsNull { + <[u8; 32] as sqlx::Encode>::encode_by_ref(&self.0 .0, buf) + } +} + +impl PgHasArrayType for H256Wrapper { + fn array_type_info() -> PgTypeInfo { + <[u8; 32] as PgHasArrayType>::array_type_info() + } +} + impl<DB: Database> sqlx::Type<DB> for H256Wrapper where [u8; 32]: sqlx::Type<DB>, @@ -150,15 +222,15 @@ where Debug, Clone, Serialize, Deserialize, Copy, PartialEq, Eq, sqlx::Type, )] #[sqlx(rename_all = "camelCase")] -#[sqlx(type_name = "block_tx_status")] +#[sqlx(type_name = "tx_status")] #[serde(rename_all = "camelCase")] -pub enum BlockTxStatus { - Pending = 0, - Mined = 1, - Finalized = 2, +pub enum TxStatus { + Pending, + Mined, + Finalized, } -impl BlockTxStatus { +impl TxStatus { pub fn previous(self) -> Self { match self { Self::Pending => Self::Pending, @@ -167,3 +239,14 @@ impl BlockTxStatus { } } } + +#[derive( + Debug, Clone, Serialize, Deserialize, Copy, PartialEq, Eq, sqlx::Type, +)] +#[sqlx(rename_all = "camelCase")] +#[sqlx(type_name = "rpc_kind")] +#[serde(rename_all = "camelCase")] +pub enum RpcKind { + Http, + Ws, +} diff --git a/src/keys.rs b/src/keys.rs index 62f7155..3dc171c 100644 --- a/src/keys.rs +++ b/src/keys.rs @@ -1,14 +1,10 @@ -use aws_sdk_kms::types::{KeySpec, KeyUsageType}; -use aws_types::region::Region; -use ethers::core::k256::ecdsa::SigningKey; -use ethers_signers::Wallet; -use eyre::{Context, ContextCompat}; -pub use universal_signer::UniversalSigner; - -use crate::aws::ethers_signer::AwsSigner; -use crate::config::{KmsKeysConfig, LocalKeysConfig}; +pub mod kms_keys; +pub mod local_keys; +pub mod universal_signer; -mod universal_signer; +pub use kms_keys::KmsKeys; +pub use local_keys::LocalKeys; +pub use universal_signer::UniversalSigner; #[async_trait::async_trait] pub trait KeysSource: Send + Sync + 'static { @@ -18,114 +14,3 @@ pub trait KeysSource: Send + Sync + 'static { /// Loads the key using the provided id async fn load_signer(&self, id: String) -> eyre::Result<UniversalSigner>; } - -pub struct KmsKeys { - kms_client: aws_sdk_kms::Client, -} - -impl KmsKeys { - pub async fn new(config: &KmsKeysConfig) -> eyre::Result<Self> { - let aws_config = aws_config::from_env() - .region(Region::new(config.region.clone())) - .load() - .await; - - let kms_client = aws_sdk_kms::Client::new(&aws_config); - - Ok(Self { kms_client }) - } -} - -#[async_trait::async_trait] -impl KeysSource for KmsKeys { - async fn new_signer(&self) -> eyre::Result<(String, UniversalSigner)> { - let kms_key = self - .kms_client - .create_key() - .key_spec(KeySpec::EccSecgP256K1) - .key_usage(KeyUsageType::SignVerify) - .send() - .await - .context("AWS Error")?; - - let key_id = - kms_key.key_metadata.context("Missing key 
metadata")?.key_id; - - let signer = AwsSigner::new( - self.kms_client.clone(), - key_id.clone(), - 1, // TODO: get chain id from provider - ) - .await?; - - Ok((key_id, UniversalSigner::Aws(signer))) - } - - async fn load_signer(&self, id: String) -> eyre::Result { - let signer = AwsSigner::new( - self.kms_client.clone(), - id.clone(), - 1, // TODO: get chain id from provider - ) - .await?; - - Ok(UniversalSigner::Aws(signer)) - } -} - -pub struct LocalKeys { - rng: rand::rngs::OsRng, -} - -impl LocalKeys { - pub fn new(_config: &LocalKeysConfig) -> Self { - Self { - rng: rand::rngs::OsRng, - } - } -} - -#[async_trait::async_trait] -impl KeysSource for LocalKeys { - async fn new_signer(&self) -> eyre::Result<(String, UniversalSigner)> { - let signing_key = SigningKey::random(&mut self.rng.clone()); - - let key_id = signing_key.to_bytes().to_vec(); - let key_id = hex::encode(key_id); - - let signer = Wallet::from(signing_key); - - Ok((key_id, UniversalSigner::Local(signer))) - } - - async fn load_signer(&self, id: String) -> eyre::Result { - let key_id = hex::decode(id)?; - let signing_key = SigningKey::from_slice(key_id.as_slice())?; - - let signer = Wallet::from(signing_key); - - Ok(UniversalSigner::Local(signer)) - } -} - -#[cfg(test)] -mod tests { - use ethers_signers::Signer; - - use super::*; - - #[tokio::test] - async fn local_roundtrip() -> eyre::Result<()> { - let keys_source = LocalKeys::new(&LocalKeysConfig {}); - - let (id, signer) = keys_source.new_signer().await?; - - let address = signer.address(); - - let signer = keys_source.load_signer(id).await?; - - assert_eq!(address, signer.address()); - - Ok(()) - } -} diff --git a/src/keys/kms_keys.rs b/src/keys/kms_keys.rs new file mode 100644 index 0000000..baabb29 --- /dev/null +++ b/src/keys/kms_keys.rs @@ -0,0 +1,59 @@ +use aws_config::BehaviorVersion; +use aws_sdk_kms::types::{KeySpec, KeyUsageType}; +use eyre::{Context, ContextCompat}; + +use super::{KeysSource, UniversalSigner}; +use crate::aws::ethers_signer::AwsSigner; +use crate::config::KmsKeysConfig; + +pub struct KmsKeys { + kms_client: aws_sdk_kms::Client, +} + +impl KmsKeys { + pub async fn new(_config: &KmsKeysConfig) -> eyre::Result { + let aws_config = + aws_config::load_defaults(BehaviorVersion::latest()).await; + + let kms_client = aws_sdk_kms::Client::new(&aws_config); + + Ok(Self { kms_client }) + } +} + +#[async_trait::async_trait] +impl KeysSource for KmsKeys { + async fn new_signer(&self) -> eyre::Result<(String, UniversalSigner)> { + let kms_key = self + .kms_client + .create_key() + .key_spec(KeySpec::EccSecgP256K1) + .key_usage(KeyUsageType::SignVerify) + .send() + .await + .context("AWS Error")?; + + let key_id = + kms_key.key_metadata.context("Missing key metadata")?.key_id; + + let signer = AwsSigner::new( + self.kms_client.clone(), + key_id.clone(), + 1, // TODO: get chain id from provider + ) + .await?; + + Ok((key_id, UniversalSigner::Aws(signer))) + } + + async fn load_signer(&self, id: String) -> eyre::Result { + let signer = AwsSigner::new( + self.kms_client.clone(), + id.clone(), + 1, // TODO: get chain id from provider + ) + .await?; + + Ok(UniversalSigner::Aws(signer)) + } +} diff --git a/src/keys/local_keys.rs b/src/keys/local_keys.rs new file mode 100644 index 0000000..8b6e334 --- /dev/null +++ b/src/keys/local_keys.rs @@ -0,0 +1,69 @@ +use ethers::core::k256::ecdsa::SigningKey; +use ethers::signers::Wallet; + +use super::universal_signer::UniversalSigner; +use super::KeysSource; +use crate::config::LocalKeysConfig; + +pub struct LocalKeys 
{ + rng: rand::rngs::OsRng, +} + +impl LocalKeys { + pub fn new(_config: &LocalKeysConfig) -> Self { + Self { + rng: rand::rngs::OsRng, + } + } +} + +#[async_trait::async_trait] +impl KeysSource for LocalKeys { + async fn new_signer(&self) -> eyre::Result<(String, UniversalSigner)> { + let signing_key = SigningKey::random(&mut self.rng.clone()); + + let key_id = signing_key.to_bytes().to_vec(); + let key_id = hex::encode(key_id); + + let signer = Wallet::from(signing_key); + + Ok((key_id, UniversalSigner::Local(signer))) + } + + async fn load_signer(&self, id: String) -> eyre::Result { + let signing_key = signing_key_from_hex(&id)?; + + let signer = Wallet::from(signing_key); + + Ok(UniversalSigner::Local(signer)) + } +} + +pub fn signing_key_from_hex(s: &str) -> eyre::Result { + let key_id = hex::decode(s)?; + let signing_key = SigningKey::from_slice(key_id.as_slice())?; + + Ok(signing_key) +} + +#[cfg(test)] +mod tests { + use ethers::signers::Signer; + + use super::*; + + #[tokio::test] + async fn local_roundtrip() -> eyre::Result<()> { + let keys_source = LocalKeys::new(&LocalKeysConfig::default()); + + let (id, signer) = keys_source.new_signer().await?; + + let address = signer.address(); + + let signer = keys_source.load_signer(id).await?; + + assert_eq!(address, signer.address()); + + Ok(()) + } +} diff --git a/src/keys/universal_signer.rs b/src/keys/universal_signer.rs index 226cbcf..2a3db9d 100644 --- a/src/keys/universal_signer.rs +++ b/src/keys/universal_signer.rs @@ -2,8 +2,8 @@ use ethers::core::k256::ecdsa::SigningKey; use ethers::core::types::transaction::eip2718::TypedTransaction; use ethers::core::types::transaction::eip712::Eip712; use ethers::core::types::{Address, Signature as EthSig}; -use ethers::signers::Signer; -use ethers_signers::{Wallet, WalletError}; +use ethers::signers::{Signer, Wallet, WalletError}; +use ethers::types::Bytes; use thiserror::Error; use crate::aws::ethers_signer::AwsSigner; @@ -14,6 +14,20 @@ pub enum UniversalSigner { Local(Wallet), } +impl UniversalSigner { + pub async fn raw_signed_tx( + &self, + tx: &TypedTransaction, + ) -> eyre::Result { + let signature = match self { + Self::Aws(signer) => signer.sign_transaction(tx).await?, + Self::Local(signer) => signer.sign_transaction(tx).await?, + }; + + Ok(tx.rlp_signed(&signature)) + } +} + #[derive(Debug, Error)] pub enum UniversalError { #[error("AWS Signer Error: {0}")] diff --git a/src/lib.rs b/src/lib.rs index b52f53c..abb94b0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,13 +1,15 @@ +pub mod api_key; pub mod app; pub mod aws; +pub mod broadcast_utils; +pub mod client; pub mod config; pub mod db; +pub mod keys; +pub mod serde_utils; pub mod server; pub mod service; -pub mod task_backoff; +pub mod shutdown; +pub mod task_runner; pub mod tasks; - -pub mod broadcast_utils; -pub mod client; -pub mod keys; -pub mod serde_utils; +pub mod types; diff --git a/src/main.rs b/src/main.rs index a0416ce..412f807 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,18 +1,25 @@ use std::path::PathBuf; use clap::Parser; -use config::FileFormat; -use service::config::Config; -use service::service::Service; +use telemetry_batteries::metrics::statsd::StatsdBattery; +use telemetry_batteries::tracing::datadog::DatadogBattery; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; +use tx_sitter::config::load_config; +use tx_sitter::service::Service; +use tx_sitter::shutdown::spawn_await_shutdown_task; #[derive(Parser)] +#[command(author, 
version, about)] #[clap(rename_all = "kebab-case")] struct Args { - #[clap(short, long, default_value = "./config.toml")] - config: PathBuf, + #[clap(short, long)] + #[cfg_attr( + feature = "default-config", + clap(default_value = "config.toml") + )] + config: Vec, #[clap(short, long)] env_file: Vec, @@ -28,28 +35,35 @@ async fn main() -> eyre::Result<()> { dotenv::from_path(path)?; } - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().pretty().compact()) - .with(EnvFilter::from_default_env()) - .init(); - - let settings = config::Config::builder() - .add_source( - config::File::from(args.config.as_ref()).format(FileFormat::Toml), - ) - .add_source( - config::Environment::with_prefix("TX_SITTER").separator("__"), - ) - .add_source( - config::Environment::with_prefix("TX_SITTER_EXT") - .separator("__") - .try_parsing(true) - .list_separator(","), - ) - .build()?; - - let config = settings.try_deserialize::()?; + let config = load_config(args.config.iter().map(PathBuf::as_ref))?; + + let _shutdown_handle = if config.service.datadog_enabled { + let shutdown_handle = + DatadogBattery::init(None, "tx-sitter-monolith", None, true); + + Some(shutdown_handle) + } else { + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().pretty().compact()) + .with(EnvFilter::from_default_env()) + .init(); + + None + }; + + if config.service.statsd_enabled { + StatsdBattery::init( + "localhost", + 8125, + 5000, + 1024, + Some("tx_sitter_monolith"), + )?; + } + + spawn_await_shutdown_task(); + tracing::info!(?config, "Starting service"); let service = Service::new(config).await?; service.wait().await?; diff --git a/src/server.rs b/src/server.rs index cfe06ee..3a9b9ac 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,142 +1,24 @@ use std::sync::Arc; -use axum::extract::{Json, Path, State}; -use axum::http::StatusCode; -use axum::response::IntoResponse; use axum::routing::{get, post, IntoMakeService}; -use axum::{Router, TypedHeader}; -use ethers_signers::Signer; -use eyre::Result; +use axum::Router; use hyper::server::conn::AddrIncoming; -use middleware::AuthorizedRelayer; -use thiserror::Error; +use tower_http::validate_request::ValidateRequestHeaderLayer; -use self::data::{ - CreateRelayerRequest, CreateRelayerResponse, GetTxResponse, SendTxRequest, - SendTxResponse, +use self::routes::relayer::{ + create_relayer, create_relayer_api_key, get_relayer, get_relayers, + purge_unsent_txs, relayer_rpc, update_relayer, }; +use self::routes::transaction::{get_tx, get_txs, send_tx}; +use self::trace_layer::MatchedPathMakeSpan; use crate::app::App; -pub mod data; +mod error; mod middleware; +pub mod routes; +mod trace_layer; -#[derive(Debug, Error)] -pub enum ApiError { - #[error("Invalid key encoding")] - KeyEncoding, - - #[error("Invalid key length")] - KeyLength, - - #[error("Unauthorized")] - Unauthorized, - - #[error("Invalid format")] - InvalidFormat, - - #[error("Missing tx")] - MissingTx, - - #[error("Internal error {0}")] - Eyre(#[from] eyre::Report), -} - -impl IntoResponse for ApiError { - fn into_response(self) -> axum::response::Response { - let status_code = match self { - Self::KeyLength | Self::KeyEncoding => StatusCode::BAD_REQUEST, - Self::Unauthorized => StatusCode::UNAUTHORIZED, - Self::Eyre(_) => StatusCode::INTERNAL_SERVER_ERROR, - Self::InvalidFormat => StatusCode::BAD_REQUEST, - Self::MissingTx => StatusCode::NOT_FOUND, - }; - - let message = self.to_string(); - - (status_code, message).into_response() - } -} - -async fn send_tx( - State(app): State>, - 
TypedHeader(authorized_relayer): TypedHeader, - Json(req): Json, -) -> Result, ApiError> { - if !authorized_relayer.is_authorized(&req.relayer_id) { - return Err(ApiError::Unauthorized); - } - - let tx_id = if let Some(id) = req.tx_id { - id - } else { - uuid::Uuid::new_v4().to_string() - }; - - app.db - .create_transaction( - &tx_id, - req.to, - req.data.as_ref().map(|d| &d[..]).unwrap_or(&[]), - req.value, - req.gas_limit, - &req.relayer_id, - ) - .await?; - - Ok(Json(SendTxResponse { tx_id })) -} - -async fn get_tx( - State(app): State>, - Path(tx_id): Path, -) -> Result, ApiError> { - let tx = app.db.read_tx(&tx_id).await?.ok_or(ApiError::MissingTx)?; - - let get_tx_response = GetTxResponse { - tx_id: tx.tx_id, - to: tx.to.0, - data: if tx.data.is_empty() { - None - } else { - Some(tx.data.into()) - }, - value: tx.value.0, - gas_limit: tx.gas_limit.0, - nonce: tx.nonce, - tx_hash: tx.tx_hash.map(|h| h.0), - status: tx.status, - }; - - Ok(Json(get_tx_response)) -} - -async fn create_relayer( - State(app): State>, - Json(req): Json, -) -> Result, ApiError> { - let (key_id, signer) = app.keys_source.new_signer().await?; - - let address = signer.address(); - - let relayer_id = uuid::Uuid::new_v4(); - let relayer_id = relayer_id.to_string(); - - app.db - .create_relayer(&relayer_id, &req.name, req.chain_id, &key_id, address) - .await?; - - Ok(Json(CreateRelayerResponse { - relayer_id, - address, - })) -} - -async fn get_relayer( - State(_app): State>, - Path(_relayer_id): Path, -) -> &'static str { - "Hello, World!" -} +pub use self::error::ApiError; pub async fn serve(app: Arc) -> eyre::Result<()> { let server = spawn_server(app).await?; @@ -151,27 +33,41 @@ pub async fn serve(app: Arc) -> eyre::Result<()> { pub async fn spawn_server( app: Arc, ) -> eyre::Result>> { - let tx_routes = Router::new() - .route("/send", post(send_tx)) - .route("/:tx_id", get(get_tx)) - .layer(axum::middleware::from_fn_with_state( - app.clone(), - middleware::auth, - )) + let api_routes = Router::new() + .route("/:api_token/tx", post(send_tx)) + .route("/:api_token/tx/:tx_id", get(get_tx)) + .route("/:api_token/txs", get(get_txs)) + .route("/:api_token/rpc", post(relayer_rpc)) .with_state(app.clone()); - let relayer_routes = Router::new() - .route("/create", post(create_relayer)) - .route("/:relayer_id", get(get_relayer)) + let mut admin_routes = Router::new() + .route("/relayer", post(create_relayer)) + .route("/relayer/:relayer_id/reset", post(purge_unsent_txs)) + .route("/relayers", get(get_relayers)) + .route( + "/relayer/:relayer_id", + post(update_relayer).get(get_relayer), + ) + .route("/relayer/:relayer_id/key", post(create_relayer_api_key)) + .route("/network/:chain_id", post(routes::network::create_network)) .with_state(app.clone()); - // let network_routes = Router::new() - // .route("/"); + if let Some((username, password)) = app.config.server.credentials() { + admin_routes = admin_routes + .layer(ValidateRequestHeaderLayer::basic(username, password)); + } + + let v1_routes = Router::new() + .nest("/api", api_routes) + .nest("/admin", admin_routes); let router = Router::new() - .nest("/1/tx", tx_routes) - .nest("/1/relayer", relayer_routes) - .layer(tower_http::trace::TraceLayer::new_for_http()) + .nest("/1", v1_routes) + .route("/health", get(routes::health)) + .layer( + tower_http::trace::TraceLayer::new_for_http() + .make_span_with(MatchedPathMakeSpan), + ) .layer(axum::middleware::from_fn(middleware::log_response)); let server = axum::Server::bind(&app.config.server.host) diff --git 
a/src/server/data.rs b/src/server/data.rs deleted file mode 100644 index 22b2485..0000000 --- a/src/server/data.rs +++ /dev/null @@ -1,59 +0,0 @@ -use ethers::types::{Address, Bytes, H256, U256}; -use serde::{Deserialize, Serialize}; - -use crate::db::BlockTxStatus; - -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SendTxRequest { - pub relayer_id: String, - pub to: Address, - #[serde(with = "crate::serde_utils::decimal_u256")] - pub value: U256, - #[serde(default)] - pub data: Option, - #[serde(with = "crate::serde_utils::decimal_u256")] - pub gas_limit: U256, - #[serde(default)] - pub tx_id: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SendTxResponse { - pub tx_id: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct GetTxResponse { - pub tx_id: String, - pub to: Address, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub data: Option, - #[serde(with = "crate::serde_utils::decimal_u256")] - pub value: U256, - #[serde(with = "crate::serde_utils::decimal_u256")] - pub gas_limit: U256, - pub nonce: u64, - - // Sent tx data - #[serde(default, skip_serializing_if = "Option::is_none")] - pub tx_hash: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub status: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct CreateRelayerRequest { - pub name: String, - pub chain_id: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct CreateRelayerResponse { - pub relayer_id: String, - pub address: Address, -} diff --git a/src/server/error.rs b/src/server/error.rs new file mode 100644 index 0000000..2b2456b --- /dev/null +++ b/src/server/error.rs @@ -0,0 +1,123 @@ +use axum::response::IntoResponse; +use hyper::StatusCode; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Debug, Error, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ApiError { + #[error("Invalid key encoding")] + KeyEncoding, + + #[error("Invalid key length")] + KeyLength, + + #[error("Unauthorized")] + Unauthorized, + + #[error("Invalid format")] + InvalidFormat, + + #[error("Missing tx")] + MissingTx, + + #[error("Relayer is disabled")] + RelayerDisabled, + + #[error("Too many queued transactions, max: {max}, current: {current}")] + TooManyTransactions { max: usize, current: usize }, + + #[error("Internal error {0}")] + #[serde(with = "serde_eyre")] + Other(#[from] eyre::Report), +} + +impl IntoResponse for ApiError { + fn into_response(self) -> axum::response::Response { + let status_code = match self { + Self::KeyLength | Self::KeyEncoding => StatusCode::BAD_REQUEST, + Self::Unauthorized => StatusCode::UNAUTHORIZED, + Self::Other(_) => StatusCode::INTERNAL_SERVER_ERROR, + Self::InvalidFormat => StatusCode::BAD_REQUEST, + Self::MissingTx => StatusCode::NOT_FOUND, + Self::RelayerDisabled => StatusCode::FORBIDDEN, + Self::TooManyTransactions { .. 
} => StatusCode::TOO_MANY_REQUESTS, + }; + + let message = serde_json::to_string(&self) + .expect("Failed to serialize error message"); + + (status_code, message).into_response() + } +} + +// Mostly used for tests +impl PartialEq for ApiError { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + Self::TooManyTransactions { + max: l_max, + current: l_current, + }, + Self::TooManyTransactions { + max: r_max, + current: r_current, + }, + ) => l_max == r_max && l_current == r_current, + (Self::Other(l0), Self::Other(r0)) => { + l0.to_string() == r0.to_string() + } + _ => { + core::mem::discriminant(self) == core::mem::discriminant(other) + } + } + } +} + +mod serde_eyre { + use std::borrow::Cow; + + use serde::Deserialize; + + pub fn serialize( + error: &eyre::Report, + serializer: S, + ) -> Result + where + S: serde::Serializer, + { + let error = error.to_string(); + serializer.serialize_str(&error) + } + + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result + where + D: serde::Deserializer<'de>, + { + let error = Cow::<'static, str>::deserialize(deserializer)?; + Ok(eyre::eyre!(error)) + } +} + +#[cfg(test)] +mod tests { + use test_case::test_case; + + use super::*; + + #[test_case(ApiError::KeyLength, r#""keyLength""# ; "Key length")] + #[test_case(ApiError::Other(eyre::eyre!("Test error")), r#"{"other":"Test error"}"# ; "Other error")] + #[test_case(ApiError::TooManyTransactions { max: 10, current: 20 }, r#"{"tooManyTransactions":{"max":10,"current":20}}"# ; "Too many transactions")] + fn serialization(error: ApiError, expected: &str) { + let serialized = serde_json::to_string(&error).unwrap(); + + assert_eq!(serialized, expected); + + let deserialized = serde_json::from_str::(expected).unwrap(); + + assert_eq!(error, deserialized); + } +} diff --git a/src/server/middleware.rs b/src/server/middleware.rs index 50ba8c4..a56ef0c 100644 --- a/src/server/middleware.rs +++ b/src/server/middleware.rs @@ -1,5 +1,3 @@ -mod auth_middleware; mod log_response_middleware; -pub use self::auth_middleware::{auth, AuthorizedRelayer}; pub use self::log_response_middleware::log_response; diff --git a/src/server/middleware/auth_middleware.rs b/src/server/middleware/auth_middleware.rs deleted file mode 100644 index 8935450..0000000 --- a/src/server/middleware/auth_middleware.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::sync::Arc; - -use axum::extract::{Query, State}; -use axum::http::{HeaderName, HeaderValue, Request}; -use axum::middleware::Next; -use axum::response::{IntoResponse, Response}; -use headers::Header; -use serde::{Deserialize, Serialize}; - -use crate::app::App; -use crate::server::ApiError; - -pub const AUTHORIZED_RELAYER: &str = "x-authorized-relayer"; -static HEADER_NAME: HeaderName = HeaderName::from_static(AUTHORIZED_RELAYER); - -pub enum AuthorizedRelayer { - Named(String), - Any, -} - -impl AuthorizedRelayer { - pub fn is_authorized(&self, relayer_id: &str) -> bool { - match self { - AuthorizedRelayer::Any => true, - AuthorizedRelayer::Named(name) => name == relayer_id, - } - } -} - -impl Header for AuthorizedRelayer { - fn name() -> &'static HeaderName { - &HEADER_NAME - } - - fn decode<'i, I>(values: &mut I) -> Result - where - Self: Sized, - I: Iterator, - { - let value = values.next().ok_or_else(headers::Error::invalid)?; - let value = value - .to_str() - .map_err(|_| headers::Error::invalid())? 
- .to_owned(); - - if value == "*" { - Ok(Self::Any) - } else { - Ok(Self::Named(value)) - } - } - - fn encode>(&self, values: &mut E) { - match self { - AuthorizedRelayer::Named(name) => values - .extend(std::iter::once(HeaderValue::from_str(name).unwrap())), - AuthorizedRelayer::Any => { - values.extend(std::iter::once(HeaderValue::from_static("*"))) - } - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct AuthParams { - #[serde(default)] - api_key: Option, -} - -pub async fn auth( - State(context): State>, - Query(query): Query, - request: Request, - next: Next, -) -> Response { - let (mut parts, body) = request.into_parts(); - - if context.config.server.disable_auth { - parts - .headers - .insert(AUTHORIZED_RELAYER, HeaderValue::from_str("*").unwrap()); - } else { - let authorized_relayer = match auth_inner(context.clone(), query).await - { - Ok(relayer_id) => relayer_id, - Err(error) => return error.into_response(), - }; - - parts.headers.insert( - AUTHORIZED_RELAYER, - HeaderValue::from_str(&authorized_relayer).unwrap(), - ); - } - - let request = Request::from_parts(parts, body); - - next.run(request).await -} - -async fn auth_inner( - _app: Arc, - _query: AuthParams, -) -> Result { - todo!("Add tables to DB and implement") - // let mut api_key = None; - - // TODO: Support Bearer in auth header - // let auth_header = parts.headers.get(AUTHORIZATION); - // if let Some(auth_header) = auth_header { - // todo!() - // } - - // if let Some(api_key_from_query) = query.api_key { - // api_key = Some(api_key_from_query); - // } - - // let Some(api_key) = api_key else { - // return Err(ApiError::Unauthorized); - // }; - - // let api_key = hex::decode(&api_key).map_err(|err| { - // tracing::warn!(?err, "Error decoding api key"); - - // ApiError::KeyEncoding - // })?; - - // let api_key: [u8; 32] = - // api_key.try_into().map_err(|_| ApiError::KeyLength)?; - - // let api_key_hash = Sha3_256::digest(&api_key); - - // let api_key_hash = hex::encode(api_key_hash); - - // // let relayer_id = context - // // .api_keys_db - // // .get_relayer_id_by_hash(api_key_hash) - // // .await? 
- // // .ok_or_else(|| ApiError::Unauthorized)?; - - // let relayer_id = todo!(); - - // Ok(relayer_id) -} diff --git a/src/server/routes.rs b/src/server/routes.rs new file mode 100644 index 0000000..de17332 --- /dev/null +++ b/src/server/routes.rs @@ -0,0 +1,9 @@ +use hyper::StatusCode; + +pub mod network; +pub mod relayer; +pub mod transaction; + +pub async fn health() -> StatusCode { + StatusCode::OK +} diff --git a/src/server/routes/network.rs b/src/server/routes/network.rs new file mode 100644 index 0000000..2c0b349 --- /dev/null +++ b/src/server/routes/network.rs @@ -0,0 +1,75 @@ +use std::sync::Arc; + +use axum::extract::{Json, Path, State}; +use eyre::Result; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::app::App; +use crate::server::ApiError; +use crate::service::Service; +use crate::task_runner::TaskRunner; + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct NewNetworkInfo { + pub name: String, + pub http_rpc: String, + pub ws_rpc: String, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct NetworkInfo { + pub chain_id: u64, + pub name: String, + pub http_rpc: String, + pub ws_rpc: String, +} + +#[tracing::instrument(skip(app))] +pub async fn create_network( + State(app): State>, + Path(chain_id): Path, + Json(network): Json, +) -> Result<(), ApiError> { + let http_url: Url = network.http_rpc.parse().map_err(|err| { + tracing::error!("Invalid http rpc url: {}", err); + ApiError::InvalidFormat + })?; + + let ws_url: Url = network.ws_rpc.parse().map_err(|err| { + tracing::error!("Invalid ws rpc url: {}", err); + ApiError::InvalidFormat + })?; + + app.db + .create_network( + chain_id, + &network.name, + http_url.as_str(), + ws_url.as_str(), + ) + .await?; + + let task_runner = TaskRunner::new(app.clone()); + Service::spawn_chain_tasks(&task_runner, chain_id)?; + + Ok(()) +} + +#[tracing::instrument(skip(_app))] +pub async fn _get_network( + State(_app): State>, + Path(_chain_id): Path, +) -> &'static str { + "Hello, World!" +} + +#[tracing::instrument(skip(_app))] +pub async fn _get_networks( + State(_app): State>, + Path(_chain_id): Path, +) -> &'static str { + "Hello, World!" 
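+ // NOTE: placeholder response; listing networks is not implemented yet.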
+} diff --git a/src/server/routes/relayer.rs b/src/server/routes/relayer.rs new file mode 100644 index 0000000..066f4ab --- /dev/null +++ b/src/server/routes/relayer.rs @@ -0,0 +1,168 @@ +use std::sync::Arc; + +use axum::extract::{Json, Path, State}; +use ethers::signers::Signer; +use ethers::types::Address; +use eyre::Result; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +use crate::api_key::ApiKey; +use crate::app::App; +use crate::server::ApiError; +use crate::types::{RelayerInfo, RelayerUpdate}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateRelayerRequest { + pub name: String, + pub chain_id: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateRelayerResponse { + pub relayer_id: String, + pub address: Address, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcRequest { + pub id: i32, + pub method: String, + #[serde(default)] + pub params: Value, + pub jsonrpc: JsonRpcVersion, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcResponse { + pub id: i32, + pub result: Value, + pub jsonrpc: JsonRpcVersion, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum JsonRpcVersion { + #[serde(rename = "2.0")] + V2, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateApiKeyResponse { + pub api_key: ApiKey, +} + +#[tracing::instrument(skip(app))] +pub async fn create_relayer( + State(app): State<Arc<App>>, + Json(req): Json<CreateRelayerRequest>, +) -> Result<Json<CreateRelayerResponse>, ApiError> { + let (key_id, signer) = app.keys_source.new_signer().await?; + + let address = signer.address(); + + let relayer_id = uuid::Uuid::new_v4(); + let relayer_id = relayer_id.to_string(); + + app.db + .create_relayer(&relayer_id, &req.name, req.chain_id, &key_id, address) + .await?; + + Ok(Json(CreateRelayerResponse { + relayer_id, + address, + })) +} + +#[tracing::instrument(skip(app))] +pub async fn update_relayer( + State(app): State<Arc<App>>, + Path(relayer_id): Path<String>, + Json(req): Json<RelayerUpdate>, +) -> Result<Json<()>, ApiError> { + app.db.update_relayer(&relayer_id, &req).await?; + + Ok(Json(())) +} + +#[tracing::instrument(skip(app))] +pub async fn get_relayers( + State(app): State<Arc<App>>, +) -> Result<Json<Vec<RelayerInfo>>, ApiError> { + let relayer_info = app.db.get_relayers().await?; + + Ok(Json(relayer_info)) +} + +#[tracing::instrument(skip(app))] +pub async fn get_relayer( + State(app): State<Arc<App>>, + Path(relayer_id): Path<String>, +) -> Result<Json<RelayerInfo>, ApiError> { + let relayer_info = app.db.get_relayer(&relayer_id).await?; + + Ok(Json(relayer_info)) +} + +/// Resets the relayer +/// deletes all unsent txs +/// and resets nonce to the current confirmed nonce +#[tracing::instrument(skip(app))] +pub async fn purge_unsent_txs( + State(app): State<Arc<App>>, + Path(relayer_id): Path<String>, +) -> Result<Json<()>, ApiError> { + app.db.purge_unsent_txs(&relayer_id).await?; + + Ok(Json(())) +} + +#[tracing::instrument(skip(app, api_token))] +pub async fn relayer_rpc( + State(app): State<Arc<App>>, + Path(api_token): Path<ApiKey>, + Json(req): Json<Value>, +) -> Result<Json<Value>, ApiError> { + if !app.is_authorized(&api_token).await? { + return Err(ApiError::Unauthorized); + } + + let relayer_info = app.db.get_relayer(api_token.relayer_id()).await?; + + // TODO: Cache? 
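+ // The code below is a thin JSON-RPC proxy: the request body is forwarded + // verbatim to the HTTP RPC of the relayer's network and the raw JSON + // response is returned to the caller.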
+ let http_provider = app.http_provider(relayer_info.chain_id).await?; + let url = http_provider.url(); + + let response = reqwest::Client::new() + .post(url.clone()) + .json(&req) + .send() + .await + .map_err(|err| { + eyre::eyre!("Error sending request to {}: {}", url, err) + })?; + + let response: Value = response.json().await.map_err(|err| { + eyre::eyre!("Error parsing response from {}: {}", url, err) + })?; + + Ok(Json(response)) +} + +#[tracing::instrument(skip(app))] +pub async fn create_relayer_api_key( + State(app): State>, + Path(relayer_id): Path, +) -> Result, ApiError> { + let api_key = ApiKey::random(&relayer_id); + + app.db + .create_api_key(&relayer_id, api_key.api_key_secret_hash()) + .await?; + + Ok(Json(CreateApiKeyResponse { api_key })) +} diff --git a/src/server/routes/transaction.rs b/src/server/routes/transaction.rs new file mode 100644 index 0000000..45e16d5 --- /dev/null +++ b/src/server/routes/transaction.rs @@ -0,0 +1,222 @@ +use std::sync::Arc; + +use axum::extract::{Json, Path, Query, State}; +use ethers::types::{Address, Bytes, H256, U256}; +use eyre::Result; +use serde::{Deserialize, Serialize}; + +use crate::api_key::ApiKey; +use crate::app::App; +use crate::db::TxStatus; +use crate::server::ApiError; +use crate::types::TransactionPriority; + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SendTxRequest { + pub to: Address, + #[serde(with = "crate::serde_utils::decimal_u256")] + pub value: U256, + #[serde(default)] + pub data: Option, + #[serde(with = "crate::serde_utils::decimal_u256")] + pub gas_limit: U256, + #[serde(default)] + pub priority: TransactionPriority, + #[serde(default)] + pub tx_id: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SendTxResponse { + pub tx_id: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetTxQuery { + #[serde(default)] + pub status: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetTxResponse { + pub tx_id: String, + pub to: Address, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub data: Option, + #[serde(with = "crate::serde_utils::decimal_u256")] + pub value: U256, + #[serde(with = "crate::serde_utils::decimal_u256")] + pub gas_limit: U256, + pub nonce: u64, + + // Sent tx data + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tx_hash: Option, + pub status: GetTxResponseStatus, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(rename_all = "camelCase")] +pub enum GetTxResponseStatus { + TxStatus(TxStatus), + Unsent(UnsentStatus), +} + +// We need this status as a separate enum to avoid manual serialization +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum UnsentStatus { + Unsent, +} + +#[tracing::instrument(skip(app, api_token))] +pub async fn send_tx( + State(app): State>, + Path(api_token): Path, + Json(req): Json, +) -> Result, ApiError> { + if !app.is_authorized(&api_token).await? 
{ + return Err(ApiError::Unauthorized); + } + + let tx_id = if let Some(id) = req.tx_id { + id + } else { + uuid::Uuid::new_v4().to_string() + }; + + let relayer = app.db.get_relayer(api_token.relayer_id()).await?; + + if !relayer.enabled { + return Err(ApiError::RelayerDisabled); + } + + let relayer_queued_tx_count = app + .db + .get_relayer_pending_txs(api_token.relayer_id()) + .await?; + + if relayer_queued_tx_count > relayer.max_queued_txs as usize { + return Err(ApiError::TooManyTransactions { + max: relayer.max_queued_txs as usize, + current: relayer_queued_tx_count, + }); + } + + app.db + .create_transaction( + &tx_id, + req.to, + req.data.as_ref().map(|d| &d[..]).unwrap_or(&[]), + req.value, + req.gas_limit, + req.priority, + api_token.relayer_id(), + ) + .await?; + + tracing::info!(tx_id, "Transaction created"); + + Ok(Json(SendTxResponse { tx_id })) +} + +#[tracing::instrument(skip(app, api_token))] +pub async fn get_txs( + State(app): State>, + Path(api_token): Path, + Query(query): Query, +) -> Result>, ApiError> { + if !app.is_authorized(&api_token).await? { + return Err(ApiError::Unauthorized); + } + + let txs = match query.status { + Some(GetTxResponseStatus::TxStatus(status)) => { + app.db + .read_txs(api_token.relayer_id(), Some(Some(status))) + .await? + } + Some(GetTxResponseStatus::Unsent(_)) => { + app.db.read_txs(api_token.relayer_id(), Some(None)).await? + } + None => app.db.read_txs(api_token.relayer_id(), None).await?, + }; + + let txs = + txs.into_iter() + .map(|tx| GetTxResponse { + tx_id: tx.tx_id, + to: tx.to.0, + data: if tx.data.is_empty() { + None + } else { + Some(tx.data.into()) + }, + value: tx.value.0, + gas_limit: tx.gas_limit.0, + nonce: tx.nonce, + tx_hash: tx.tx_hash.map(|h| h.0), + status: tx.status.map(GetTxResponseStatus::TxStatus).unwrap_or( + GetTxResponseStatus::Unsent(UnsentStatus::Unsent), + ), + }) + .collect(); + + Ok(Json(txs)) +} + +#[tracing::instrument(skip(app, api_token))] +pub async fn get_tx( + State(app): State>, + Path((api_token, tx_id)): Path<(ApiKey, String)>, +) -> Result, ApiError> { + if !app.is_authorized(&api_token).await? 
{ + return Err(ApiError::Unauthorized); + } + + let tx = app.db.read_tx(&tx_id).await?.ok_or(ApiError::MissingTx)?; + + let get_tx_response = GetTxResponse { + tx_id: tx.tx_id, + to: tx.to.0, + data: if tx.data.is_empty() { + None + } else { + Some(tx.data.into()) + }, + value: tx.value.0, + gas_limit: tx.gas_limit.0, + nonce: tx.nonce, + tx_hash: tx.tx_hash.map(|h| h.0), + status: tx + .status + .map(GetTxResponseStatus::TxStatus) + .unwrap_or(GetTxResponseStatus::Unsent(UnsentStatus::Unsent)), + }; + + Ok(Json(get_tx_response)) +} + +#[cfg(test)] +mod tests { + use test_case::test_case; + + use super::*; + + #[test_case(GetTxResponseStatus::TxStatus(TxStatus::Pending) => "pending")] + #[test_case(GetTxResponseStatus::Unsent(UnsentStatus::Unsent) => "unsent")] + fn get_tx_response_status_serialization( + status: GetTxResponseStatus, + ) -> &'static str { + let json = serde_json::to_string(&status).unwrap(); + + let s = json.trim_start_matches('\"').trim_end_matches('\"'); + + Box::leak(s.to_owned().into_boxed_str()) + } +} diff --git a/src/server/trace_layer.rs b/src/server/trace_layer.rs new file mode 100644 index 0000000..85987b9 --- /dev/null +++ b/src/server/trace_layer.rs @@ -0,0 +1,25 @@ +use axum::extract::MatchedPath; +use hyper::Request; +use tower_http::trace::MakeSpan; +use tracing::{Level, Span}; + +/// MakeSpan to remove api keys from logs +#[derive(Clone)] +pub(crate) struct MatchedPathMakeSpan; + +impl MakeSpan for MatchedPathMakeSpan { + fn make_span(&mut self, request: &Request) -> Span { + let matched_path = request + .extensions() + .get::() + .map(MatchedPath::as_str); + + tracing::span!( + Level::DEBUG, + "request", + method = %request.method(), + matched_path, + version = ?request.version(), + ) + } +} diff --git a/src/service.rs b/src/service.rs index b07c6c4..e610e44 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1,11 +1,13 @@ use std::net::SocketAddr; use std::sync::Arc; +use ethers::signers::{Signer, Wallet}; use tokio::task::JoinHandle; use crate::app::App; use crate::config::Config; -use crate::task_backoff::TaskRunner; +use crate::keys::local_keys::signing_key_from_hex; +use crate::task_runner::TaskRunner; use crate::tasks; pub struct Service { @@ -18,10 +20,24 @@ impl Service { pub async fn new(config: Config) -> eyre::Result { let app = Arc::new(App::new(config).await?); + let chain_ids = app.db.get_network_chain_ids().await?; + let task_runner = TaskRunner::new(app.clone()); task_runner.add_task("Broadcast transactions", tasks::broadcast_txs); - task_runner.add_task("Index transactions", tasks::index_blocks); - task_runner.add_task("Escalate transactions", tasks::escalate_txs); + task_runner.add_task("Escalate transactions", tasks::escalate_txs_task); + task_runner.add_task("Prune blocks", tasks::prune_blocks); + task_runner.add_task("Prune transactions", tasks::prune_txs); + task_runner.add_task("Finalize transactions", tasks::finalize_txs); + task_runner.add_task("Handle soft reorgs", tasks::handle_soft_reorgs); + task_runner.add_task("Handle hard reorgs", tasks::handle_hard_reorgs); + + if app.config.service.statsd_enabled { + task_runner.add_task("Emit metrics", tasks::emit_metrics); + } + + for chain_id in chain_ids { + Self::spawn_chain_tasks(&task_runner, chain_id)?; + } let server = crate::server::spawn_server(app.clone()).await?; let local_addr = server.local_addr(); @@ -30,6 +46,8 @@ impl Service { Ok(()) }); + initialize_predefined_values(&app).await?; + Ok(Self { _app: app, local_addr, @@ -37,6 +55,23 @@ impl Service { }) } + pub fn 
spawn_chain_tasks( + task_runner: &TaskRunner, + chain_id: u64, + ) -> eyre::Result<()> { + task_runner.add_task( + format!("Index blocks (chain id: {})", chain_id), + move |app| crate::tasks::index::index_chain(app, chain_id), + ); + + task_runner.add_task( + format!("Estimate fees (chain id: {})", chain_id), + move |app| crate::tasks::index::estimate_gas(app, chain_id), + ); + + Ok(()) + } + pub fn local_addr(&self) -> SocketAddr { self.local_addr } @@ -47,3 +82,53 @@ impl Service { Ok(()) } } + +async fn initialize_predefined_values( + app: &Arc, +) -> Result<(), eyre::Error> { + if app.config.service.predefined.is_some() && !app.config.keys.is_local() { + eyre::bail!("Predefined relayers are only supported with local keys"); + } + + let Some(predefined) = app.config.service.predefined.as_ref() else { + return Ok(()); + }; + + tracing::warn!("Running with predefined values is not recommended in a production environment"); + + app.db + .create_network( + predefined.network.chain_id, + &predefined.network.name, + &predefined.network.http_rpc, + &predefined.network.ws_rpc, + ) + .await?; + + let task_runner = TaskRunner::new(app.clone()); + Service::spawn_chain_tasks(&task_runner, predefined.network.chain_id)?; + + let secret_key = signing_key_from_hex(&predefined.relayer.key_id)?; + + let signer = Wallet::from(secret_key); + let address = signer.address(); + + app.db + .create_relayer( + &predefined.relayer.id, + &predefined.relayer.name, + predefined.relayer.chain_id, + &predefined.relayer.key_id, + address, + ) + .await?; + + app.db + .create_api_key( + predefined.relayer.api_key.relayer_id(), + predefined.relayer.api_key.api_key_secret_hash(), + ) + .await?; + + Ok(()) +} diff --git a/src/shutdown.rs b/src/shutdown.rs new file mode 100644 index 0000000..fa35a31 --- /dev/null +++ b/src/shutdown.rs @@ -0,0 +1,28 @@ +use core::panic; + +use tokio::signal::unix::{signal, SignalKind}; + +pub fn spawn_await_shutdown_task() { + tokio::spawn(async { + let result = await_shutdown_signal().await; + if let Err(err) = result { + tracing::error!("Error while waiting for shutdown signal: {}", err); + panic!("Error while waiting for shutdown signal: {}", err); + } + + tracing::info!("Shutdown complete"); + std::process::exit(0); + }); +} + +pub async fn await_shutdown_signal() -> eyre::Result<()> { + let mut sigint = signal(SignalKind::interrupt())?; + let mut sigterm = signal(SignalKind::terminate())?; + + tokio::select! 
{ + _ = sigint.recv() => { tracing::info!("SIGINT received, shutting down"); } + _ = sigterm.recv() => { tracing::info!("SIGTERM received, shutting down"); } + }; + + Ok(()) +} diff --git a/src/task_backoff.rs b/src/task_runner.rs similarity index 74% rename from src/task_backoff.rs rename to src/task_runner.rs index 4099e36..f5b4367 100644 --- a/src/task_backoff.rs +++ b/src/task_runner.rs @@ -2,24 +2,28 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use futures::Future; - -use crate::app::App; +use tokio::task::JoinHandle; const FAILURE_MONITORING_PERIOD: Duration = Duration::from_secs(60); -pub struct TaskRunner { - app: Arc, +pub struct TaskRunner { + app: Arc, } -impl TaskRunner { - pub fn new(app: Arc) -> Self { +impl TaskRunner { + pub fn new(app: Arc) -> Self { Self { app } } +} - pub fn add_task(&self, label: S, task: C) +impl TaskRunner +where + T: Send + Sync + 'static, +{ + pub fn add_task(&self, label: S, task: C) -> JoinHandle<()> where S: ToString, - C: Fn(Arc) -> F + Send + Sync + 'static, + C: Fn(Arc) -> F + Send + Sync + 'static, F: Future> + Send + 'static, { let app = self.app.clone(); @@ -29,12 +33,12 @@ impl TaskRunner { let mut failures = vec![]; loop { - tracing::info!(label, "Running task"); + tracing::info!(task_label = label, "Running task"); let result = task(app.clone()).await; if let Err(err) = result { - tracing::error!(label, error = ?err, "Task failed"); + tracing::error!(task_label = label, error = ?err, "Task failed"); failures.push(Instant::now()); let backoff = determine_backoff(&failures); @@ -43,11 +47,11 @@ impl TaskRunner { prune_failures(&mut failures); } else { - tracing::info!(label, "Task finished"); + tracing::info!(task_label = label, "Task finished"); break; } } - }); + }) } } diff --git a/src/tasks.rs b/src/tasks.rs index 8839ce6..7e13973 100644 --- a/src/tasks.rs +++ b/src/tasks.rs @@ -1,7 +1,15 @@ pub mod broadcast; pub mod escalate; +pub mod finalize; +pub mod handle_reorgs; pub mod index; +pub mod metrics; +pub mod prune; pub use self::broadcast::broadcast_txs; -pub use self::escalate::escalate_txs; -pub use self::index::index_blocks; +pub use self::escalate::escalate_txs_task; +pub use self::finalize::finalize_txs; +pub use self::handle_reorgs::{handle_hard_reorgs, handle_soft_reorgs}; +pub use self::index::index_chain; +pub use self::metrics::emit_metrics; +pub use self::prune::{prune_blocks, prune_txs}; diff --git a/src/tasks/broadcast.rs b/src/tasks/broadcast.rs index b7c7f21..e2e97e8 100644 --- a/src/tasks/broadcast.rs +++ b/src/tasks/broadcast.rs @@ -1,84 +1,176 @@ +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use ethers::providers::Middleware; use ethers::types::transaction::eip2718::TypedTransaction; use ethers::types::transaction::eip2930::AccessList; -use ethers::types::{Address, Eip1559TransactionRequest, NameOrAddress}; +use ethers::types::{Address, Eip1559TransactionRequest, NameOrAddress, H256}; use eyre::ContextCompat; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use itertools::Itertools; use crate::app::App; use crate::broadcast_utils::{ - calculate_gas_fees_from_estimates, calculate_max_base_fee_per_gas, + calculate_gas_fees_from_estimates, should_send_relayer_transactions, }; +use crate::db::UnsentTx; -const MAX_IN_FLIGHT_TXS: usize = 5; +const NO_TXS_SLEEP_DURATION: Duration = Duration::from_secs(2); pub async fn broadcast_txs(app: Arc) -> eyre::Result<()> { loop { - let txs = app.db.get_unsent_txs(MAX_IN_FLIGHT_TXS).await?; - - // TODO: Parallelize per 
diff --git a/src/tasks.rs b/src/tasks.rs index 8839ce6..7e13973 100644 --- a/src/tasks.rs +++ b/src/tasks.rs @@ -1,7 +1,15 @@ pub mod broadcast; pub mod escalate; +pub mod finalize; +pub mod handle_reorgs; pub mod index; +pub mod metrics; +pub mod prune; pub use self::broadcast::broadcast_txs; -pub use self::escalate::escalate_txs; -pub use self::index::index_blocks; +pub use self::escalate::escalate_txs_task; +pub use self::finalize::finalize_txs; +pub use self::handle_reorgs::{handle_hard_reorgs, handle_soft_reorgs}; +pub use self::index::index_chain; +pub use self::metrics::emit_metrics; +pub use self::prune::{prune_blocks, prune_txs}; diff --git a/src/tasks/broadcast.rs b/src/tasks/broadcast.rs index b7c7f21..e2e97e8 100644 --- a/src/tasks/broadcast.rs +++ b/src/tasks/broadcast.rs @@ -1,84 +1,176 @@ +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use ethers::providers::Middleware; use ethers::types::transaction::eip2718::TypedTransaction; use ethers::types::transaction::eip2930::AccessList; -use ethers::types::{Address, Eip1559TransactionRequest, NameOrAddress}; +use ethers::types::{Address, Eip1559TransactionRequest, NameOrAddress, H256}; use eyre::ContextCompat; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use itertools::Itertools; use crate::app::App; use crate::broadcast_utils::{ - calculate_gas_fees_from_estimates, calculate_max_base_fee_per_gas, + calculate_gas_fees_from_estimates, should_send_relayer_transactions, }; +use crate::db::UnsentTx; -const MAX_IN_FLIGHT_TXS: usize = 5; +const NO_TXS_SLEEP_DURATION: Duration = Duration::from_secs(2); pub async fn broadcast_txs(app: Arc<App>) -> eyre::Result<()> { loop { - let txs = app.db.get_unsent_txs(MAX_IN_FLIGHT_TXS).await?; - - // TODO: Parallelize per chain id? - for tx in txs { - tracing::info!(tx.id, "Sending tx"); - - let middleware = app - .fetch_signer_middleware(tx.chain_id, tx.key_id.clone()) - .await?; - - let estimates = app - .db - .get_latest_block_fees_by_chain_id(tx.chain_id) - .await? - .context("Missing block")?; - - let max_base_fee_per_gas = - calculate_max_base_fee_per_gas(&estimates)?; - - let (max_fee_per_gas, max_priority_fee_per_gas) = - calculate_gas_fees_from_estimates( - &estimates, - 2, // Priority - 50th percentile - max_base_fee_per_gas, - )?; - - let eip1559_tx = Eip1559TransactionRequest { - from: None, - to: Some(NameOrAddress::from(Address::from(tx.tx_to.0))), - gas: Some(tx.gas_limit.0), - value: Some(tx.value.0), - data: Some(tx.data.into()), - nonce: Some(tx.nonce.into()), - access_list: AccessList::default(), - max_priority_fee_per_gas: Some(max_priority_fee_per_gas), - max_fee_per_gas: Some(max_fee_per_gas), - chain_id: Some(tx.chain_id.into()), - }; - - tracing::debug!(?eip1559_tx, "Sending tx"); - - // TODO: Is it possible that we send a tx but don't store it in the DB? - // TODO: Be smarter about error handling - a tx can fail to be sent - // e.g. because the relayer is out of funds - // but we don't want to retry it forever - let pending_tx = middleware - .send_transaction(TypedTransaction::Eip1559(eip1559_tx), None) - .await?; - - let tx_hash = pending_tx.tx_hash(); - - tracing::info!(?tx.id, ?tx_hash, "Tx sent successfully"); - - app.db - .insert_tx_broadcast( - &tx.id, - tx_hash, - max_fee_per_gas, - max_priority_fee_per_gas, - ) - .await?; + let txs = app.db.get_unsent_txs().await?; + let num_txs = txs.len(); + + let txs_by_relayer = sort_txs_by_relayer(txs); + + let mut futures = FuturesUnordered::new(); + + for (relayer_id, txs) in txs_by_relayer { + futures.push(broadcast_relayer_txs(&app, relayer_id, txs)); + } + + while let Some(result) = futures.next().await { + if let Err(err) = result { + tracing::error!(error = ?err, "Failed broadcasting transactions"); + } + } + + if num_txs == 0 { + tokio::time::sleep(NO_TXS_SLEEP_DURATION).await; + } + } +} - - tokio::time::sleep(Duration::from_secs(5)).await; + }
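+ +// Concurrency note: FuturesUnordered drives the per-relayer broadcasts +// concurrently, while txs for a single relayer are sent strictly one at a +// time and in nonce order (see sort_txs_by_relayer below); sending a higher +// nonce first would leave the relayer's txs stuck behind the nonce gap.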
+ .context("Missing block fees")?; + + let max_base_fee_per_gas = fees.fee_estimates.base_fee_per_gas; + + let (max_fee_per_gas, max_priority_fee_per_gas) = + calculate_gas_fees_from_estimates( + &fees.fee_estimates, + tx.priority.to_percentile_index(), + max_base_fee_per_gas, + ); + + let mut typed_transaction = + TypedTransaction::Eip1559(Eip1559TransactionRequest { + from: None, + to: Some(NameOrAddress::from(Address::from(tx.tx_to.0))), + gas: Some(tx.gas_limit.0), + value: Some(tx.value.0), + data: Some(tx.data.into()), + nonce: Some(tx.nonce.into()), + access_list: AccessList::default(), + max_priority_fee_per_gas: Some(max_priority_fee_per_gas), + max_fee_per_gas: Some(max_fee_per_gas), + chain_id: Some(tx.chain_id.into()), + }); + + // Fill and simulate the transaction + middleware + .fill_transaction(&mut typed_transaction, None) + .await?; + + // Get the raw signed tx and derive the tx hash + let raw_signed_tx = middleware + .signer() + .raw_signed_tx(&typed_transaction) + .await?; + let tx_hash = H256::from(ethers::utils::keccak256(&raw_signed_tx)); + + tracing::debug!(tx_id = tx.id, "Saving transaction"); + app.db + .insert_tx_broadcast( + &tx.id, + tx_hash, + max_fee_per_gas, + max_priority_fee_per_gas, + ) + .await?; + + tracing::debug!(tx_id = tx.id, "Sending transaction"); + + let pending_tx = middleware.send_raw_transaction(raw_signed_tx).await; + + let pending_tx = match pending_tx { + Ok(pending_tx) => pending_tx, + Err(err) => { + tracing::error!(tx_id = tx.id, error = ?err, "Failed to send transaction"); + return Ok(()); + } + }; + + tracing::info!( + tx_id = tx.id, + tx_nonce = tx.nonce, + tx_hash = ?tx_hash, + ?pending_tx, + "Transaction broadcast" + ); + + Ok(()) +} + +fn sort_txs_by_relayer( + mut txs: Vec, +) -> HashMap> { + txs.sort_unstable_by_key(|tx| tx.relayer_id.clone()); + let txs_by_relayer = txs.into_iter().group_by(|tx| tx.relayer_id.clone()); + + txs_by_relayer + .into_iter() + .map(|(relayer_id, txs)| { + let mut txs = txs.collect_vec(); + + txs.sort_unstable_by_key(|tx| tx.nonce); + + (relayer_id, txs) + }) + .collect() } diff --git a/src/tasks/escalate.rs b/src/tasks/escalate.rs index b840144..55367f6 100644 --- a/src/tasks/escalate.rs +++ b/src/tasks/escalate.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::Arc; use ethers::providers::Middleware; @@ -5,81 +6,190 @@ use ethers::types::transaction::eip2718::TypedTransaction; use ethers::types::transaction::eip2930::AccessList; use ethers::types::{Address, Eip1559TransactionRequest, NameOrAddress, U256}; use eyre::ContextCompat; +use futures::stream::FuturesUnordered; +use futures::StreamExt; use crate::app::App; +use crate::broadcast_utils::should_send_relayer_transactions; +use crate::db::TxForEscalation; +use crate::types::RelayerInfo; -pub async fn escalate_txs(app: Arc) -> eyre::Result<()> { +pub async fn escalate_txs_task(app: Arc) -> eyre::Result<()> { loop { - let txs_for_escalation = app - .db - .fetch_txs_for_escalation(app.config.service.escalation_interval) - .await?; - - for tx in txs_for_escalation { - tracing::info!(tx.id, "Escalating tx"); - - let middleware = app - .fetch_signer_middleware(tx.chain_id, tx.key_id.clone()) - .await?; - - let escalation = tx.escalation_count + 1; - - let estimates = app - .db - .get_latest_block_fees_by_chain_id(tx.chain_id) - .await? 
- .context("Missing block")?; - - // Min increase of 20% on the priority fee required for a replacement tx - let increased_gas_price_percentage = - U256::from(100 + (10 * (1 + escalation))); - - let factor = U256::from(100); - - let max_priority_fee_per_gas_increase = - tx.initial_max_priority_fee_per_gas.0 - * increased_gas_price_percentage - / factor; - - // TODO: Add limits per network - let max_priority_fee_per_gas = - tx.initial_max_priority_fee_per_gas.0 - + max_priority_fee_per_gas_increase; - - let max_fee_per_gas = - estimates.base_fee_per_gas + max_priority_fee_per_gas; - - let eip1559_tx = Eip1559TransactionRequest { - from: None, - to: Some(NameOrAddress::from(Address::from(tx.tx_to.0))), - gas: Some(tx.gas_limit.0), - value: Some(tx.value.0), - data: Some(tx.data.into()), - nonce: Some(tx.nonce.into()), - access_list: AccessList::default(), - max_priority_fee_per_gas: Some(max_priority_fee_per_gas), - max_fee_per_gas: Some(max_fee_per_gas), - chain_id: Some(tx.chain_id.into()), - }; - - let pending_tx = middleware - .send_transaction(TypedTransaction::Eip1559(eip1559_tx), None) - .await?; - - let tx_hash = pending_tx.tx_hash(); - - tracing::info!(?tx.id, ?tx_hash, "Tx escalated"); - - app.db - .escalate_tx( - &tx.id, - tx_hash, - max_fee_per_gas, - max_priority_fee_per_gas, - ) - .await?; - } + escalate_txs(&app).await?; tokio::time::sleep(app.config.service.escalation_interval).await; } } + +#[tracing::instrument(skip(app))] +async fn escalate_txs(app: &App) -> eyre::Result<()> { + tracing::info!("Escalating transactions"); + + let txs_for_escalation = app + .db + .get_txs_for_escalation(app.config.service.escalation_interval) + .await?; + + tracing::info!("Got {} transactions to escalate", txs_for_escalation.len()); + + let txs_for_escalation = split_txs_per_relayer(txs_for_escalation); + + let mut futures = FuturesUnordered::new(); + + for (relayer_id, txs) in txs_for_escalation { + futures.push(escalate_relayer_txs(app, relayer_id, txs)); + } + + while let Some(result) = futures.next().await { + if let Err(err) = result { + tracing::error!(error = ?err, "Failed escalating txs"); + } + } + + Ok(()) +} + +#[tracing::instrument(skip(app, txs))] +async fn escalate_relayer_txs( + app: &App, + relayer_id: String, + txs: Vec, +) -> eyre::Result<()> { + let relayer = app.db.get_relayer(&relayer_id).await?; + + if txs.is_empty() { + tracing::info!("No transactions to escalate"); + } + + for tx in txs { + escalate_relayer_tx(app, &relayer, tx).await?; + } + + Ok(()) +} + +#[tracing::instrument(skip(app, relayer, tx), fields(tx_id = tx.id))] +async fn escalate_relayer_tx( + app: &App, + relayer: &RelayerInfo, + tx: TxForEscalation, +) -> eyre::Result<()> { + if !should_send_relayer_transactions(app, relayer).await? { + tracing::warn!(relayer_id = relayer.id, "Skipping relayer escalations"); + + return Ok(()); + } + + tracing::info!( + tx_id = tx.id, + escalation_count = tx.escalation_count, + "Escalating transaction" + ); + + let escalation = tx.escalation_count + 1; + + let middleware = app + .signer_middleware(tx.chain_id, tx.key_id.clone()) + .await?; + + tracing::info!("Escalating transaction - got middleware"); + + let fees = app + .db + .get_latest_block_fees_by_chain_id(tx.chain_id) + .await? 
+ .context("Missing block")?; + + tracing::info!("Escalating transaction - got block fees"); + + // Min increase of 20% on the priority fee required for a replacement tx + let factor = U256::from(100); + let increased_gas_price_percentage = + factor + U256::from(20 * (1 + escalation)); + + let initial_max_priority_fee_per_gas = + tx.initial_max_priority_fee_per_gas.0; + + let initial_max_fee_per_gas = tx.initial_max_fee_per_gas.0; + + let max_priority_fee_per_gas = initial_max_priority_fee_per_gas + * increased_gas_price_percentage + / factor; + + let max_fee_per_gas = + max_priority_fee_per_gas + fees.fee_estimates.base_fee_per_gas; + + let eip1559_tx = Eip1559TransactionRequest { + from: None, + to: Some(NameOrAddress::from(Address::from(tx.tx_to.0))), + gas: Some(tx.gas_limit.0), + value: Some(tx.value.0), + data: Some(tx.data.into()), + nonce: Some(tx.nonce.into()), + access_list: AccessList::default(), + max_priority_fee_per_gas: Some(max_priority_fee_per_gas), + max_fee_per_gas: Some(max_fee_per_gas), + chain_id: Some(tx.chain_id.into()), + }; + + tracing::info!("Escalating transaction - assembled tx"); + + let pending_tx = middleware + .send_transaction(TypedTransaction::Eip1559(eip1559_tx), None) + .await; + + tracing::info!("Escalating transaction - sent tx"); + + let pending_tx = match pending_tx { + Ok(pending_tx) => pending_tx, + Err(err) => { + tracing::error!(tx_id = tx.id, error = ?err, "Failed to escalate transaction"); + return Ok(()); + } + }; + + tracing::info!("Escalating transaction - got pending tx"); + + let tx_hash = pending_tx.tx_hash(); + + tracing::info!( + tx_id = tx.id, + ?tx_hash, + ?initial_max_priority_fee_per_gas, + ?initial_max_fee_per_gas, + ?max_priority_fee_per_gas, + ?max_fee_per_gas, + ?pending_tx, + "Escalated transaction" + ); + + app.db + .escalate_tx(&tx.id, tx_hash, max_fee_per_gas, max_fee_per_gas) + .await?; + + tracing::info!(tx_id = tx.id, "Escalated transaction saved"); + + Ok(()) +} + +fn split_txs_per_relayer( + txs: Vec, +) -> HashMap> { + let mut txs_per_relayer = HashMap::new(); + + for tx in txs { + let relayer_id = tx.relayer_id.clone(); + + let txs_for_relayer = + txs_per_relayer.entry(relayer_id).or_insert_with(Vec::new); + + txs_for_relayer.push(tx); + } + + for (_, txs) in txs_per_relayer.iter_mut() { + txs.sort_by_key(|tx| tx.escalation_count); + } + + txs_per_relayer +} diff --git a/src/tasks/finalize.rs b/src/tasks/finalize.rs new file mode 100644 index 0000000..9ee3d87 --- /dev/null +++ b/src/tasks/finalize.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; +use std::time::Duration; + +use crate::app::App; + +const TIME_BETWEEN_FINALIZATIONS_SECONDS: i64 = 60; + +pub async fn finalize_txs(app: Arc) -> eyre::Result<()> { + loop { + let finalization_timestamp = + chrono::Utc::now() - chrono::Duration::seconds(60 * 60); + + tracing::info!( + "Finalizing txs mined before {}", + finalization_timestamp + ); + + app.db.finalize_txs(finalization_timestamp).await?; + + tokio::time::sleep(Duration::from_secs( + TIME_BETWEEN_FINALIZATIONS_SECONDS as u64, + )) + .await; + } +} diff --git a/src/tasks/handle_reorgs.rs b/src/tasks/handle_reorgs.rs new file mode 100644 index 0000000..a18aa15 --- /dev/null +++ b/src/tasks/handle_reorgs.rs @@ -0,0 +1,31 @@ +use std::sync::Arc; + +use crate::app::App; + +pub async fn handle_hard_reorgs(app: Arc) -> eyre::Result<()> { + loop { + tracing::info!("Handling hard reorgs"); + + let reorged_txs = app.db.handle_hard_reorgs().await?; + + for tx in reorged_txs { + tracing::info!(tx_id = tx, "Transaction hard 
reorged"); + } + + tokio::time::sleep(app.config.service.hard_reorg_interval).await; + } +} + +pub async fn handle_soft_reorgs(app: Arc) -> eyre::Result<()> { + loop { + tracing::info!("Handling soft reorgs"); + + let txs = app.db.handle_soft_reorgs().await?; + + for tx in txs { + tracing::info!(tx_id = tx, "Transaction soft reorged"); + } + + tokio::time::sleep(app.config.service.soft_reorg_interval).await; + } +} diff --git a/src/tasks/index.rs b/src/tasks/index.rs index 77fd7f5..49eaa8b 100644 --- a/src/tasks/index.rs +++ b/src/tasks/index.rs @@ -1,124 +1,256 @@ use std::sync::Arc; use std::time::Duration; +use chrono::{DateTime, Utc}; use ethers::providers::{Http, Middleware, Provider}; -use ethers::types::{Block, BlockNumber, H256, U256}; -use eyre::ContextCompat; +use ethers::types::{Block, BlockNumber, H256}; +use eyre::{Context, ContextCompat}; +use futures::stream::FuturesUnordered; +use futures::StreamExt; use crate::app::App; use crate::broadcast_utils::gas_estimation::{ estimate_percentile_fees, FeesEstimate, }; -use crate::db::BlockTxStatus; +use crate::types::RelayerInfo; const BLOCK_FEE_HISTORY_SIZE: usize = 10; -const TRAILING_BLOCK_OFFSET: u64 = 5; const FEE_PERCENTILES: [f64; 5] = [5.0, 25.0, 50.0, 75.0, 95.0]; +const TIME_BETWEEN_FEE_ESTIMATION_SECONDS: u64 = 30; -pub async fn index_blocks(app: Arc) -> eyre::Result<()> { +const GAS_PRICE_FOR_METRICS_FACTOR: f64 = 1e-9; + +pub async fn index_chain(app: Arc, chain_id: u64) -> eyre::Result<()> { loop { - let next_block_numbers = app.db.get_next_block_numbers().await?; - - // TODO: Parallelize - for (block_number, chain_id) in next_block_numbers { - let chain_id = U256::from(chain_id); - let rpc = app - .rpcs - .get(&chain_id) - .context("Missing RPC for chain id")?; - - if let Some((block, fee_estimates)) = - fetch_block_with_fee_estimates(rpc, block_number).await? - { - app.db - .save_block( - block_number, - chain_id.as_u64(), - &block.transactions, - &fee_estimates, - BlockTxStatus::Mined, - ) - .await?; - - let relayer_addresses = - app.db.fetch_relayer_addresses(chain_id.as_u64()).await?; - - // TODO: Parallelize - for relayer_address in relayer_addresses { - let tx_count = rpc - .get_transaction_count(relayer_address, None) - .await?; - - app.db - .update_relayer_nonce( - chain_id.as_u64(), - relayer_address, - tx_count.as_u64(), - ) - .await?; - } - - if block_number > TRAILING_BLOCK_OFFSET { - let (block, fee_estimates) = - fetch_block_with_fee_estimates( - rpc, - block_number - TRAILING_BLOCK_OFFSET, - ) - .await? 
- .context("Missing trailing block")?; - - app.db - .save_block( - block_number, - chain_id.as_u64(), - &block.transactions, - &fee_estimates, - BlockTxStatus::Finalized, - ) - .await?; - } - } else { - tokio::time::sleep(Duration::from_secs(5)).await; - } + let ws_rpc = app.ws_provider(chain_id).await?; + let rpc = app.http_provider(chain_id).await?; + + // Subscribe to new block with the WS client which uses an unbounded receiver, buffering the stream + let mut blocks_stream = ws_rpc.subscribe_blocks().await?; + + // Get the first block from the stream, backfilling any missing blocks from the latest block in the db to the chain head + if let Some(latest_block) = blocks_stream.next().await { + backfill_to_block(app.clone(), chain_id, &rpc, latest_block) + .await?; } - app.db.update_transactions(BlockTxStatus::Mined).await?; - app.db.update_transactions(BlockTxStatus::Finalized).await?; + // Index incoming blocks from the stream + while let Some(block) = blocks_stream.next().await { + index_block(app.clone(), chain_id, &rpc, block).await?; + } } } -pub async fn fetch_block_with_fee_estimates( +#[tracing::instrument(skip(app, rpc, block))] +pub async fn index_block( + app: Arc, + chain_id: u64, rpc: &Provider, - block_id: impl Into, -) -> eyre::Result, FeesEstimate)>> { - let block_id = block_id.into(); + block: Block, +) -> eyre::Result<()> { + let block_number = block.number.context("Missing block number")?.as_u64(); - let block = rpc.get_block(block_id).await?; + tracing::info!(chain_id, block_number, "Indexing block"); - let Some(block) = block else { - return Ok(None); - }; + let block_timestamp_seconds = block.timestamp.as_u64(); + let block_timestamp = + DateTime::::from_timestamp(block_timestamp_seconds as i64, 0) + .context("Invalid timestamp")?; - let fee_history = rpc - .fee_history(BLOCK_FEE_HISTORY_SIZE, block_id, &FEE_PERCENTILES) + let block = rpc + .get_block(block_number) + .await? + .context("Missing block")?; + + app.db + .save_block( + block.number.unwrap().as_u64(), + chain_id, + block_timestamp, + &block.transactions, + ) .await?; - let fee_estimates = estimate_percentile_fees(&fee_history)?; + let mined_txs = app.db.mine_txs(chain_id).await?; + + let metric_labels: [(&str, String); 1] = + [("chain_id", chain_id.to_string())]; + for tx in mined_txs { + tracing::info!( + tx_id = tx.0, + tx_hash = ?tx.1, + "Tx mined" + ); + + metrics::counter!("tx_mined", &metric_labels).increment(1); + } + + let relayers = app.db.get_relayers_by_chain_id(chain_id).await?; - Ok(Some((block, fee_estimates))) + update_relayer_nonces(&relayers, &app, rpc, chain_id).await?; + + Ok(()) } -pub async fn fetch_block( +#[tracing::instrument(skip(app, rpc, latest_block))] +pub async fn backfill_to_block( + app: Arc, + chain_id: u64, rpc: &Provider, - block_id: impl Into, -) -> eyre::Result, FeesEstimate)>> { - let block_id = block_id.into(); + latest_block: Block, +) -> eyre::Result<()> { + // Get the latest block from the db + if let Some(latest_db_block_number) = + app.db.get_latest_block_number(chain_id).await? + { + let next_block_number: u64 = latest_db_block_number + 1; + + // Get the first block from the stream and backfill any missing blocks + let latest_block_number = latest_block + .number + .context("Missing block number")? 
+ +pub async fn estimate_gas(app: Arc<App>, chain_id: u64) -> eyre::Result<()> { + let rpc = app.http_provider(chain_id).await?; + + loop { + let latest_block_number = app + .db + .get_latest_block_number_without_fee_estimates(chain_id) + .await?; + + let Some(latest_block_number) = latest_block_number else { + tracing::info!(chain_id, "No blocks to estimate fees for"); + + tokio::time::sleep(Duration::from_secs(2)).await; + + continue; + }; + + tracing::info!( + chain_id, + block_number = latest_block_number, + "Estimating fees" + ); + + let fee_estimates = get_block_fee_estimates(&rpc, latest_block_number) + .await + .context("Failed to fetch fee estimates")?; + + let gas_price = rpc.get_gas_price().await?; + + app.db + .save_block_fees( + latest_block_number, + chain_id, + &fee_estimates, + gas_price, + ) + .await?; + + let labels = [("chain_id", chain_id.to_string())]; + metrics::gauge!("gas_price", &labels) + .set(gas_price.as_u64() as f64 * GAS_PRICE_FOR_METRICS_FACTOR); + metrics::gauge!("base_fee_per_gas", &labels).set( + fee_estimates.base_fee_per_gas.as_u64() as f64 + * GAS_PRICE_FOR_METRICS_FACTOR, + ); + + for (i, percentile) in FEE_PERCENTILES.iter().enumerate() { + let percentile_fee = fee_estimates.percentile_fees[i]; + + metrics::gauge!( + "percentile_fee", + &[ + ("chain_id", chain_id.to_string()), + ("percentile", percentile.to_string()), + ] + ) + .set(percentile_fee.as_u64() as f64 * GAS_PRICE_FOR_METRICS_FACTOR); + } + + tokio::time::sleep(Duration::from_secs( + TIME_BETWEEN_FEE_ESTIMATION_SECONDS, + )) + .await; + } +}
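+ +// GAS_PRICE_FOR_METRICS_FACTOR (1e-9) converts wei to gwei for these gauges, +// so e.g. a gas price of 30_000_000_000 wei is reported as 30.0.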
+ +async fn update_relayer_nonces( + relayers: &[RelayerInfo], + app: &App, + rpc: &Provider<Http>, + chain_id: u64, +) -> Result<(), eyre::Error> { + let mut futures = FuturesUnordered::new(); + + for relayer in relayers { + futures.push(update_relayer_nonce(app, rpc, relayer, chain_id)); + } + + while let Some(result) = futures.next().await { + result?; + } + + Ok(()) +} + +#[tracing::instrument(skip(app, rpc, relayer), fields(relayer_id = relayer.id))] +async fn update_relayer_nonce( + app: &App, + rpc: &Provider<Http>, + relayer: &RelayerInfo, + chain_id: u64, +) -> eyre::Result<()> { + let tx_count = rpc.get_transaction_count(relayer.address.0, None).await?; + + if tx_count.as_u64() == relayer.current_nonce { + return Ok(()); + } + + tracing::info!( + relayer_id = relayer.id, + current_nonce = %relayer.current_nonce, + nonce = %relayer.nonce, + new_current_nonce = %tx_count.as_u64(), + relayer_address = ?relayer.address.0, + "Updating relayer nonce" + ); + + app.db + .update_relayer_nonce(chain_id, relayer.address.0, tx_count.as_u64()) + .await?; + + Ok(()) +} + +pub async fn get_block_fee_estimates( + rpc: &Provider<Http>, + block_id: impl Into<BlockNumber>, +) -> eyre::Result<FeesEstimate> { + let block_id = block_id.into(); let fee_history = rpc .fee_history(BLOCK_FEE_HISTORY_SIZE, block_id, &FEE_PERCENTILES) @@ -126,5 +258,5 @@ let fee_estimates = estimate_percentile_fees(&fee_history)?; - Ok(Some((block, fee_estimates))) + Ok(fee_estimates) } diff --git a/src/tasks/metrics.rs b/src/tasks/metrics.rs new file mode 100644 index 0000000..f8e989b --- /dev/null +++ b/src/tasks/metrics.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; +use std::time::Duration; + +use crate::app::App; + +const EMIT_METRICS_INTERVAL: Duration = Duration::from_secs(1); + +pub async fn emit_metrics(app: Arc<App>) -> eyre::Result<()> { + loop { + let chain_ids = app.db.get_network_chain_ids().await?; + + for chain_id in chain_ids { + let stats = app.db.get_stats(chain_id).await?; + + // TODO: Add labels for env, etc. + let labels = [("chain_id", chain_id.to_string())]; + + metrics::gauge!("pending_txs", &labels) + .set(stats.pending_txs as f64); + metrics::gauge!("mined_txs", &labels).set(stats.mined_txs as f64); + metrics::gauge!("finalized_txs", &labels) + .set(stats.finalized_txs as f64); + metrics::gauge!("total_indexed_blocks", &labels) + .set(stats.total_indexed_blocks as f64); + metrics::gauge!("block_txs", &labels).set(stats.block_txs as f64); + } + + tokio::time::sleep(EMIT_METRICS_INTERVAL).await; + } +} diff --git a/src/tasks/prune.rs b/src/tasks/prune.rs new file mode 100644 index 0000000..c505eb0 --- /dev/null +++ b/src/tasks/prune.rs @@ -0,0 +1,53 @@ +use std::sync::Arc; +use std::time::Duration; + +use chrono::Utc; + +use crate::app::App; + +const BLOCK_PRUNING_INTERVAL: Duration = Duration::from_secs(60); +const TX_PRUNING_INTERVAL: Duration = Duration::from_secs(60); + +// Each helper converts its unit to seconds, e.g. days(7) == 604_800 +const fn minutes(mins: i64) -> i64 { + mins * 60 +} + +const fn hours(hrs: i64) -> i64 { + minutes(hrs) * 60 +} + +const fn days(d: i64) -> i64 { + hours(d) * 24 +} + +// TODO: This should be a per network setting +const BLOCK_PRUNE_AGE_SECONDS: i64 = days(7); +// NOTE: We must prune txs earlier than blocks +// as a missing block tx indicates a hard reorg +const TX_PRUNE_AGE_SECONDS: i64 = days(5); + +pub async fn prune_blocks(app: Arc<App>) -> eyre::Result<()> { + loop { + let prune_age = chrono::Duration::seconds(BLOCK_PRUNE_AGE_SECONDS); + let block_prune_timestamp = Utc::now() - prune_age; + + tracing::info!(?block_prune_timestamp, "Pruning blocks"); + + app.db.prune_blocks(block_prune_timestamp).await?; + + tokio::time::sleep(BLOCK_PRUNING_INTERVAL).await; + } +} + +pub async fn prune_txs(app: Arc<App>) -> eyre::Result<()> { + loop { + let prune_age = chrono::Duration::seconds(TX_PRUNE_AGE_SECONDS); + let tx_prune_timestamp = Utc::now() - prune_age; + + tracing::info!(?tx_prune_timestamp, "Pruning txs"); + + app.db.prune_txs(tx_prune_timestamp).await?; + + tokio::time::sleep(TX_PRUNING_INTERVAL).await; + } +} diff --git a/src/types.rs b/src/types.rs new file mode 100644 index 0000000..657e245 --- /dev/null +++ b/src/types.rs @@ -0,0 +1,153 @@ +use serde::{Deserialize, Serialize}; +use sqlx::prelude::FromRow; +use sqlx::types::Json; + +use crate::db::data::{AddressWrapper, U256Wrapper}; + +#[derive(Deserialize, Serialize, Debug, Clone, Copy, Default, sqlx::Type)] +#[serde(rename_all = "camelCase")] +#[sqlx(type_name = "transaction_priority", rename_all = "camelCase")] +pub enum TransactionPriority { + // 5th percentile + Slowest = 0, + // 25th percentile + Slow = 1, + // 50th percentile + #[default] + Regular = 2, + // 75th percentile + Fast = 3, + // 95th percentile + Fastest = 4, +} + +impl TransactionPriority { + pub fn to_percentile_index(self) -> usize { + self as usize + } +}
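+ +// The discriminants line up with FEE_PERCENTILES ([5.0, 25.0, 50.0, 75.0, 95.0]) +// in src/tasks/index.rs: e.g. TransactionPriority::Regular maps to index 2, +// the 50th-percentile fee estimate, and Fastest to the 95th.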
+ +#[derive(Deserialize, Serialize, Debug, Clone, FromRow)] +#[serde(rename_all = "camelCase")] +pub struct RelayerInfo { + pub id: String, + pub name: String, + #[sqlx(try_from = "i64")] + pub chain_id: u64, + pub key_id: String, + pub address: AddressWrapper, + #[sqlx(try_from = "i64")] + pub nonce: u64, + #[sqlx(try_from = "i64")] + pub current_nonce: u64, + #[sqlx(try_from = "i64")] + pub max_inflight_txs: u64, + #[sqlx(try_from = "i64")] + pub max_queued_txs: u64, + pub gas_price_limits: Json<Vec<RelayerGasPriceLimit>>, + pub enabled: bool, +} + +#[derive(Deserialize, Serialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct RelayerUpdate { + #[serde(default)] + pub relayer_name: Option<String>, + #[serde(default)] + pub max_inflight_txs: Option<u64>, + #[serde(default)] + pub max_queued_txs: Option<u64>, + #[serde(default)] + pub gas_price_limits: Option<Vec<RelayerGasPriceLimit>>, + #[serde(default)] + pub enabled: Option<bool>, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct RelayerGasPriceLimit { + pub value: U256Wrapper, + pub chain_id: i64, +} + +impl RelayerUpdate { + pub fn with_relayer_name(mut self, relayer_name: String) -> Self { + self.relayer_name = Some(relayer_name); + self + } + + pub fn with_max_inflight_txs(mut self, max_inflight_txs: u64) -> Self { + self.max_inflight_txs = Some(max_inflight_txs); + self + } + + pub fn with_max_queued_txs(mut self, max_queued_txs: u64) -> Self { + self.max_queued_txs = Some(max_queued_txs); + self + } + + pub fn with_gas_price_limits( + mut self, + gas_price_limits: Vec<RelayerGasPriceLimit>, + ) -> Self { + self.gas_price_limits = Some(gas_price_limits); + self + } + + pub fn with_enabled(mut self, enabled: bool) -> Self { + self.enabled = Some(enabled); + self + } +}
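+ +// Usage sketch (illustrative): updates are partial, so callers chain only the +// fields they want to change, e.g. +// +// let update = RelayerUpdate::default() +// .with_max_queued_txs(20) +// .with_enabled(true); +// +// Fields left as None are presumably skipped by the update, leaving the +// stored relayer values untouched.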
{r#" + { + "id": "id", + "name": "name", + "chainId": 1, + "keyId": "key_id", + "address": "0x0000000000000000000000000000000000000000", + "nonce": 0, + "currentNonce": 0, + "maxInflightTxs": 0, + "maxQueuedTxs": 0, + "gasPriceLimits": [ + { + "value": "0x0", + "chainId": 1 + } + ], + "enabled": true + } + "#}; + + assert_eq!(json.trim(), expected.trim()); + } +} diff --git a/tests/common/anvil_builder.rs b/tests/common/anvil_builder.rs new file mode 100644 index 0000000..ac1fd55 --- /dev/null +++ b/tests/common/anvil_builder.rs @@ -0,0 +1,67 @@ +use std::time::Duration; + +use ethers::providers::Middleware; +use ethers::types::{Eip1559TransactionRequest, U256}; +use ethers::utils::{Anvil, AnvilInstance}; + +use super::prelude::{ + setup_middleware, DEFAULT_ANVIL_ACCOUNT, DEFAULT_ANVIL_BLOCK_TIME, + SECONDARY_ANVIL_PRIVATE_KEY, +}; + +#[derive(Debug, Clone, Default)] +pub struct AnvilBuilder { + pub block_time: Option, + pub port: Option, +} + +impl AnvilBuilder { + pub fn block_time(mut self, block_time: u64) -> Self { + self.block_time = Some(block_time); + self + } + + pub fn port(mut self, port: u16) -> Self { + self.port = Some(port); + self + } + + pub async fn spawn(self) -> eyre::Result { + let mut anvil = Anvil::new(); + + let block_time = if let Some(block_time) = self.block_time { + block_time + } else { + DEFAULT_ANVIL_BLOCK_TIME + }; + anvil = anvil.block_time(block_time); + + if let Some(port) = self.port { + anvil = anvil.port(port); + } + + let anvil = anvil.spawn(); + + let middleware = + setup_middleware(anvil.endpoint(), SECONDARY_ANVIL_PRIVATE_KEY) + .await?; + + // Wait for the chain to start and produce at least one block + tokio::time::sleep(Duration::from_secs(block_time)).await; + + // We need to seed some transactions so we can get fee estimates on the first block + middleware + .send_transaction( + Eip1559TransactionRequest { + to: Some(DEFAULT_ANVIL_ACCOUNT.into()), + value: Some(U256::from(100u64)), + ..Default::default() + }, + None, + ) + .await? 
diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 0b25bcc..c514749 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,23 +1,14 @@ #![allow(dead_code)] // Needed because this module is imported as module by many test crates -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use std::time::Duration; use ethers::core::k256::ecdsa::SigningKey; use ethers::middleware::SignerMiddleware; use ethers::providers::{Http, Middleware, Provider}; -use ethers::signers::LocalWallet; -use ethers::types::{Address, Eip1559TransactionRequest, H160, U256}; -use ethers_signers::Signer; -use fake_rpc::DoubleAnvil; +use ethers::signers::{LocalWallet, Signer}; +use ethers::types::{Address, H160, U256}; use postgres_docker_utils::DockerContainerGuard; -use service::config::{ - Config, DatabaseConfig, KeysConfig, LocalKeysConfig, RpcConfig, - ServerConfig, TxSitterConfig, -}; -use service::service::Service; -use tokio::task::JoinHandle; use tracing::level_filters::LevelFilter; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; @@ -25,6 +16,33 @@ use tracing_subscriber::EnvFilter; pub type AppMiddleware = SignerMiddleware<Arc<Provider<Http>>, LocalWallet>; +mod anvil_builder; +mod service_builder; + +pub use self::anvil_builder::AnvilBuilder; +pub use self::service_builder::ServiceBuilder; + +#[allow(unused_imports)] +pub mod prelude { + pub use std::time::Duration; + + pub use ethers::prelude::{Http, Provider}; + pub use ethers::providers::Middleware; + pub use ethers::types::{Eip1559TransactionRequest, H256, U256}; + pub use ethers::utils::parse_units; + pub use futures::stream::FuturesUnordered; + pub use futures::StreamExt; + pub use tx_sitter::api_key::ApiKey; + pub use tx_sitter::client::TxSitterClient; + pub use tx_sitter::server::routes::relayer::{ + CreateApiKeyResponse, CreateRelayerRequest, CreateRelayerResponse, + }; + pub use tx_sitter::server::routes::transaction::SendTxRequest; + pub use url::Url; + + pub use super::*; +} + pub const DEFAULT_ANVIL_ACCOUNT: Address = H160(hex_literal::hex!( "f39Fd6e51aad88F6F4ce6aB8827279cffFb92266" )); @@ -33,23 +51,18 @@ pub const DEFAULT_ANVIL_PRIVATE_KEY: &[u8] = &hex_literal::hex!( "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" ); +pub const SECONDARY_ANVIL_PRIVATE_KEY: &[u8] = &hex_literal::hex!( + "59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" +); + pub const ARBITRARY_ADDRESS: Address = H160(hex_literal::hex!( "1Ed53d680B8890DAe2a63f673a85fFDE1FD5C7a2" )); pub const DEFAULT_ANVIL_CHAIN_ID: u64 = 31337; +pub const DEFAULT_ANVIL_BLOCK_TIME: u64 = 2; -pub struct DoubleAnvilHandle { - pub double_anvil: Arc<DoubleAnvil>, - local_addr: SocketAddr, - server_handle: JoinHandle<eyre::Result<()>>, -} - -impl DoubleAnvilHandle { - pub fn local_addr(&self) -> String { - self.local_addr.to_string() - } -} +pub const DEFAULT_RELAYER_ID: &str = "1b908a34-5dc1-4d2d-a146-5eb46e975830";
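+ +// This fixed UUID matches the relayer pre-seeded through the `predefined` +// config in tests/common/service_builder.rs, which is why tests can mint API +// keys for DEFAULT_RELAYER_ID without creating a relayer first.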
pub fn setup_tracing() { tracing_subscriber::registry() @@ -58,7 +71,7 @@ pub fn setup_tracing() { EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) // Logging from fake_rpc can get very messy so we set it to warn only - .parse_lossy("info,fake_rpc=warn"), + .parse_lossy("info,tx_sitter=debug,fake_rpc=warn"), ) .init(); } @@ -71,79 +84,11 @@ pub async fn setup_db() -> eyre::Result<(String, DockerContainerGuard)> { Ok((url, db_container)) } -pub async fn setup_double_anvil() -> eyre::Result<DoubleAnvilHandle> { - let (double_anvil, server) = fake_rpc::serve(0).await; - - let local_addr = server.local_addr(); - - let server_handle = tokio::spawn(async move { - server.await?; - Ok(()) - }); - - let middleware = setup_middleware( - format!("http://{local_addr}"), - DEFAULT_ANVIL_PRIVATE_KEY, - ) - .await?; - - // We need to seed some transactions so we can get fee estimates on the first block - middleware - .send_transaction( - Eip1559TransactionRequest { - to: Some(DEFAULT_ANVIL_ACCOUNT.into()), - value: Some(U256::from(100u64)), - ..Default::default() - }, - None, - ) - .await? - .await?; - - Ok(DoubleAnvilHandle { - double_anvil, - local_addr, - server_handle, - }) -} - -pub async fn setup_service( - rpc_url: &str, - db_connection_url: &str, - escalation_interval: Duration, -) -> eyre::Result<Service> { - println!("rpc_url.to_string() = {}", rpc_url); - - let config = Config { - service: TxSitterConfig { - escalation_interval, - }, - server: ServerConfig { - host: SocketAddr::V4(SocketAddrV4::new( - Ipv4Addr::new(127, 0, 0, 1), - 0, - )), - disable_auth: true, - }, - rpc: RpcConfig { - rpcs: vec![format!("http://{}", rpc_url.to_string())], - }, - database: DatabaseConfig { - connection_string: db_connection_url.to_string(), - }, - keys: KeysConfig::Local(LocalKeysConfig {}), - }; - - let service = Service::new(config).await?; - - Ok(service) -} - pub async fn setup_middleware( rpc_url: impl AsRef<str>, private_key: &[u8], ) -> eyre::Result<AppMiddleware> { - let provider = Provider::<Http>::new(rpc_url.as_ref().parse()?); + let provider = setup_provider(rpc_url).await?; let wallet = LocalWallet::from(SigningKey::from_slice(private_key)?) .with_chain_id(provider.get_chainid().await?.as_u64()); @@ -152,3 +97,31 @@ Ok(middleware) } + +pub async fn setup_provider( + rpc_url: impl AsRef<str>, +) -> eyre::Result<Provider<Http>> { + let provider = Provider::<Http>::new(rpc_url.as_ref().parse()?); + + Ok(provider) +} + +pub async fn await_balance( + provider: &Provider<Http>, + expected_balance: U256, + address: Address, +) -> eyre::Result<()> { + for _ in 0..50 { + let balance = provider.get_balance(address, None).await?; + + tracing::info!(?balance, ?expected_balance, "Checking balance"); + + if balance >= expected_balance { + return Ok(()); + } else { + tokio::time::sleep(Duration::from_secs(5)).await; + } + } + + eyre::bail!("Balance did not reach expected value"); +} diff --git a/tests/common/service_builder.rs b/tests/common/service_builder.rs new file mode 100644 index 0000000..b76e92e --- /dev/null +++ b/tests/common/service_builder.rs @@ -0,0 +1,99 @@ +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::time::Duration; + +use ethers::utils::AnvilInstance; +use tx_sitter::api_key::ApiKey; +use tx_sitter::client::TxSitterClient; +use tx_sitter::config::{ + Config, DatabaseConfig, KeysConfig, LocalKeysConfig, Predefined, + PredefinedNetwork, PredefinedRelayer, ServerConfig, TxSitterConfig, +}; +use tx_sitter::service::Service; + +use super::prelude::{ + DEFAULT_ANVIL_CHAIN_ID, DEFAULT_ANVIL_PRIVATE_KEY, DEFAULT_RELAYER_ID, +}; + +pub struct ServiceBuilder { + escalation_interval: Duration, + soft_reorg_interval: Duration, + hard_reorg_interval: Duration, +} + +impl Default for ServiceBuilder { + fn default() -> Self { + Self { + escalation_interval: Duration::from_secs(30), + soft_reorg_interval: Duration::from_secs(45), + hard_reorg_interval: Duration::from_secs(60), + } + } +} + +impl ServiceBuilder { + pub fn escalation_interval(mut self, interval: Duration) -> Self { + self.escalation_interval = interval; + self + } + + pub fn soft_reorg_interval(mut self, interval: Duration) -> Self
{ + self.soft_reorg_interval = interval; + self + } + + pub fn hard_reorg_interval(mut self, interval: Duration) -> Self { + self.hard_reorg_interval = interval; + self + } + + pub async fn build( + self, + anvil: &AnvilInstance, + db_url: &str, + ) -> eyre::Result<(Service, TxSitterClient)> { + let anvil_private_key = hex::encode(DEFAULT_ANVIL_PRIVATE_KEY); + + let config = Config { + service: TxSitterConfig { + escalation_interval: self.escalation_interval, + soft_reorg_interval: self.soft_reorg_interval, + hard_reorg_interval: self.hard_reorg_interval, + datadog_enabled: false, + statsd_enabled: false, + predefined: Some(Predefined { + network: PredefinedNetwork { + chain_id: DEFAULT_ANVIL_CHAIN_ID, + name: "Anvil".to_string(), + http_rpc: anvil.endpoint(), + ws_rpc: anvil.ws_endpoint(), + }, + relayer: PredefinedRelayer { + name: "Anvil".to_string(), + id: DEFAULT_RELAYER_ID.to_string(), + key_id: anvil_private_key, + chain_id: DEFAULT_ANVIL_CHAIN_ID, + // TODO: Use this key in tests + api_key: ApiKey::random(DEFAULT_RELAYER_ID), + }, + }), + }, + server: ServerConfig { + host: SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(127, 0, 0, 1), + 0, + )), + username: None, + password: None, + }, + database: DatabaseConfig::connection_string(db_url), + keys: KeysConfig::Local(LocalKeysConfig::default()), + }; + + let service = Service::new(config).await?; + + let client = + TxSitterClient::new(format!("http://{}", service.local_addr())); + + Ok((service, client)) + } +} diff --git a/tests/create_relayer.rs b/tests/create_relayer.rs index 4a0acf6..17a8233 100644 --- a/tests/create_relayer.rs +++ b/tests/create_relayer.rs @@ -1,36 +1,23 @@ -use std::time::Duration; - -use service::server::data::{CreateRelayerRequest, CreateRelayerResponse}; - -use crate::common::*; - mod common; -const ESCALATION_INTERVAL: Duration = Duration::from_secs(30); +use crate::common::prelude::*; #[tokio::test] async fn create_relayer() -> eyre::Result<()> { setup_tracing(); let (db_url, _db_container) = setup_db().await?; - let double_anvil = setup_double_anvil().await?; + let anvil = AnvilBuilder::default().spawn().await?; - let service = - setup_service(&double_anvil.local_addr(), &db_url, ESCALATION_INTERVAL) - .await?; + let (_service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; - let addr = service.local_addr(); - - let response = reqwest::Client::new() - .post(&format!("http://{}/1/relayer/create", addr)) - .json(&CreateRelayerRequest { + let CreateRelayerResponse { .. } = client + .create_relayer(&CreateRelayerRequest { name: "Test relayer".to_string(), chain_id: DEFAULT_ANVIL_CHAIN_ID, }) - .send() .await?; - let _response: CreateRelayerResponse = response.json().await?; - Ok(()) } diff --git a/tests/disabled_relayer.rs b/tests/disabled_relayer.rs new file mode 100644 index 0000000..20a1953 --- /dev/null +++ b/tests/disabled_relayer.rs @@ -0,0 +1,53 @@ +mod common; + +use tx_sitter::types::RelayerUpdate; + +use crate::common::prelude::*; + +#[tokio::test] +async fn disabled_relayer() -> eyre::Result<()> { + setup_tracing(); + + let (db_url, _db_container) = setup_db().await?; + let anvil = AnvilBuilder::default().spawn().await?; + + let (_service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; + + tracing::info!("Creating relayer"); + let CreateRelayerResponse { relayer_id, .. 
} = client + .create_relayer(&CreateRelayerRequest { + name: "Test relayer".to_string(), + chain_id: DEFAULT_ANVIL_CHAIN_ID, + }) + .await?; + + tracing::info!("Creating API key"); + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(&relayer_id).await?; + + tracing::info!("Disabling relayer"); + client + .update_relayer( + &relayer_id, + RelayerUpdate::default().with_enabled(false), + ) + .await?; + + let value: U256 = parse_units("1", "ether")?.into(); + let response = client + .send_tx( + &api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + gas_limit: U256::from(21_000), + ..Default::default() + }, + ) + .await; + + assert!(response.is_err()); + + Ok(()) +} diff --git a/tests/escalation.rs b/tests/escalation.rs new file mode 100644 index 0000000..cab10c6 --- /dev/null +++ b/tests/escalation.rs @@ -0,0 +1,86 @@ +mod common; + +use crate::common::prelude::*; + +const ESCALATION_INTERVAL: Duration = Duration::from_secs(2); +const ANVIL_BLOCK_TIME: u64 = 6;
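+ +// The escalation interval (2s) is deliberately much shorter than the block +// time (6s), so at least one escalation should fire before the tx can be +// mined, and the final tx hash observed below is expected to differ from the +// initial one.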
+ +#[tokio::test] +async fn escalation() -> eyre::Result<()> { + setup_tracing(); + + let (db_url, _db_container) = setup_db().await?; + let anvil = AnvilBuilder::default() + .block_time(ANVIL_BLOCK_TIME) + .spawn() + .await?; + + let (_service, client) = ServiceBuilder::default() + .escalation_interval(ESCALATION_INTERVAL) + .build(&anvil, &db_url) + .await?; + + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; + + let provider = setup_provider(anvil.endpoint()).await?; + + // Send a transaction + let value: U256 = parse_units("1", "ether")?.into(); + let tx = client + .send_tx( + &api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + gas_limit: U256::from(21_000), + ..Default::default() + }, + ) + .await?; + + let initial_tx_hash = get_tx_hash(&client, &api_key, &tx.tx_id).await?; + + await_balance(&provider, value).await?; + let final_tx_hash = get_tx_hash(&client, &api_key, &tx.tx_id).await?; + + assert_ne!( + initial_tx_hash, final_tx_hash, + "Escalation should have occurred" + ); + + Ok(()) +} + +async fn await_balance( + provider: &Provider<Http>, + value: U256, +) -> eyre::Result<()> { + for _ in 0..24 { + let balance = provider.get_balance(ARBITRARY_ADDRESS, None).await?; + + if balance == value { + return Ok(()); + } else { + tokio::time::sleep(Duration::from_secs(3)).await; + } + } + + eyre::bail!("Balance not updated in time"); +} + +async fn get_tx_hash( + client: &TxSitterClient, + api_key: &ApiKey, + tx_id: &str, +) -> eyre::Result<H256> { + loop { + let tx = client.get_tx(api_key, tx_id).await?; + + if let Some(tx_hash) = tx.tx_hash { + return Ok(tx_hash); + } else { + tokio::time::sleep(Duration::from_secs(3)).await; + } + } } diff --git a/tests/reorg.rs b/tests/reorg.rs new file mode 100644 index 0000000..a25ae9a --- /dev/null +++ b/tests/reorg.rs @@ -0,0 +1,74 @@ +mod common; + +use crate::common::prelude::*; + +#[tokio::test] +async fn reorg() -> eyre::Result<()> { + setup_tracing(); + + let (db_url, _db_container) = setup_db().await?; + let anvil = AnvilBuilder::default().spawn().await?; + let anvil_port = anvil.port(); + + let (_service, client) = ServiceBuilder::default() + .hard_reorg_interval(Duration::from_secs(2)) + .build(&anvil, &db_url) + .await?; + + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; + + let provider = setup_provider(anvil.endpoint()).await?; + + // Send a transaction + let value: U256 = parse_units("1", "ether")?.into(); + client + .send_tx( + &api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + gas_limit: U256::from(21_000), + ..Default::default() + }, + ) + .await?; + + await_balance(&provider, value).await?; + + // Drop anvil to simulate a reorg + tracing::warn!("Dropping anvil & restarting at port {anvil_port}"); + drop(anvil); + + let anvil = AnvilBuilder::default().port(anvil_port).spawn().await?; + let provider = setup_provider(anvil.endpoint()).await?; + + await_balance(&provider, value).await?; + + Ok(()) +} + +async fn await_balance( + provider: &Provider<Http>, + value: U256, +) -> eyre::Result<()> { + for _ in 0..24 { + let balance = match provider.get_balance(ARBITRARY_ADDRESS, None).await + { + Ok(balance) => balance, + Err(err) => { + tracing::warn!("Error getting balance: {:?}", err); + tokio::time::sleep(Duration::from_secs(3)).await; + continue; + } + }; + + if balance == value { + return Ok(()); + } else { + tokio::time::sleep(Duration::from_secs(3)).await; + } + } + + eyre::bail!("Balance not updated in time"); +} diff --git a/tests/rpc_access.rs b/tests/rpc_access.rs new file mode 100644 index 0000000..15eed04 --- /dev/null +++ b/tests/rpc_access.rs @@ -0,0 +1,34 @@ +mod common; + +use crate::common::prelude::*; + +#[tokio::test] +async fn rpc_access() -> eyre::Result<()> { + setup_tracing(); + + let (db_url, _db_container) = setup_db().await?; + let anvil = AnvilBuilder::default().spawn().await?; + + let (service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; + + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; + + let rpc_url = format!( + "http://{}/1/api/{}/rpc", + service.local_addr(), + api_key.reveal()? + ); + + let provider = Provider::new(Http::new(rpc_url.parse::<Url>()?)); + + let latest_block_number = provider.get_block_number().await?; + + let very_future_block = latest_block_number + 1000; + let very_future_block = provider.get_block(very_future_block).await?; + + assert!(very_future_block.is_none()); + + Ok(()) +} diff --git a/tests/send_many_txs.rs b/tests/send_many_txs.rs index 6e2c23f..bb273f6 100644 --- a/tests/send_many_txs.rs +++ b/tests/send_many_txs.rs @@ -1,101 +1,53 @@ -use std::time::Duration; - -use ethers::providers::Middleware; -use ethers::types::{Eip1559TransactionRequest, U256}; -use ethers::utils::parse_units; -use service::server::data::{ - CreateRelayerRequest, CreateRelayerResponse, SendTxRequest, SendTxResponse, -}; - mod common; -use crate::common::*; - -const ESCALATION_INTERVAL: Duration = Duration::from_secs(30); +use crate::common::prelude::*; #[tokio::test] async fn send_many_txs() -> eyre::Result<()> { setup_tracing(); let (db_url, _db_container) = setup_db().await?; - let double_anvil = setup_double_anvil().await?; - - let service = - setup_service(&double_anvil.local_addr(), &db_url, ESCALATION_INTERVAL) - .await?; - - let addr = service.local_addr(); - - let response = reqwest::Client::new() - .post(&format!("http://{}/1/relayer/create", addr)) - .json(&CreateRelayerRequest { - name: "Test relayer".to_string(), - chain_id: DEFAULT_ANVIL_CHAIN_ID, - }) - .send() - .await?; - - let response: CreateRelayerResponse = response.json().await?; + let anvil = AnvilBuilder::default().spawn().await?; - // Fund the relayer - let middleware = setup_middleware( - format!("http://{}", double_anvil.local_addr()), - DEFAULT_ANVIL_PRIVATE_KEY, - ) - .await?; + let (_service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; - let amount: U256 = parse_units("1000", "ether")?.into(); + let
CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; - middleware - .send_transaction( - Eip1559TransactionRequest { - to: Some(response.address.into()), - value: Some(amount), - ..Default::default() - }, - None, - ) - .await? - .await?; - - let provider = middleware.provider(); - - let current_balance = provider.get_balance(response.address, None).await?; - assert_eq!(current_balance, amount); + let provider = setup_provider(anvil.endpoint()).await?; // Send a transaction let value: U256 = parse_units("10", "ether")?.into(); let num_transfers = 10; - let relayer_id = response.relayer_id; + let mut tasks = FuturesUnordered::new(); for _ in 0..num_transfers { - let response = reqwest::Client::new() - .post(&format!("http://{}/1/tx/send", addr)) - .json(&SendTxRequest { - relayer_id: relayer_id.clone(), - to: ARBITRARY_ADDRESS, - value, - gas_limit: U256::from(21_000), - ..Default::default() - }) - .send() - .await?; + let client = &client; + tasks.push(async { + client + .send_tx( + &api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + gas_limit: U256::from(21_000), + ..Default::default() + }, + ) + .await?; + + Ok(()) + }); + } - let _response: SendTxResponse = response.json().await?; + while let Some(result) = tasks.next().await { + let result: eyre::Result<()> = result; + result?; } let expected_balance = value * num_transfers; - for _ in 0..50 { - let balance = provider.get_balance(ARBITRARY_ADDRESS, None).await?; - - tracing::info!(?balance, ?expected_balance, "Checking balance"); - - if balance == expected_balance { - return Ok(()); - } else { - tokio::time::sleep(Duration::from_secs(5)).await; - } - } + await_balance(&provider, expected_balance, ARBITRARY_ADDRESS).await?; - panic!("Transactions were not sent") + Ok(()) } diff --git a/tests/send_too_many_txs.rs b/tests/send_too_many_txs.rs new file mode 100644 index 0000000..226cc43 --- /dev/null +++ b/tests/send_too_many_txs.rs @@ -0,0 +1,117 @@ +mod common; + +use tx_sitter::client::ClientError; +use tx_sitter::server::ApiError; +use tx_sitter::types::{RelayerUpdate, TransactionPriority}; + +use crate::common::prelude::*; + +const MAX_QUEUED_TXS: usize = 20; + +#[tokio::test] +async fn send_too_many_txs() -> eyre::Result<()> { + setup_tracing(); + + let (db_url, _db_container) = setup_db().await?; + let anvil = AnvilBuilder::default().spawn().await?; + + let (_service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; + + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; + + let CreateRelayerResponse { + relayer_id: secondary_relayer_id, + address: secondary_relayer_address, + } = client + .create_relayer(&CreateRelayerRequest { + name: "Secondary Relayer".to_string(), + chain_id: DEFAULT_ANVIL_CHAIN_ID, + }) + .await?; + + let CreateApiKeyResponse { + api_key: secondary_api_key, + } = client.create_relayer_api_key(&secondary_relayer_id).await?; + + // Set max queued txs + client + .update_relayer( + &secondary_relayer_id, + RelayerUpdate::default().with_max_queued_txs(MAX_QUEUED_TXS as u64), + ) + .await?; + + let provider = setup_provider(anvil.endpoint()).await?; + + // Send a transaction + let value: U256 = parse_units("0.01", "ether")?.into(); + + for _ in 0..=MAX_QUEUED_TXS { + client + .send_tx( + &secondary_api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + data: None, + gas_limit: U256::from(21_000), + priority: TransactionPriority::Regular, + tx_id: None, + }, + ) + .await?; + } + + // 
Sending one more tx should fail + let result = client + .send_tx( + &secondary_api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + data: None, + gas_limit: U256::from(21_000), + priority: TransactionPriority::Regular, + tx_id: None, + }, + ) + .await; + + assert!( + matches!( + result, + Err(ClientError::TxSitter(ApiError::TooManyTransactions { .. })) + ), + "Result {:?} should be too many transactions", + result + ); + + // Accumulate total value + gas budget + let send_value = value * (MAX_QUEUED_TXS + 1); + let total_required_value = send_value + parse_units("1", "ether")?; + + client + .send_tx( + &api_key, + &SendTxRequest { + to: secondary_relayer_address, + value: total_required_value, + data: None, + gas_limit: U256::from(21_000), + priority: TransactionPriority::Regular, + tx_id: None, + }, + ) + .await?; + + tracing::info!("Waiting for secondary relayer balance"); + await_balance(&provider, total_required_value, secondary_relayer_address) + .await?; + + tracing::info!("Waiting for queued up txs to be processed"); + await_balance(&provider, send_value, ARBITRARY_ADDRESS).await?; + + Ok(()) +} diff --git a/tests/send_tx.rs b/tests/send_tx.rs index a524a33..1a8fe62 100644 --- a/tests/send_tx.rs +++ b/tests/send_tx.rs @@ -1,91 +1,42 @@ -use std::time::Duration; - -use ethers::providers::Middleware; -use ethers::types::{Eip1559TransactionRequest, U256}; -use ethers::utils::parse_units; -use service::server::data::{ - CreateRelayerRequest, CreateRelayerResponse, SendTxRequest, SendTxResponse, -}; - mod common; -use crate::common::*; - -const ESCALATION_INTERVAL: Duration = Duration::from_secs(30); +use crate::common::prelude::*; #[tokio::test] async fn send_tx() -> eyre::Result<()> { setup_tracing(); let (db_url, _db_container) = setup_db().await?; - let double_anvil = setup_double_anvil().await?; + let anvil = AnvilBuilder::default().spawn().await?; - let service = - setup_service(&double_anvil.local_addr(), &db_url, ESCALATION_INTERVAL) - .await?; + let (_service, client) = + ServiceBuilder::default().build(&anvil, &db_url).await?; + let CreateApiKeyResponse { api_key } = + client.create_relayer_api_key(DEFAULT_RELAYER_ID).await?; - let addr = service.local_addr(); - - let response = reqwest::Client::new() - .post(&format!("http://{}/1/relayer/create", addr)) - .json(&CreateRelayerRequest { - name: "Test relayer".to_string(), - chain_id: DEFAULT_ANVIL_CHAIN_ID, - }) - .send() - .await?; + let provider = setup_provider(anvil.endpoint()).await?; - let response: CreateRelayerResponse = response.json().await?; - - // Fund the relayer - let middleware = setup_middleware( - format!("http://{}", double_anvil.local_addr()), - DEFAULT_ANVIL_PRIVATE_KEY, - ) - .await?; - - let amount: U256 = parse_units("100", "ether")?.into(); - - middleware - .send_transaction( - Eip1559TransactionRequest { - to: Some(response.address.into()), - value: Some(amount), + // Send a transaction + let value: U256 = parse_units("1", "ether")?.into(); + client + .send_tx( + &api_key, + &SendTxRequest { + to: ARBITRARY_ADDRESS, + value, + gas_limit: U256::from(21_000), ..Default::default() }, - None, ) - .await? 
- .await?; - - let provider = middleware.provider(); - - let current_balance = provider.get_balance(response.address, None).await?; - assert_eq!(current_balance, amount); - - // Send a transaction - let value: U256 = parse_units("1", "ether")?.into(); - let response = reqwest::Client::new() - .post(&format!("http://{}/1/tx/send", addr)) - .json(&SendTxRequest { - relayer_id: response.relayer_id, - to: ARBITRARY_ADDRESS, - value, - gas_limit: U256::from(21_000), - ..Default::default() - }) - .send() .await?; - let _response: SendTxResponse = response.json().await?; - for _ in 0..10 { let balance = provider.get_balance(ARBITRARY_ADDRESS, None).await?; if balance == value { return Ok(()); } else { - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(5)).await; } }