diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 04773ad110c7..6406d2fa3db0 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -15,11 +15,8 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.21.4
- cache-dependency-path: |
- go.mod
- go.sum
-
+ go-version: 1.23.0
+ cache: false
- name: Run tests
run: go test -short ./...
env:
diff --git a/.golangci.yml b/.golangci.yml
index e355e6f9d12e..7575a3ac6902 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -24,11 +24,7 @@ linters:
- copyloopvar
- whitespace
- revive # only certain checks enabled
- - durationcheck
- - gocheckcompilerdirectives
- - reassign
- - mirror
- - tenv
+
### linters we tried and will not be using:
###
# - structcheck # lots of false positives
diff --git a/.travis.yml b/.travis.yml
index 2ba3af9419fd..31c944641f58 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,7 +25,7 @@ jobs:
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- - go run build/ci.go dockerx -platform "linux/amd64,linux/arm64,linux/riscv64" -upload ethereum/client-go
+ - go run build/ci.go dockerx -platform "linux/amd64,linux/arm64" -upload ethereum/client-go
# This builder does the Linux Azure uploads
- stage: build
diff --git a/Dockerfile b/Dockerfile
index 3484b079923a..2a83820428d6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM --platform=${BUILDPLATFORM} golang:1.21-alpine as builder
+FROM golang:1.23-alpine AS builder
RUN apk add --no-cache gcc musl-dev linux-headers git
diff --git a/README.md b/README.md
index bc188375983d..28ad09f26d37 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
[](https://github.com/taikoxyz/taiko-geth/actions/workflows/ci.yml)
-The codebase is based on [go-ethereum v1.13.15](https://github.com/ethereum/go-ethereum/releases/tag/v1.13.15).
+The codebase is based on [go-ethereum v1.14.11](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.11).
## Tracing changes
@@ -116,7 +116,7 @@ useful on the testnet too.
Specifying the `--holesky` flag, however, will reconfigure your `geth` instance a bit:
- * Instead of connecting to the main Ethereum network, the client will connect to the Holesky
+ * Instead of connecting to the main Ethereum network, the client will connect to the Holesky
test network, which uses different P2P bootnodes, different network IDs and genesis
states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
diff --git a/SECURITY.md b/SECURITY.md
index 0b497b44aece..f4cbc7f8b7e8 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -30,68 +30,144 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
-mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY
-neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9
-L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi
-m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b
-fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd
-EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7
-M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv
-QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H
-h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
-2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ
-EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB
-tDRFdGhlcmV1bSBGb3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1
-bS5vcmc+iQJVBBMBCAA/AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W
-7ZaeR5sAhPPhf+iNMzT6X2oKBQJl2LD9BQkRdTklAAoJEOiNMzT6X2oKYYYQALkV
-wJjWYoVoMuw9D1ybQo4Sqyp6D/XYHXSpqZDO9RlADQisYBfuO7EW75evgZ+54Ajc
-8gZ2BUkFcSR9z2t0TEkUyjmPDZsaElTTP2Boa2GG5pyziEM6t1cMMY1sP1aotx9H
-DYwCeMmDv0wTMi6v0C6+/in2hBxbGALRbQKWKd/5ss4OEPe37hG9zAJcBYZg2tes
-O7ceg7LHZpNC1zvMUrBY6os74FJ437f8bankqvVE83/dvTcCDhMsei9LiWS2uo26
-qiyqeR9lZEj8W5F6UgkQH+UOhamJ9UB3N/h//ipKrwtiv0+jQm9oNG7aIAi3UJgD
-CvSod87H0l7/U8RWzyam/r8eh4KFM75hIVtqEy5jFV2z7x2SibXQi7WRfAysjFLp
-/li8ff6kLDR9IMATuMSF7Ol0O9JMRfSPjRZRtVOwYVIBla3BhfMrjvMMcZMAy/qS
-DWx2iFYDMGsswv7hp3lsFOaa1ju95ClZZk3q/z7u5gH7LFAxR0jPaW48ay3CFylW
-sDpQpO1DWb9uXBdhOU+MN18uSjqzocga3Wz2C8jhWRvxyFf3SNIybm3zk6W6IIoy
-6KmwSRZ30oxizy6zMYw1qJE89zjjumzlZAm0R/Q4Ui+WJhlSyrYbqzqdxYuLgdEL
-lgKfbv9/t8tNXGGSuCe5L7quOv9k7l2+QmLlg+SJtDlFdGhlcmV1bSBGb3VuZGF0
-aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0aGVyZXVtLm9yZz6JAlUEEwEI
-AD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbtlp5HmwCE8+F/6I0z
-NPpfagoFAmXYsP4FCRF1OSUACgkQ6I0zNPpfagoUGA/+LVzXUJrsfi8+ADMF1hru
-wFDcY1r+vM4Ovbk1NhCc/DnV5VG40j5FiQpE81BNiH59sYeZkQm9jFbwevK7Zpuq
-RZaG2WGiwU/11xrt5/Qjq7T+vEtd94546kFcBnP8uexZqP4dTi4LHa2on8aRbwzN
-7RjCpCQhy1TUuk47dyOR1y3ZHrpTwkHpuhwgffaWtxgSyCMYz7fsd5Ukh3eE+Ani
-90CIUieve2U3o+WPxBD9PRaIPg6LmBhfGxGvC/6tqY9W3Z9xEOVDxC4wdYppQzsg
-Pg7bNnVmlFWHsEk8FuMfY8nTqY3/ojhJxikWKz2V3Y2AbsLEXCvrEg6b4FvmsS97
-8ifEBbFXU8hvMSpMLtO7vLamWyOHq41IXWH6HLNLhDfDzTfpAJ8iYDKGj72YsMzF
-0fIjPa6mniMB2RmREAM0Jas3M/6DUw1EzwK1iQofIBoCRPIkR5mxmzjcRB6tVdQa
-on20/9YTKKBUQAdK0OWW8j1euuULDgNdkN2LBXdQLy/JcQiggU8kOCKL/Lmj5HWP
-FNT9rYfnjmCuux3UfJGfhPryujEA0CdIfq1Qf4ldOVzpWYjsMn+yQxAQTorAzF3z
-iYddP2cw/Nvookay8xywKJnDsaRaWqdQ8Ceox3qSB4LCjQRNR5c3HfvGm3EBdEyI
-zEEpjZ6GHa05DCajqKjtjlm5Ag0EWCXe2AEQAJuCrodM3mAQGLSWQP8xp8ieY2L7
-n1TmBEZiqTjpaV9GOEe51eMOmAPSWiUZviFiie2QxopGUKDZG+CO+Tdm97Q8paMr
-DuCvxgFr18wVjwGEBcjfY53Ij2sWHERkV9YB/ApWZPX0F14BBEW9x937zDx/VdVz
-7N11QswkUFOv7EoOUhFbBOR0s9B5ZuOjR4eX+Di24uIutPFVuePbpt/7b7UNsz/D
-lVq/M+uS+Ieq8p79A/+BrLhANWJa8WAtv3SJ18Ach2w+B+WnRUNLmtUcUvoPvetJ
-F0hGjcjxzyZig2NJHhcO6+A6QApb0tHem+i4UceOnoWvQZ6xFuttvYQbrqI+xH30
-xDsWogv1Uwc+baa1H5e9ucqQfatjISpoxtJ2Tb2MZqmQBaV7iwiFIqTvj0Di0aQe
-XTwpnY32joat9R6E/9XZ4ktqmHYOKgUvUfFGvKsrLzRBAoswlF6TGKCryCt5bzEH
-jO5/0yS6i75Ec2ajw95seMWy0uKCIPr/M/Z77i1SatPT8lMY5KGgYyXxG3RVHF08
-iYq6f7gs5dt87ECs5KRjqLfn6CyCSRLLWBMkTQFjdL1q5Pr5iuCVj9NY9D0gnFZU
-4qVP7dYinnAm7ZsEpDjbRUuoNjOShbK16X9szUAJS2KkyIhV5Sza4WJGOnMDVbLR
-Aco9N1K4aUk9Gt9xABEBAAGJAjwEGAEIACYCGwwWIQSulu2WnkebAITz4X/ojTM0
-+l9qCgUCZdiwoAUJEXU4yAAKCRDojTM0+l9qCj2PD/9pbIPRMZtvKIIE+OhOAl/s
-qfZJXByAM40ELpUhDHqwbOplIEyvXtWfQ5c+kWlG/LPJ2CgLkHyFQDn6tuat82rH
-/5VoZyxp16CBAwEgYdycOr9hMGSVKNIJDfV9Bu6VtZnn6fa/swBzGE7eVpXsIoNr
-jeqsogBtzLecG1oHMXRMq7oUqu9c6VNoCx2uxRUOeWW8YuP7h9j6mxIuKKbcpmQ5
-RSLNEhJZJsMMFLf8RAQPXmshG1ZixY2ZliNe/TTm6eEfFCw0KcQxoX9LmurLWE9w
-dIKgn1/nQ04GFnmtcq3hVxY/m9BvzY1jmZXNd4TdpfrPXhi0W/GDn53ERFPJmw5L
-F8ogxzD/ekxzyd9nCCgtzkamtBKDJk35x/MoVWMLjD5k6P+yW7YY4xMQliSJHKss
-leLnaPpgDBi4KPtLxPswgFqObcy4TNse07rFO4AyHf11FBwMTEfuODCOMnQTpi3z
-Zx6KxvS3BEY36abjvwrqsmt8dJ/+/QXT0e82fo2kJ65sXIszez3e0VUZ8KrMp+wd
-X0GWYWAfqXws6HrQFYfIpEE0Vz9gXDxEOTFZ2FoVIvIHyRfyDrAIz3wZLmnLGk1h
-l3CDjHF0Wigv0CacIQ1V1aYp3NhIVwAvShQ+qS5nFgik6UZnjjWibobOm3yQDzll
-6F7hEeTW+gnXEI2gPjfb5w==
-=b5eA
+mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
+82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
+qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
+lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
+48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
+PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
+yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
+nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
+2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
+8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
+b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
+FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
+mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
+rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
+Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
+Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
+CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
+09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
+QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
+Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
+XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
+AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
+D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
+s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
+ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
+kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
+dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
+O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
+ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
+fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
+T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
+V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
+CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
+Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
+5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
+7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
+otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
+DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
+aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
+A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
+/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
+rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
+BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
+lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
+GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
+GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
+6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
+qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
+zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
+nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
+5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
+WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
+8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
+N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
+c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
+wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
+D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
+J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
+Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
+NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
+QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
+Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
+uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
+TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
+Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
+AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
+diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
+sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
+E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
+B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
+C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
+BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
+TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
+SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
+CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
+HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
+AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
+0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
+VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
+wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
+OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
+WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
+EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
+jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
+Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
+MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
+AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
+ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
+FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
+/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
+Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
+DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
+1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
+9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
++2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
+qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
+OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
+BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
+YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
+9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
+26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
+D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
+FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
+rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
+bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
+gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
+bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
+CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
+3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
+KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
+ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
+JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
+sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
+BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
+Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
+/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
+M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
+BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
+1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
+6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
+LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
+yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
+cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
+5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
+GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
+rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
+XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
+QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
+mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
+8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
+I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
+yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
+ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
+i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
++m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
+402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
+epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
+PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
+=arte
-----END PGP PUBLIC KEY BLOCK-----
```
diff --git a/accounts/usbwallet/trezor/messages-common.pb.go b/accounts/usbwallet/trezor/messages-common.pb.go
index 73800802bb30..eab6a66217fa 100644
--- a/accounts/usbwallet/trezor/messages-common.pb.go
+++ b/accounts/usbwallet/trezor/messages-common.pb.go
@@ -4,8 +4,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
-// protoc v5.27.1
+// protoc-gen-go v1.26.0
+// protoc v5.28.0
// source: messages-common.proto
package trezor
@@ -1010,7 +1010,7 @@ func file_messages_common_proto_rawDescGZIP() []byte {
var file_messages_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_messages_common_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
-var file_messages_common_proto_goTypes = []any{
+var file_messages_common_proto_goTypes = []interface{}{
(Failure_FailureType)(0), // 0: hw.trezor.messages.common.Failure.FailureType
(ButtonRequest_ButtonRequestType)(0), // 1: hw.trezor.messages.common.ButtonRequest.ButtonRequestType
(PinMatrixRequest_PinMatrixRequestType)(0), // 2: hw.trezor.messages.common.PinMatrixRequest.PinMatrixRequestType
@@ -1043,7 +1043,7 @@ func file_messages_common_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_messages_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Success); i {
case 0:
return &v.state
@@ -1055,7 +1055,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Failure); i {
case 0:
return &v.state
@@ -1067,7 +1067,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ButtonRequest); i {
case 0:
return &v.state
@@ -1079,7 +1079,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ButtonAck); i {
case 0:
return &v.state
@@ -1091,7 +1091,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PinMatrixRequest); i {
case 0:
return &v.state
@@ -1103,7 +1103,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PinMatrixAck); i {
case 0:
return &v.state
@@ -1115,7 +1115,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PassphraseRequest); i {
case 0:
return &v.state
@@ -1127,7 +1127,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PassphraseAck); i {
case 0:
return &v.state
@@ -1139,7 +1139,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PassphraseStateRequest); i {
case 0:
return &v.state
@@ -1151,7 +1151,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[9].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PassphraseStateAck); i {
case 0:
return &v.state
@@ -1163,7 +1163,7 @@ func file_messages_common_proto_init() {
return nil
}
}
- file_messages_common_proto_msgTypes[10].Exporter = func(v any, i int) any {
+ file_messages_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HDNodeType); i {
case 0:
return &v.state
diff --git a/accounts/usbwallet/trezor/messages-ethereum.pb.go b/accounts/usbwallet/trezor/messages-ethereum.pb.go
index a92123efcdda..4c257655ca23 100644
--- a/accounts/usbwallet/trezor/messages-ethereum.pb.go
+++ b/accounts/usbwallet/trezor/messages-ethereum.pb.go
@@ -4,8 +4,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
-// protoc v5.27.1
+// protoc-gen-go v1.26.0
+// protoc v5.28.0
// source: messages-ethereum.proto
package trezor
@@ -831,7 +831,7 @@ func file_messages_ethereum_proto_rawDescGZIP() []byte {
}
var file_messages_ethereum_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
-var file_messages_ethereum_proto_goTypes = []any{
+var file_messages_ethereum_proto_goTypes = []interface{}{
(*EthereumGetPublicKey)(nil), // 0: hw.trezor.messages.ethereum.EthereumGetPublicKey
(*EthereumPublicKey)(nil), // 1: hw.trezor.messages.ethereum.EthereumPublicKey
(*EthereumGetAddress)(nil), // 2: hw.trezor.messages.ethereum.EthereumGetAddress
@@ -860,7 +860,7 @@ func file_messages_ethereum_proto_init() {
}
file_messages_common_proto_init()
if !protoimpl.UnsafeEnabled {
- file_messages_ethereum_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumGetPublicKey); i {
case 0:
return &v.state
@@ -872,7 +872,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumPublicKey); i {
case 0:
return &v.state
@@ -884,7 +884,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumGetAddress); i {
case 0:
return &v.state
@@ -896,7 +896,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumAddress); i {
case 0:
return &v.state
@@ -908,7 +908,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumSignTx); i {
case 0:
return &v.state
@@ -920,7 +920,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumTxRequest); i {
case 0:
return &v.state
@@ -932,7 +932,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumTxAck); i {
case 0:
return &v.state
@@ -944,7 +944,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumSignMessage); i {
case 0:
return &v.state
@@ -956,7 +956,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumMessageSignature); i {
case 0:
return &v.state
@@ -968,7 +968,7 @@ func file_messages_ethereum_proto_init() {
return nil
}
}
- file_messages_ethereum_proto_msgTypes[9].Exporter = func(v any, i int) any {
+ file_messages_ethereum_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EthereumVerifyMessage); i {
case 0:
return &v.state
diff --git a/accounts/usbwallet/trezor/messages-management.pb.go b/accounts/usbwallet/trezor/messages-management.pb.go
index 983e2d281df3..87bd68403987 100644
--- a/accounts/usbwallet/trezor/messages-management.pb.go
+++ b/accounts/usbwallet/trezor/messages-management.pb.go
@@ -4,8 +4,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
-// protoc v5.27.1
+// protoc-gen-go v1.26.0
+// protoc v5.28.0
// source: messages-management.proto
package trezor
@@ -1955,7 +1955,7 @@ func file_messages_management_proto_rawDescGZIP() []byte {
var file_messages_management_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_messages_management_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
-var file_messages_management_proto_goTypes = []any{
+var file_messages_management_proto_goTypes = []interface{}{
(ApplySettings_PassphraseSourceType)(0), // 0: hw.trezor.messages.management.ApplySettings.PassphraseSourceType
(RecoveryDevice_RecoveryDeviceType)(0), // 1: hw.trezor.messages.management.RecoveryDevice.RecoveryDeviceType
(WordRequest_WordRequestType)(0), // 2: hw.trezor.messages.management.WordRequest.WordRequestType
@@ -2001,7 +2001,7 @@ func file_messages_management_proto_init() {
}
file_messages_common_proto_init()
if !protoimpl.UnsafeEnabled {
- file_messages_management_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Initialize); i {
case 0:
return &v.state
@@ -2013,7 +2013,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetFeatures); i {
case 0:
return &v.state
@@ -2025,7 +2025,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Features); i {
case 0:
return &v.state
@@ -2037,7 +2037,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ClearSession); i {
case 0:
return &v.state
@@ -2049,7 +2049,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ApplySettings); i {
case 0:
return &v.state
@@ -2061,7 +2061,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ApplyFlags); i {
case 0:
return &v.state
@@ -2073,7 +2073,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChangePin); i {
case 0:
return &v.state
@@ -2085,7 +2085,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Ping); i {
case 0:
return &v.state
@@ -2097,7 +2097,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Cancel); i {
case 0:
return &v.state
@@ -2109,7 +2109,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[9].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetEntropy); i {
case 0:
return &v.state
@@ -2121,7 +2121,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[10].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Entropy); i {
case 0:
return &v.state
@@ -2133,7 +2133,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[11].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WipeDevice); i {
case 0:
return &v.state
@@ -2145,7 +2145,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[12].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LoadDevice); i {
case 0:
return &v.state
@@ -2157,7 +2157,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[13].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResetDevice); i {
case 0:
return &v.state
@@ -2169,7 +2169,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[14].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BackupDevice); i {
case 0:
return &v.state
@@ -2181,7 +2181,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[15].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EntropyRequest); i {
case 0:
return &v.state
@@ -2193,7 +2193,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[16].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EntropyAck); i {
case 0:
return &v.state
@@ -2205,7 +2205,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[17].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RecoveryDevice); i {
case 0:
return &v.state
@@ -2217,7 +2217,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[18].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WordRequest); i {
case 0:
return &v.state
@@ -2229,7 +2229,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[19].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WordAck); i {
case 0:
return &v.state
@@ -2241,7 +2241,7 @@ func file_messages_management_proto_init() {
return nil
}
}
- file_messages_management_proto_msgTypes[20].Exporter = func(v any, i int) any {
+ file_messages_management_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetU2FCounter); i {
case 0:
return &v.state
diff --git a/accounts/usbwallet/trezor/messages.pb.go b/accounts/usbwallet/trezor/messages.pb.go
index 4518db679e93..d8d298650d96 100644
--- a/accounts/usbwallet/trezor/messages.pb.go
+++ b/accounts/usbwallet/trezor/messages.pb.go
@@ -4,8 +4,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
-// protoc v5.27.1
+// protoc-gen-go v1.26.0
+// protoc v5.28.0
// source: messages.proto
package trezor
@@ -1320,7 +1320,7 @@ func file_messages_proto_rawDescGZIP() []byte {
}
var file_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_messages_proto_goTypes = []any{
+var file_messages_proto_goTypes = []interface{}{
(MessageType)(0), // 0: hw.trezor.messages.MessageType
(*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions
}
diff --git a/appveyor.yml b/appveyor.yml
index 1543211edc8c..92369537cd0e 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -24,9 +24,7 @@ for:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- - go run build/ci.go check_tidy
- - go run build/ci.go check_generate
- - go run build/ci.go check_baddeps
+ - go run build/ci.go generate -verify
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -short
diff --git a/beacon/blsync/client.go b/beacon/blsync/client.go
index 3c93754d3df2..39a1c6ea76c1 100644
--- a/beacon/blsync/client.go
+++ b/beacon/blsync/client.go
@@ -17,22 +17,25 @@
package blsync
import (
+ "strings"
+
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/api"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
- "github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/urfave/cli/v2"
)
type Client struct {
urls []string
customHeader map[string]string
- config *params.ClientConfig
+ chainConfig *lightClientConfig
scheduler *request.Scheduler
blockSync *beaconBlockSync
engineRPC *rpc.Client
@@ -41,18 +44,34 @@ type Client struct {
engineClient *engineClient
}
-func NewClient(config params.ClientConfig) *Client {
+func NewClient(ctx *cli.Context) *Client {
+ if !ctx.IsSet(utils.BeaconApiFlag.Name) {
+ utils.Fatalf("Beacon node light client API URL not specified")
+ }
+ var (
+ chainConfig = makeChainConfig(ctx)
+ customHeader = make(map[string]string)
+ )
+ for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
+ kv := strings.Split(s, ":")
+ if len(kv) != 2 {
+ utils.Fatalf("Invalid custom API header entry: %s", s)
+ }
+ customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
+ }
+
// create data structures
var (
db = memorydb.New()
- committeeChain = light.NewCommitteeChain(db, &config.ChainConfig, config.Threshold, !config.NoFilter)
- headTracker = light.NewHeadTracker(committeeChain, config.Threshold)
+ threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
+ committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
+ headTracker = light.NewHeadTracker(committeeChain, threshold)
)
headSync := sync.NewHeadSync(headTracker, committeeChain)
// set up scheduler and sync modules
scheduler := request.NewScheduler()
- checkpointInit := sync.NewCheckpointInit(committeeChain, config.Checkpoint)
+ checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
forwardSync := sync.NewForwardUpdateSync(committeeChain)
beaconBlockSync := newBeaconBlockSync(headTracker)
scheduler.RegisterTarget(headTracker)
@@ -64,9 +83,9 @@ func NewClient(config params.ClientConfig) *Client {
return &Client{
scheduler: scheduler,
- urls: config.Apis,
- customHeader: config.CustomHeader,
- config: &config,
+ urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
+ customHeader: customHeader,
+ chainConfig: &chainConfig,
blockSync: beaconBlockSync,
}
}
@@ -78,7 +97,7 @@ func (c *Client) SetEngineRPC(engine *rpc.Client) {
func (c *Client) Start() error {
headCh := make(chan types.ChainHeadEvent, 16)
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
- c.engineClient = startEngineClient(c.config, c.engineRPC, headCh)
+ c.engineClient = startEngineClient(c.chainConfig, c.engineRPC, headCh)
c.scheduler.Start()
for _, url := range c.urls {
diff --git a/beacon/blsync/config.go b/beacon/blsync/config.go
new file mode 100644
index 000000000000..efc44b47d1a9
--- /dev/null
+++ b/beacon/blsync/config.go
@@ -0,0 +1,114 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blsync
+
+import (
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/urfave/cli/v2"
+)
+
+// lightClientConfig contains beacon light client configuration
+type lightClientConfig struct {
+ *types.ChainConfig
+ Checkpoint common.Hash
+}
+
+var (
+ MainnetConfig = lightClientConfig{
+ ChainConfig: (&types.ChainConfig{
+ GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
+ GenesisTime: 1606824023,
+ }).
+ AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
+ AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
+ AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
+ AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
+ AddFork("DENEB", 269568, []byte{4, 0, 0, 0}),
+ Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
+ }
+
+ SepoliaConfig = lightClientConfig{
+ ChainConfig: (&types.ChainConfig{
+ GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
+ GenesisTime: 1655733600,
+ }).
+ AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
+ AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
+ AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
+ AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
+ AddFork("DENEB", 132608, []byte{144, 0, 0, 115}),
+ Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
+ }
+)
+
+func makeChainConfig(ctx *cli.Context) lightClientConfig {
+ var config lightClientConfig
+ customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
+ utils.CheckExclusive(ctx, utils.MainnetFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
+ switch {
+ case ctx.Bool(utils.MainnetFlag.Name):
+ config = MainnetConfig
+ case ctx.Bool(utils.SepoliaFlag.Name):
+ config = SepoliaConfig
+ default:
+ if !customConfig {
+ config = MainnetConfig
+ }
+ }
+ // Genesis root and time should always be specified together with custom chain config
+ if customConfig {
+ if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+ utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
+ }
+ if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+ utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
+ }
+ if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
+ utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
+ }
+ config.ChainConfig = &types.ChainConfig{
+ GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
+ }
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.GenesisValidatorsRoot[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
+ }
+ if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
+ utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
+ }
+ } else {
+ if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+ utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
+ }
+ if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+ utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
+ }
+ }
+ // Checkpoint is required with custom chain config and is optional with pre-defined config
+ if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.Checkpoint[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
+ }
+ }
+ return config
+}
diff --git a/beacon/blsync/engineclient.go b/beacon/blsync/engineclient.go
index c6569fdbde72..97ef6f5cb88e 100644
--- a/beacon/blsync/engineclient.go
+++ b/beacon/blsync/engineclient.go
@@ -23,7 +23,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/beacon/engine"
- "github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"
@@ -32,14 +31,14 @@ import (
)
type engineClient struct {
- config *params.ClientConfig
+ config *lightClientConfig
rpc *rpc.Client
rootCtx context.Context
cancelRoot context.CancelFunc
wg sync.WaitGroup
}
-func startEngineClient(config *params.ClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
+func startEngineClient(config *lightClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
ctx, cancel := context.WithCancel(context.Background())
ec := &engineClient{
config: config,
@@ -93,7 +92,7 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
- execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload
+ execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
var (
method string
diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go
index b3ea4ad2a70d..632f9bd839f1 100644
--- a/beacon/engine/gen_ed.go
+++ b/beacon/engine/gen_ed.go
@@ -17,26 +17,28 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"prevRandao" gencodec:"required"`
- Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
- BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Transactions []hexutil.Bytes `json:"transactions"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
- ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- TxHash common.Hash `json:"txHash"`
- WithdrawalsHash common.Hash `json:"withdrawalsHash"`
- TaikoBlock bool
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions []hexutil.Bytes `json:"transactions"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
+ Deposits types.Deposits `json:"depositRequests"`
+ ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
+ TxHash common.Hash `json:"txHash"`
+ WithdrawalsHash common.Hash `json:"withdrawalsHash"`
+ TaikoBlock bool
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@@ -61,6 +63,8 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
+ enc.Deposits = e.Deposits
+ enc.ExecutionWitness = e.ExecutionWitness
enc.TxHash = e.TxHash
enc.WithdrawalsHash = e.WithdrawalsHash
enc.TaikoBlock = e.TaikoBlock
@@ -70,26 +74,28 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
- ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
- Random *common.Hash `json:"prevRandao" gencodec:"required"`
- Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
- BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
- BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
- Transactions []hexutil.Bytes `json:"transactions"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
- ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- TxHash *common.Hash `json:"txHash"`
- WithdrawalsHash *common.Hash `json:"withdrawalsHash"`
- TaikoBlock *bool
+ ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
+ Random *common.Hash `json:"prevRandao" gencodec:"required"`
+ Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions []hexutil.Bytes `json:"transactions"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
+ Deposits *types.Deposits `json:"depositRequests"`
+ ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
+ TxHash *common.Hash `json:"txHash"`
+ WithdrawalsHash *common.Hash `json:"withdrawalsHash"`
+ TaikoBlock *bool
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@@ -162,6 +168,12 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
+ if dec.Deposits != nil {
+ e.Deposits = *dec.Deposits
+ }
+ if dec.ExecutionWitness != nil {
+ e.ExecutionWitness = dec.ExecutionWitness
+ }
if dec.TxHash != nil {
e.TxHash = *dec.TxHash
}
diff --git a/beacon/engine/gen_epe.go b/beacon/engine/gen_epe.go
index deada06166c5..039884e842fd 100644
--- a/beacon/engine/gen_epe.go
+++ b/beacon/engine/gen_epe.go
@@ -20,7 +20,7 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness,omitempty"`
+ Witness *hexutil.Bytes `json:"witness"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
@@ -45,7 +45,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness,omitempty"`
+ Witness *hexutil.Bytes `json:"witness"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index 3d4b9611f800..403466f2cd0a 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -86,23 +86,25 @@ type blockMetadataMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
- StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
- ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
- LogsBloom []byte `json:"logsBloom" gencodec:"required"`
- Random common.Hash `json:"prevRandao" gencodec:"required"`
- Number uint64 `json:"blockNumber" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Timestamp uint64 `json:"timestamp" gencodec:"required"`
- ExtraData []byte `json:"extraData" gencodec:"required"`
- BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Transactions [][]byte `json:"transactions"`
- Withdrawals []*types.Withdrawal `json:"withdrawals"`
- BlobGasUsed *uint64 `json:"blobGasUsed"`
- ExcessBlobGas *uint64 `json:"excessBlobGas"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
+ StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
+ ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
+ LogsBloom []byte `json:"logsBloom" gencodec:"required"`
+ Random common.Hash `json:"prevRandao" gencodec:"required"`
+ Number uint64 `json:"blockNumber" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed" gencodec:"required"`
+ Timestamp uint64 `json:"timestamp" gencodec:"required"`
+ ExtraData []byte `json:"extraData" gencodec:"required"`
+ BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Transactions [][]byte `json:"transactions"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ BlobGasUsed *uint64 `json:"blobGasUsed"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas"`
+ Deposits types.Deposits `json:"depositRequests"`
+ ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
TxHash common.Hash `json:"txHash"` // CHANGE(taiko): allow passing txHash directly instead of transactions list
WithdrawalsHash common.Hash `json:"withdrawalsHash"` // CHANGE(taiko): allow passing WithdrawalsHash directly instead of withdrawals
@@ -139,7 +141,7 @@ type ExecutionPayloadEnvelope struct {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness,omitempty"`
+ Witness *hexutil.Bytes `json:"witness"`
}
type BlobsBundleV1 struct {
@@ -243,8 +245,8 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// and that the blockhash of the constructed block matches the parameters. Nil
// Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data.
-func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
- block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests)
+func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
+ block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot)
if err != nil {
return nil, err
}
@@ -257,7 +259,7 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
-func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
+func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
@@ -292,21 +294,19 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil))
withdrawalsRoot = &h
}
-
- var requestsHash *common.Hash
- if requests != nil {
- // Put back request type byte.
- typedRequests := make([][]byte, len(requests))
- for i, reqdata := range requests {
- typedReqdata := make([]byte, len(reqdata)+1)
- typedReqdata[0] = byte(i)
- copy(typedReqdata[1:], reqdata)
- typedRequests[i] = typedReqdata
+ // Compute requestsHash if any requests are non-nil.
+ var (
+ requestsHash *common.Hash
+ requests types.Requests
+ )
+ if data.Deposits != nil {
+ requests = make(types.Requests, 0)
+ for _, d := range data.Deposits {
+ requests = append(requests, types.NewRequest(d))
}
- h := types.CalcRequestsHash(typedRequests)
+ h := types.DeriveSha(requests, trie.NewStackTrie(nil))
requestsHash = &h
}
-
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
@@ -330,7 +330,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash,
}
return types.NewBlockWithHeader(header).
- WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
+ WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals, Requests: requests}).
WithWitness(data.ExecutionWitness),
nil
}
@@ -372,22 +372,22 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
+ setRequests(block.Requests(), data)
+ return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
+}
- // Remove type byte in requests.
- var plainRequests [][]byte
+// setRequests differentiates the different request types and
+// assigns them to the associated fields in ExecutableData.
+func setRequests(requests types.Requests, data *ExecutableData) {
if requests != nil {
- plainRequests = make([][]byte, len(requests))
- for i, reqdata := range requests {
- plainRequests[i] = reqdata[1:]
- }
+ // If requests is non-nil, deposits are available in the block, so return an
+ // empty slice instead of nil when there are no deposits.
+ data.Deposits = make(types.Deposits, 0)
}
-
- return &ExecutionPayloadEnvelope{
- ExecutionPayload: data,
- BlockValue: fees,
- BlobsBundle: &bundle,
- Requests: plainRequests,
- Override: false,
+ for _, r := range requests {
+ if d, ok := r.Inner().(*types.Deposit); ok {
+ data.Deposits = append(data.Deposits, d)
+ }
}
}
@@ -395,6 +395,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
type ExecutionPayloadBody struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ Deposits types.Deposits `json:"depositRequests"`
}
// Client identifiers to support ClientVersionV1.
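
For readers tracking the requests-related hunks above: on this branch, deposit requests travel as typed `types.Deposits` and the requests hash is derived over the wrapped request list with a stack trie, instead of hashing raw type-prefixed request bytes. A minimal sketch of that path, using only the `types.Requests`/`types.NewRequest`/`DeriveSha` calls visible in the diff (the helper name is illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// depositsToRequestsHash mirrors ExecutableDataToBlockNoHash above: wrap each
// deposit in a generic request and derive the requests root over the list.
// A nil deposit list means "no requests hash", matching the hunk's behaviour.
func depositsToRequestsHash(deposits types.Deposits) *common.Hash {
	if deposits == nil {
		return nil
	}
	requests := make(types.Requests, 0, len(deposits))
	for _, d := range deposits {
		requests = append(requests, types.NewRequest(d))
	}
	h := types.DeriveSha(requests, trie.NewStackTrie(nil))
	return &h
}
```
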
diff --git a/beacon/light/committee_chain.go b/beacon/light/committee_chain.go
index 4fa87785c08a..197f8fbc2341 100644
--- a/beacon/light/committee_chain.go
+++ b/beacon/light/committee_chain.go
@@ -87,7 +87,7 @@ func NewCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signe
}
// NewTestCommitteeChain creates a new CommitteeChain for testing.
-func NewTestCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
+func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}
diff --git a/beacon/light/head_tracker.go b/beacon/light/head_tracker.go
index 010e548ddbd9..7ef93feccedf 100644
--- a/beacon/light/head_tracker.go
+++ b/beacon/light/head_tracker.go
@@ -69,13 +69,12 @@ func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
// slot or same slot and more signers) then ValidatedOptimistic is updated.
// The boolean return flag signals if ValidatedOptimistic has been changed.
func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
- if err := update.Validate(); err != nil {
- return false, err
- }
-
h.lock.Lock()
defer h.lock.Unlock()
+ if err := update.Validate(); err != nil {
+ return false, err
+ }
replace, err := h.validate(update.SignedHeader(), h.optimisticUpdate.SignedHeader())
if replace {
h.optimisticUpdate, h.hasOptimisticUpdate = update, true
@@ -89,13 +88,12 @@ func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, e
// slot or same slot and more signers) then ValidatedFinality is updated.
// The boolean return flag signals if ValidatedFinality has been changed.
func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
- if err := update.Validate(); err != nil {
- return false, err
- }
-
h.lock.Lock()
defer h.lock.Unlock()
+ if err := update.Validate(); err != nil {
+ return false, err
+ }
replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
if replace {
h.finalityUpdate, h.hasFinalityUpdate = update, true
diff --git a/beacon/params/config.go b/beacon/params/config.go
index be2a40f1718c..6b90788d5efd 100644
--- a/beacon/params/config.go
+++ b/beacon/params/config.go
@@ -39,13 +39,81 @@ const syncCommitteeDomain = 7
var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
-// ClientConfig contains beacon light client configuration.
-type ClientConfig struct {
- ChainConfig
- Apis []string
- CustomHeader map[string]string
- Threshold int
- NoFilter bool
+// Fork describes a single beacon chain fork and also stores the calculated
+// signature domain used after this fork.
+type Fork struct {
+ // Name of the fork in the chain config (config.yaml) file
+ Name string
+
+ // Epoch when given fork version is activated
+ Epoch uint64
+
+ // Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
+ Version []byte
+
+ // index in list of known forks or MaxInt if unknown
+ knownIndex int
+
+ // calculated by computeDomain, based on fork version and genesis validators root
+ domain merkle.Value
+}
+
+// computeDomain returns the signature domain based on the given fork version
+// and genesis validator set root.
+func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
+ var (
+ hasher = sha256.New()
+ forkVersion32 merkle.Value
+ forkDataRoot merkle.Value
+ )
+ copy(forkVersion32[:], f.Version)
+ hasher.Write(forkVersion32[:])
+ hasher.Write(genesisValidatorsRoot[:])
+ hasher.Sum(forkDataRoot[:0])
+
+ f.domain[0] = syncCommitteeDomain
+ copy(f.domain[4:], forkDataRoot[:28])
+}
+
+// Forks is the list of all beacon chain forks in the chain configuration.
+type Forks []*Fork
+
+// domain returns the signature domain for the given epoch (assumes that domains
+// have already been calculated).
+func (f Forks) domain(epoch uint64) (merkle.Value, error) {
+ for i := len(f) - 1; i >= 0; i-- {
+ if epoch >= f[i].Epoch {
+ return f[i].domain, nil
+ }
+ }
+ return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
+}
+
+// SigningRoot calculates the signing root of the given header.
+func (f Forks) SigningRoot(header Header) (common.Hash, error) {
+ domain, err := f.domain(header.Epoch())
+ if err != nil {
+ return common.Hash{}, err
+ }
+ var (
+ signingRoot common.Hash
+ headerHash = header.Hash()
+ hasher = sha256.New()
+ )
+ hasher.Write(headerHash[:])
+ hasher.Write(domain[:])
+ hasher.Sum(signingRoot[:0])
+
+ return signingRoot, nil
+}
+
+func (f Forks) Len() int { return len(f) }
+func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f Forks) Less(i, j int) bool {
+ if f[i].Epoch != f[j].Epoch {
+ return f[i].Epoch < f[j].Epoch
+ }
+ return f[i].knownIndex < f[j].knownIndex
}
// ChainConfig contains the beacon chain configuration.
@@ -66,6 +134,16 @@ func (c *ChainConfig) ForkAtEpoch(epoch uint64) Fork {
return Fork{}
}
+// ForkAtEpoch returns the latest active fork at the given epoch.
+func (c *ChainConfig) ForkAtEpoch(epoch uint64) Fork {
+ for i := len(c.Forks) - 1; i >= 0; i-- {
+ if c.Forks[i].Epoch <= epoch {
+ return *c.Forks[i]
+ }
+ }
+ return Fork{}
+}
+
// AddFork adds a new item to the list of forks.
func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainConfig {
knownIndex := slices.Index(knownForks, name)
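
The restored `Fork`/`Forks` machinery above is what selects the signature domain per epoch. A small usage sketch, assuming the `ChainConfig.AddFork`/`ForkAtEpoch` API shown in this hunk and the `beacon/params` package path used on this branch; the epochs and version bytes are mainnet values:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/params"
)

func main() {
	// Register two forks and look up which one is active at a given epoch.
	cfg := new(params.ChainConfig).
		AddFork("GENESIS", 0, []byte{0x00, 0x00, 0x00, 0x00}).
		AddFork("ALTAIR", 74240, []byte{0x01, 0x00, 0x00, 0x00})

	fork := cfg.ForkAtEpoch(100_000)
	fmt.Println(fork.Name) // ALTAIR: the latest fork with Epoch <= 100000
}
```
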
diff --git a/build/checksums.txt b/build/checksums.txt
index b83521585071..06de819c70b5 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -5,88 +5,88 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
-# version:golang 1.23.3
+# version:golang 1.23.1
# https://go.dev/dl/
-8d6a77332487557c6afa2421131b50f83db4ae3c579c3bc72e670ee1f6968599 go1.23.3.src.tar.gz
-bdbf2a243ed4a121c9988684e5b15989cb244c1ff9e41ca823d0187b5c859114 go1.23.3.aix-ppc64.tar.gz
-b79c77bbdf61e6e486aa6bea9286f3f7969c28e2ff7686ce10c334f746bfb724 go1.23.3.darwin-amd64.pkg
-c7e024d5c0bc81845070f23598caf02f05b8ae88fd4ad2cd3e236ddbea833ad2 go1.23.3.darwin-amd64.tar.gz
-3e764df0db8f3c7470b9ff641954a380510a4822613c06bd5a195fd083f4731d go1.23.3.darwin-arm64.pkg
-31e119fe9bde6e105407a32558d5b5fa6ca11e2bd17f8b7b2f8a06aba16a0632 go1.23.3.darwin-arm64.tar.gz
-3872c9a98331050a242afe63fa6abc8fc313aca83dcaefda318e903309ac0c8d go1.23.3.dragonfly-amd64.tar.gz
-69479fa016ec5b4605885643ce0c2dd5c583e02353978feb6de38c961863b9cc go1.23.3.freebsd-386.tar.gz
-bf1de22a900646ef4f79480ed88337856d47089cc610f87e6fef46f6b8db0e1f go1.23.3.freebsd-amd64.tar.gz
-e461f866479bc36bdd4cfec32bfecb1bb243152268a1b3223de109410dec3407 go1.23.3.freebsd-arm.tar.gz
-24154b4018a45540aefeb6b5b9ffdcc8d9a8cdb78cd7fec262787b89fed19997 go1.23.3.freebsd-arm64.tar.gz
-218f3f1532e61dd65c330c2a5fc85bec18cc3690489763e62ffa9bb9fc85a68e go1.23.3.freebsd-riscv64.tar.gz
-24e3f34858b8687c31f5e5ab9e46d27fb613b0d50a94261c500cebb2d79c0672 go1.23.3.illumos-amd64.tar.gz
-3d7b00191a43c50d28e0903a0c576104bc7e171a8670de419d41111c08dfa299 go1.23.3.linux-386.tar.gz
-a0afb9744c00648bafb1b90b4aba5bdb86f424f02f9275399ce0c20b93a2c3a8 go1.23.3.linux-amd64.tar.gz
-1f7cbd7f668ea32a107ecd41b6488aaee1f5d77a66efd885b175494439d4e1ce go1.23.3.linux-arm64.tar.gz
-5f0332754beffc65af65a7b2da76e9dd997567d0d81b6f4f71d3588dc7b4cb00 go1.23.3.linux-armv6l.tar.gz
-1d0161a8946c7d99f717bad23631738408511f9f87e78d852224a023d8882ad8 go1.23.3.linux-loong64.tar.gz
-e924a7c9027f521f8a3563541ed0f89a4db3ef005b6b71263415b38e0b46e63a go1.23.3.linux-mips.tar.gz
-4cdf8c38165627f032c2b17cdd95e4aafff40d75fc873824d4c94914284098ca go1.23.3.linux-mips64.tar.gz
-5e49347e7325d2e268fb14040529b704e66eed77154cc73a919e9167d8527a2f go1.23.3.linux-mips64le.tar.gz
-142eabc17cee99403e895383ed7a6b7b40e740e8c2f73b79352bb9d1242fbd98 go1.23.3.linux-mipsle.tar.gz
-96ad61ba6b6cc0f5adfd75e65231c61e7db26d8236f01117023899528164d1b0 go1.23.3.linux-ppc64.tar.gz
-e3b926c81e8099d3cee6e6e270b85b39c3bd44263f8d3df29aacb4d7e00507c8 go1.23.3.linux-ppc64le.tar.gz
-324e03b6f59be841dfbaeabc466224b0f0905f5ad3a225b7c0703090e6c4b1a5 go1.23.3.linux-riscv64.tar.gz
-6bd72fcef72b046b6282c2d1f2c38f31600e4fe9361fcd8341500c754fb09c38 go1.23.3.linux-s390x.tar.gz
-5df382337fe2e4ea6adaafa823da5e083513a97534a38f89d691dd6f599084ca go1.23.3.netbsd-386.tar.gz
-9ae7cb6095a3e91182ac03547167e230fddd4941ed02dbdb6af663b2a53d9db7 go1.23.3.netbsd-amd64.tar.gz
-4a452c4134a9bea6213d8925d322f26b01c0eccda1330585bb2b241c76a0c3ea go1.23.3.netbsd-arm.tar.gz
-8ff3b5184d840148dbca061c04dca35a7070dc894255d3b755066bd76a7094dc go1.23.3.netbsd-arm64.tar.gz
-5b6940922e68ac1162a704a8b583fb4f039f955bfe97c35a56c40269cbcff9b1 go1.23.3.openbsd-386.tar.gz
-6ae4aeb6a88f3754b10ecec90422a30fb8bf86c3187be2be9408d67a5a235ace go1.23.3.openbsd-amd64.tar.gz
-e5eae226391b60c4d1ea1022663f55b225c6d7bab67f31fbafd5dd7a04684006 go1.23.3.openbsd-arm.tar.gz
-e12b2c04535e0bf5561d54831122b410d708519c1ec2c56b0c2350b15243c331 go1.23.3.openbsd-arm64.tar.gz
-599818e4062166d7a112f6f3fcca2dd4e2cdd3111fe48f9757bd8debf38c7f52 go1.23.3.openbsd-ppc64.tar.gz
-9ca4db8cab2a07d561f5b2a9397793684ab3b22326add1fe8cda8a545a1693db go1.23.3.openbsd-riscv64.tar.gz
-8fca1ec2aced936e0170605378ee7f0acb38f002490321f67fc83728ee281967 go1.23.3.plan9-386.tar.gz
-22d663692224fc1933a97f61d9fe49815e3b9ef1c2be97046505683fdf2e23c7 go1.23.3.plan9-amd64.tar.gz
-d0417a702d0e776d57e450fa2ce1ce7efa199a636644776862dbf946c409a462 go1.23.3.plan9-arm.tar.gz
-b5d9db1c02e0ca266a142eb687bd7749890c30872b09a4a0ffcd491425039754 go1.23.3.solaris-amd64.tar.gz
-14b7baf4af2046013b74dfac6e9a0a7403f15ee9940a16890bc028dfd32c49ac go1.23.3.windows-386.msi
-23da9089ea6c5612d718f13c26e9bfc9aaaabe222838075346a8191d48f9dfe5 go1.23.3.windows-386.zip
-614f0e3eed82245dfb4356d4e8d5b96abecca6a4c4f0168c0e389e4dd6284db8 go1.23.3.windows-amd64.msi
-81968b563642096b8a7521171e2be6e77ff6f44032f7493b7bdec9d33f44f31d go1.23.3.windows-amd64.zip
-c9951eecad732c59dfde6dc4803fa9253eb074663c61035c8d856f4d2eb146cb go1.23.3.windows-arm.msi
-1a7db02be47deada42082d21d63eba0013f93375cfa0e7768962f1295a469022 go1.23.3.windows-arm.zip
-a74e3e195219af4330b93c71cd4b736b709a5654a07cc37eebe181c4984afb82 go1.23.3.windows-arm64.msi
-dbdfa868b1a3f8c62950373e4975d83f90dd8b869a3907319af8384919bcaffe go1.23.3.windows-arm64.zip
+6ee44e298379d146a5e5aa6b1c5b5d5f5d0a3365eabdd70741e6e21340ec3b0d go1.23.1.src.tar.gz
+f17f2791717c15728ec63213a014e244c35f9c8846fb29f5a1b63d0c0556f756 go1.23.1.aix-ppc64.tar.gz
+dd9e772686ed908bcff94b6144322d4e2473a7dcd7c696b7e8b6d12f23c887fd go1.23.1.darwin-amd64.pkg
+488d9e4ca3e3ed513ee4edd91bef3a2360c65fa6d6be59cf79640bf840130a58 go1.23.1.darwin-amd64.tar.gz
+be34b488157ec69d94e26e1554558219a2c90789bcb7e3686965a7f9c8cfcbe7 go1.23.1.darwin-arm64.pkg
+e223795ca340e285a760a6446ce57a74500b30e57469a4109961d36184d3c05a go1.23.1.darwin-arm64.tar.gz
+6af626176923a6ae6c5de6dc1c864f38365793c0e4ecd0d6eab847bdc23953e5 go1.23.1.dragonfly-amd64.tar.gz
+cc957c1a019702e6cdc2e257202d42799011ebc1968b6c3bcd6b1965952607d5 go1.23.1.freebsd-386.tar.gz
+a7d57781c50bb80886a8f04066791956d45aa3eea0f83070c5268b6223afb2ff go1.23.1.freebsd-amd64.tar.gz
+c7b09f3fef456048e596db9bea746eb66796aeb82885622b0388feee18f36a3e go1.23.1.freebsd-arm.tar.gz
+b05cd6a77995a0c8439d88df124811c725fb78b942d0b6dd1643529d7ba62f1f go1.23.1.freebsd-arm64.tar.gz
+56236ae70be1613f2915943b94f53c96be5bffc0719314078facd778a89bc57e go1.23.1.freebsd-riscv64.tar.gz
+8644c52df4e831202114fd67c9fcaf1f7233ad27bf945ac53fa7217cf1a0349f go1.23.1.illumos-amd64.tar.gz
+cdee2f4e2efa001f7ee75c90f2efc310b63346cfbba7b549987e9139527c6b17 go1.23.1.linux-386.tar.gz
+49bbb517cfa9eee677e1e7897f7cf9cfdbcf49e05f61984a2789136de359f9bd go1.23.1.linux-amd64.tar.gz
+faec7f7f8ae53fda0f3d408f52182d942cc89ef5b7d3d9f23ff117437d4b2d2f go1.23.1.linux-arm64.tar.gz
+6c7832c7dcd8fb6d4eb308f672a725393403c74ee7be1aeccd8a443015df99de go1.23.1.linux-armv6l.tar.gz
+649ce3856ddc808c00b14a46232eab0bf95e7911cdf497010b17d76656f5ca4e go1.23.1.linux-loong64.tar.gz
+201911048f234e5a0c51ec94b1a11d4e47062fee4398b1d2faa6c820dc026724 go1.23.1.linux-mips.tar.gz
+2bce3743df463915e45d2612f9476ffb03d0b3750b1cb3879347de08715b5fc6 go1.23.1.linux-mips64.tar.gz
+54e301f266e33431b0703136e0bbd4cf02461b1ecedd37b7cbd90cb862a98e5f go1.23.1.linux-mips64le.tar.gz
+8efd495e93d17408c0803595cdc3bf13cb28e0f957aeabd9cc18245fb8e64019 go1.23.1.linux-mipsle.tar.gz
+52bd68689095831ad9af7160844c23b28bb8d0acd268de7e300ff5f0662b7a07 go1.23.1.linux-ppc64.tar.gz
+042888cae54b5fbfd9dd1e3b6bc4a5134879777fe6497fc4c62ec394b5ecf2da go1.23.1.linux-ppc64le.tar.gz
+1a4a609f0391bea202d9095453cbfaf7368fa88a04c206bf9dd715a738664dc3 go1.23.1.linux-riscv64.tar.gz
+47dc49ad45c45e192efa0df7dc7bc5403f5f2d15b5d0dc74ef3018154b616f4d go1.23.1.linux-s390x.tar.gz
+fbfbd5efa6a5d581ea7f5e65015f927db0e52135cab057e43d39d5482da54b61 go1.23.1.netbsd-386.tar.gz
+e96e1cc5cf36113ee6099d1a7306b22cd9c3f975a36bdff954c59f104f22b853 go1.23.1.netbsd-amd64.tar.gz
+c394dfc06bfc276a591209a37e09cd39089ec9a9cc3db30b94814ce2e39eb1d4 go1.23.1.netbsd-arm.tar.gz
+b3b35d64f32821a68b3e2994032dbefb81978f2ec3f218c7a770623b82d36b8e go1.23.1.netbsd-arm64.tar.gz
+3c775c4c16c182e33c2c4ac090d9a247a93b3fb18a3df01d87d490f29599faff go1.23.1.openbsd-386.tar.gz
+5edbe53b47c57b32707fd7154536fbe9eaa79053fea01650c93b54cdba13fc0f go1.23.1.openbsd-amd64.tar.gz
+c30903dd8fa98b8aca8e9db0962ce9f55502aed93e0ef41e5ae148aaa0088de1 go1.23.1.openbsd-arm.tar.gz
+12da183489e58f9c6b357bc1b626f85ed7d4220cab31a49d6a49e6ac6a718b67 go1.23.1.openbsd-arm64.tar.gz
+9cc9aad37696a4a10c31dcec9e35a308de0b369dad354d54cf07406ac6fa7c6f go1.23.1.openbsd-ppc64.tar.gz
+e1d740dda062ce5a276a0c3ed7d8b6353238bc8ff405f63e2e3480bfd26a5ec5 go1.23.1.openbsd-riscv64.tar.gz
+da2a37f9987f01f096859230aa13ecc4ad2e7884465bce91004bc78c64435d65 go1.23.1.plan9-386.tar.gz
+fd8fff8b0697d55c4a4d02a8dc998192b80a9dc2a057647373d6ff607cad29de go1.23.1.plan9-amd64.tar.gz
+52efbc5804c1c86ba7868aa8ebbc31cc8c2a27b62a60fd57944970d48fc67525 go1.23.1.plan9-arm.tar.gz
+f54205f21e2143f2ada1bf1c00ddf64590f5139d5c3fb77cc06175f0d8cc7567 go1.23.1.solaris-amd64.tar.gz
+369a17f0cfd29e5c848e58ffe0d772da20abe334d1c7ca01dbcd55bb3db0b440 go1.23.1.windows-386.msi
+ab866f47d7be56e6b1c67f1d529bf4c23331a339fb0785f435a0552d352cb257 go1.23.1.windows-386.zip
+e99dac215ee437b9bb8f8b14bbfe0e8756882c1ed291f30818e8363bc9c047a5 go1.23.1.windows-amd64.msi
+32dedf277c86610e380e1765593edb66876f00223df71690bd6be68ee17675c0 go1.23.1.windows-amd64.zip
+23169c79dc6b54e0dffb25be6b67425ad9759392a58309bc057430a9bf4c8f6a go1.23.1.windows-arm.msi
+1a57615a09f13534f88e9f2d7efd5743535d1a5719b19e520eef965a634f8efb go1.23.1.windows-arm.zip
+313e1a543931ad8735b4df8969e00f5f4c2ef07be21f54015ede961a70263d35 go1.23.1.windows-arm64.msi
+64ad0954d2c33f556fb1018d62de091254aa6e3a94f1c8a8b16af0d3701d194e go1.23.1.windows-arm64.zip
-# version:golangci 1.61.0
+# version:golangci 1.59.0
# https://github.com/golangci/golangci-lint/releases/
-# https://github.com/golangci/golangci-lint/releases/download/v1.61.0/
-5c280ef3284f80c54fd90d73dc39ca276953949da1db03eb9dd0fbf868cc6e55 golangci-lint-1.61.0-darwin-amd64.tar.gz
-544334890701e4e04a6e574bc010bea8945205c08c44cced73745a6378012d36 golangci-lint-1.61.0-darwin-arm64.tar.gz
-e885a6f561092055930ebd298914d80e8fd2e10d2b1e9942836c2c6a115301fa golangci-lint-1.61.0-freebsd-386.tar.gz
-b13f6a3f11f65e7ff66b734d7554df3bbae0f485768848424e7554ed289e19c2 golangci-lint-1.61.0-freebsd-amd64.tar.gz
-cd8e7bbe5b8f33ed1597aa1cc588da96a3b9f22e1b9ae60d93511eae1a0ee8c5 golangci-lint-1.61.0-freebsd-armv6.tar.gz
-7ade524dbd88bd250968f45e190af90e151fa5ee63dd6aa7f7bb90e8155db61d golangci-lint-1.61.0-freebsd-armv7.tar.gz
-0fe3cd8a1ed8d9f54f48670a5af3df056d6040d94017057f0f4d65c930660ad9 golangci-lint-1.61.0-illumos-amd64.tar.gz
-b463fc5053a612abd26393ebaff1d85d7d56058946f4f0f7bf25ed44ea899415 golangci-lint-1.61.0-linux-386.tar.gz
-77cb0af99379d9a21d5dc8c38364d060e864a01bd2f3e30b5e8cc550c3a54111 golangci-lint-1.61.0-linux-amd64.tar.gz
-af60ac05566d9351615cb31b4cc070185c25bf8cbd9b09c1873aa5ec6f3cc17e golangci-lint-1.61.0-linux-arm64.tar.gz
-1f307f2fcc5d7d674062a967a0d83a7091e300529aa237ec6ad2b3dd14c897f5 golangci-lint-1.61.0-linux-armv6.tar.gz
-3ad8cbaae75a547450844811300f99c4cd290277398e43d22b9eb1792d15af4c golangci-lint-1.61.0-linux-armv7.tar.gz
-9be2ca67d961d7699079739cf6f7c8291c5183d57e34d1677de21ca19d0bd3ed golangci-lint-1.61.0-linux-loong64.tar.gz
-90d005e1648115ebf0861b408eab9c936079a24763e883058b0a227cd3135d31 golangci-lint-1.61.0-linux-mips64.tar.gz
-6d2ed4f49407115460b8c10ccfc40fd177e0887a48864a2879dd16e84ba2a48c golangci-lint-1.61.0-linux-mips64le.tar.gz
-633089589af5a58b7430afb6eee107d4e9c99e8d91711ddc219eb13a07e8d3b8 golangci-lint-1.61.0-linux-ppc64le.tar.gz
-4c1a097d9e0d1b4a8144dae6a1f5583a38d662f3bdc1498c4e954b6ed856be98 golangci-lint-1.61.0-linux-riscv64.tar.gz
-30581d3c987d287b7064617f1a2694143e10dffc40bc25be6636006ee82d7e1c golangci-lint-1.61.0-linux-s390x.tar.gz
-42530bf8100bd43c07f5efe6d92148ba6c5a7a712d510c6f24be85af6571d5eb golangci-lint-1.61.0-netbsd-386.tar.gz
-b8bb07c920f6601edf718d5e82ec0784fd590b0992b42b6ec18da99f26013ed4 golangci-lint-1.61.0-netbsd-amd64.tar.gz
-353a51527c60bd0776b0891b03f247c791986f625fca689d121972c624e54198 golangci-lint-1.61.0-netbsd-arm64.tar.gz
-957a6272c3137910514225704c5dac0723b9c65eb7d9587366a997736e2d7580 golangci-lint-1.61.0-netbsd-armv6.tar.gz
-a89eb28ff7f18f5cd52b914739360fa95cf2f643de4adeca46e26bec3a07e8d8 golangci-lint-1.61.0-netbsd-armv7.tar.gz
-d8d74c43600b271393000717a4ed157d7a15bb85bab7db2efad9b63a694d4634 golangci-lint-1.61.0-windows-386.zip
-e7bc2a81929a50f830244d6d2e657cce4f19a59aff49fa9000176ff34fda64ce golangci-lint-1.61.0-windows-amd64.zip
-ed97c221596dd771e3dd9344872c140340bee2e819cd7a90afa1de752f1f2e0f golangci-lint-1.61.0-windows-arm64.zip
-4b365233948b13d02d45928a5c390045e00945e919747b9887b5f260247541ae golangci-lint-1.61.0-windows-armv6.zip
-595538fb64d152173959d28f6235227f9cd969a828e5af0c4e960d02af4ffd0e golangci-lint-1.61.0-windows-armv7.zip
+# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
+418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
+5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
+8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
+658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
+4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
+ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
+439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
+940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
+3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
+c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
+93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
+d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
+047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
+5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
+71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
+6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
+af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
+a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
+68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
+d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
+83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
+6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
+11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
+466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
+3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
+b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
+6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
+3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#
diff --git a/build/ci.go b/build/ci.go
index 754d88a86ad1..4d96ca1729fc 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -44,16 +44,17 @@ package main
import (
"bytes"
+ "crypto/sha256"
"encoding/base64"
"flag"
"fmt"
+ "io"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
- "slices"
"strings"
"time"
@@ -123,12 +124,11 @@ var (
// Distros for which packages are created
debDistros = []string{
- "xenial", // 16.04, EOL: 04/2026
- "bionic", // 18.04, EOL: 04/2028
- "focal", // 20.04, EOL: 04/2030
- "jammy", // 22.04, EOL: 04/2032
- "noble", // 24.04, EOL: 04/2034
- "oracular", // 24.10, EOL: 07/2025
+ "xenial", // 16.04, EOL: 04/2026
+ "bionic", // 18.04, EOL: 04/2028
+ "focal", // 20.04, EOL: 04/2030
+ "jammy", // 22.04, EOL: 04/2032
+ "noble", // 24.04, EOL: 04/2034
}
// This is where the tests should be unpacked.
@@ -178,6 +178,8 @@ func main() {
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
+ case "generate":
+ doGenerate()
default:
log.Fatal("unknown command ", os.Args[1])
}
@@ -355,93 +357,128 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
return filepath.Join(cachedir, base)
}
-// doCheckTidy assets that the Go modules files are tidied already.
-func doCheckTidy() {
- targets := []string{"go.mod", "go.sum"}
-
- hashes, err := build.HashFiles(targets)
+// hashAllSourceFiles iterates over all files under the top-level project
+// directory, computing the hash of each file (excluding files within the
+// tests subrepo).
+func hashAllSourceFiles() (map[string]common.Hash, error) {
+ res := make(map[string]common.Hash)
+ err := filepath.WalkDir(".", func(path string, d os.DirEntry, err error) error {
+ if strings.HasPrefix(path, filepath.FromSlash("tests/testdata")) {
+ return filepath.SkipDir
+ }
+ if !d.Type().IsRegular() {
+ return nil
+ }
+ // open the file and hash it
+ f, err := os.OpenFile(path, os.O_RDONLY, 0666)
+ if err != nil {
+ return err
+ }
+ hasher := sha256.New()
+ if _, err := io.Copy(hasher, f); err != nil {
+ return err
+ }
+ res[path] = common.Hash(hasher.Sum(nil))
+ return nil
+ })
if err != nil {
- log.Fatalf("failed to hash go.mod/go.sum: %v", err)
+ return nil, err
+ }
+ return res, nil
+}
+
+// hashSourceFiles iterates the provided set of filepaths (relative to the top-level geth project directory)
+// computing the hash of each file.
+func hashSourceFiles(files []string) (map[string]common.Hash, error) {
+ res := make(map[string]common.Hash)
+ for _, filePath := range files {
+ f, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
+ if err != nil {
+ return nil, err
+ }
+ hasher := sha256.New()
+ if _, err := io.Copy(hasher, f); err != nil {
+ return nil, err
+ }
+ res[filePath] = common.Hash(hasher.Sum(nil))
}
- build.MustRun(new(build.GoToolchain).Go("mod", "tidy"))
+ return res, nil
+}
- tidied, err := build.HashFiles(targets)
+// compareHashedFilesets compares two maps (key is relative file path to top-level geth directory, value is its hash)
+// and returns the list of file paths whose hashes differed.
+func compareHashedFilesets(preHashes map[string]common.Hash, postHashes map[string]common.Hash) []string {
+ updates := []string{}
+ for path, postHash := range postHashes {
+ preHash, ok := preHashes[path]
+ if !ok || preHash != postHash {
+ updates = append(updates, path)
+ }
+ }
+ return updates
+}
+
+func doGoModTidy() {
+ targetFiles := []string{"go.mod", "go.sum"}
+ preHashes, err := hashSourceFiles(targetFiles)
if err != nil {
- log.Fatalf("failed to rehash go.mod/go.sum: %v", err)
+ log.Fatal("failed to hash go.mod/go.sum", "err", err)
+ }
+ tc := new(build.GoToolchain)
+ c := tc.Go("mod", "tidy")
+ build.MustRun(c)
+ postHashes, err := hashSourceFiles(targetFiles)
+ if err != nil {
+ log.Fatal("failed to rehash go.mod/go.sum", "err", err)
+ }
+ updates := compareHashedFilesets(preHashes, postHashes)
+ for _, updatedFile := range updates {
+ fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
}
- if updates := build.DiffHashes(hashes, tidied); len(updates) > 0 {
- log.Fatalf("files changed on running 'go mod tidy': %v", updates)
+ if len(updates) != 0 {
+ log.Fatal("go.sum and/or go.mod were updated by running 'go mod tidy'")
}
- fmt.Println("No untidy module files detected.")
}
-// doCheckGenerate ensures that re-generating generated files does not cause
-// any mutations in the source file tree.
-func doCheckGenerate() {
+// doGenerate ensures that re-generating generated files does not cause
+// any mutations in the source file tree: i.e. all generated files were
+// updated and committed. Any stale generated files are updated.
+func doGenerate() {
var (
+ tc = new(build.GoToolchain)
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
+ verify = flag.Bool("verify", false, "check whether any files are changed by go generate")
)
- // Compute the origin hashes of all the files
- var hashes map[string][32]byte
- var err error
- hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache"})
- if err != nil {
- log.Fatal("Error computing hashes", "err", err)
+ protocPath := downloadProtoc(*cachedir)
+ protocGenGoPath := downloadProtocGenGo(*cachedir)
+
+ var preHashes map[string]common.Hash
+ if *verify {
+ var err error
+ preHashes, err = hashAllSourceFiles()
+ if err != nil {
+ log.Fatal("failed to compute map of source hashes", "err", err)
+ }
}
- // Run any go generate steps we might be missing
- var (
- protocPath = downloadProtoc(*cachedir)
- protocGenGoPath = downloadProtocGenGo(*cachedir)
- )
- c := new(build.GoToolchain).Go("generate", "./...")
+
+ c := tc.Go("generate", "./...")
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
build.MustRun(c)
- // Check if generate file hashes have changed
- generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache"})
+ if !*verify {
+ return
+ }
+ // Check if files were changed.
+ postHashes, err := hashAllSourceFiles()
if err != nil {
- log.Fatalf("Error re-computing hashes: %v", err)
+ log.Fatal("error computing source tree file hashes", "err", err)
}
- updates := build.DiffHashes(hashes, generated)
- for _, file := range updates {
- log.Printf("File changed: %s", file)
+ updates := compareHashedFilesets(preHashes, postHashes)
+ for _, updatedFile := range updates {
+ fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
}
- fmt.Println("No stale files detected.")
-}
-
-// doCheckBadDeps verifies whether certain unintended dependencies between some
-// packages leak into the codebase due to a refactor. This is not an exhaustive
-// list, rather something we build up over time at sensitive places.
-func doCheckBadDeps() {
- baddeps := [][2]string{
- // Rawdb tends to be a dumping ground for db utils, sometimes leaking the db itself
- {"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/leveldb"},
- {"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/pebbledb"},
- }
- tc := new(build.GoToolchain)
-
- var failed bool
- for _, rule := range baddeps {
- out, err := tc.Go("list", "-deps", rule[0]).CombinedOutput()
- if err != nil {
- log.Fatalf("Failed to list '%s' dependencies: %v", rule[0], err)
- }
- for _, line := range strings.Split(string(out), "\n") {
- if strings.TrimSpace(line) == rule[1] {
- log.Printf("Found bad dependency '%s' -> '%s'", rule[0], rule[1])
- failed = true
- }
- }
- }
- if failed {
- log.Fatalf("Bad dependencies detected.")
- }
- fmt.Println("No bad dependencies detected.")
}
// doLint runs golangci-lint on requested packages.
@@ -458,6 +495,8 @@ func doLint(cmdline []string) {
linter := downloadLinter(*cachedir)
lflags := []string{"run", "--config", ".golangci.yml"}
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)
+
+ doGoModTidy()
fmt.Println("You have achieved perfection.")
}
@@ -737,7 +776,7 @@ func doDockerBuildx(cmdline []string) {
gethImage := fmt.Sprintf("%s%s", spec.base, tag)
build.MustRunCommand("docker", "buildx", "build",
"--build-arg", "COMMIT="+env.Commit,
- "--build-arg", "VERSION="+version.WithMeta,
+ "--build-arg", "VERSION="+params.VersionWithMeta,
"--build-arg", "BUILDNUM="+env.Buildnum,
"--tag", gethImage,
"--platform", *platform,
diff --git a/cmd/blsync/main.go b/cmd/blsync/main.go
index d74e1496cd82..f9b8575edf9c 100644
--- a/cmd/blsync/main.go
+++ b/cmd/blsync/main.go
@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"os"
- "slices"
"github.com/ethereum/go-ethereum/beacon/blsync"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -34,7 +33,7 @@ import (
func main() {
app := flags.NewApp("beacon light syncer tool")
- app.Flags = slices.Concat([]cli.Flag{
+ app.Flags = flags.Merge([]cli.Flag{
utils.BeaconApiFlag,
utils.BeaconApiHeaderFlag,
utils.BeaconThresholdFlag,
@@ -46,7 +45,6 @@ func main() {
//TODO datadir for optional permanent database
utils.MainnetFlag,
utils.SepoliaFlag,
- utils.HoleskyFlag,
utils.BlsyncApiFlag,
utils.BlsyncJWTSecretFlag,
},
@@ -70,7 +68,7 @@ func main() {
func sync(ctx *cli.Context) error {
// set up blsync
- client := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
+ client := blsync.NewClient(ctx)
client.SetEngineRPC(makeRPCClient(ctx))
client.Start()
diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go
index 8c48b3a557c1..4fc40589e064 100644
--- a/cmd/devp2p/discv4cmd.go
+++ b/cmd/devp2p/discv4cmd.go
@@ -21,7 +21,6 @@ import (
"fmt"
"net"
"net/http"
- "slices"
"strconv"
"strings"
"time"
@@ -29,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -83,7 +83,7 @@ var (
Name: "listen",
Usage: "Runs a discovery node",
Action: discv4Listen,
- Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
+ Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
httpAddrFlag,
}),
}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index f80dd02c67a7..a0b242b2a6f4 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -69,12 +69,8 @@ type ExecutionResult struct {
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
- RequestsHash *common.Hash `json:"requestsHash,omitempty"`
- Requests [][]byte `json:"requests,omitempty"`
-}
-
-type executionResultMarshaling struct {
- Requests []hexutil.Bytes `json:"requests,omitempty"`
+ RequestsHash *common.Hash `json:"requestsRoot,omitempty"`
+ DepositRequests *types.Deposits `json:"depositRequests,omitempty"`
}
type ommer struct {
@@ -132,7 +128,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIt txIterator, miningReward int64,
- getTracerFn func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
+ getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes
var hashError error
@@ -242,7 +238,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue
}
}
- tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash(), chainConfig)
+ tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
if err != nil {
return nil, nil, nil, err
}
@@ -408,17 +404,28 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
- if requests != nil {
- // Set requestsHash on block.
- h := types.CalcRequestsHash(requests)
+ if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
+ // Parse the requests from the logs
+ var allLogs []*types.Log
+ for _, receipt := range receipts {
+ allLogs = append(allLogs, receipt.Logs...)
+ }
+ requests, err := core.ParseDepositLogs(allLogs, chainConfig)
+ if err != nil {
+ return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
+ }
+ // Calculate the requests root
+ h := types.DeriveSha(requests, trie.NewStackTrie(nil))
execRs.RequestsHash = &h
- for i := range requests {
- // remove prefix
- requests[i] = requests[i][1:]
+ // Get the deposits from the requests
+ deposits := make(types.Deposits, 0)
+ for _, req := range requests {
+ if dep, ok := req.Inner().(*types.Deposit); ok {
+ deposits = append(deposits, dep)
+ }
}
- execRs.Requests = requests
+ execRs.DepositRequests = &deposits
}
-
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
statedb, err = state.New(root, statedb.Database())
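
The restored Prague branch above flattens the receipts' logs, parses deposit requests out of them, derives the requests root, and then unwraps the typed deposits for the result. The same flow condensed into one hypothetical helper (API names as used in the hunk; assumes this branch's `core.ParseDepositLogs` signature):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// depositRequestsFromReceipts flattens the receipts' logs, parses deposit
// requests from them, and returns the requests root plus the typed deposits.
func depositRequestsFromReceipts(receipts types.Receipts, config *params.ChainConfig) (common.Hash, types.Deposits, error) {
	var logs []*types.Log
	for _, receipt := range receipts {
		logs = append(logs, receipt.Logs...)
	}
	requests, err := core.ParseDepositLogs(logs, config)
	if err != nil {
		return common.Hash{}, nil, err
	}
	root := types.DeriveSha(requests, trie.NewStackTrie(nil))

	deposits := make(types.Deposits, 0, len(requests))
	for _, req := range requests {
		if dep, ok := req.Inner().(*types.Deposit); ok {
			deposits = append(deposits, dep)
		}
	}
	return root, deposits, nil
}
```
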
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index d8665d22d34b..d9a44985de0d 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -82,9 +82,7 @@ type input struct {
}
func Transition(ctx *cli.Context) error {
- var getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
- return nil, nil, nil
- }
+ var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
baseDir, err := createBasedir(ctx)
if err != nil {
@@ -99,7 +97,7 @@ func Transition(ctx *cli.Context) error {
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true,
}
- getTracer = func(txIndex int, txHash common.Hash, _ *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
@@ -123,7 +121,7 @@ func Transition(ctx *cli.Context) error {
if ctx.IsSet(TraceTracerConfigFlag.Name) {
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
}
- getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index 0d4471b8d544..931261a6327c 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -149,32 +149,31 @@ var (
}
)
-var (
- stateTransitionCommand = &cli.Command{
- Name: "transition",
- Aliases: []string{"t8n"},
- Usage: "Executes a full state transition",
- Action: t8ntool.Transition,
- Flags: []cli.Flag{
- t8ntool.TraceFlag,
- t8ntool.TraceTracerFlag,
- t8ntool.TraceTracerConfigFlag,
- t8ntool.TraceEnableMemoryFlag,
- t8ntool.TraceDisableStackFlag,
- t8ntool.TraceEnableReturnDataFlag,
- t8ntool.TraceEnableCallFramesFlag,
- t8ntool.OutputBasedir,
- t8ntool.OutputAllocFlag,
- t8ntool.OutputResultFlag,
- t8ntool.OutputBodyFlag,
- t8ntool.InputAllocFlag,
- t8ntool.InputEnvFlag,
- t8ntool.InputTxsFlag,
- t8ntool.ForknameFlag,
- t8ntool.ChainIDFlag,
- t8ntool.RewardFlag,
- },
- }
+var stateTransitionCommand = &cli.Command{
+ Name: "transition",
+ Aliases: []string{"t8n"},
+ Usage: "Executes a full state transition",
+ Action: t8ntool.Transition,
+ Flags: []cli.Flag{
+ t8ntool.TraceFlag,
+ t8ntool.TraceTracerFlag,
+ t8ntool.TraceTracerConfigFlag,
+ t8ntool.TraceEnableMemoryFlag,
+ t8ntool.TraceDisableStackFlag,
+ t8ntool.TraceEnableReturnDataFlag,
+ t8ntool.TraceEnableCallFramesFlag,
+ t8ntool.OutputBasedir,
+ t8ntool.OutputAllocFlag,
+ t8ntool.OutputResultFlag,
+ t8ntool.OutputBodyFlag,
+ t8ntool.InputAllocFlag,
+ t8ntool.InputEnvFlag,
+ t8ntool.InputTxsFlag,
+ t8ntool.ForknameFlag,
+ t8ntool.ChainIDFlag,
+ t8ntool.RewardFlag,
+ },
+}
transactionCommand = &cli.Command{
Name: "transaction",
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index d0a0d3287cb5..5f217692a786 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -92,7 +92,7 @@ func stateTestCmd(ctx *cli.Context) error {
}
// Load the test content from the input file
if len(ctx.Args().First()) != 0 {
- return runStateTest(ctx, ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name), ctx.Bool(BenchFlag.Name))
+ return runStateTest(ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name))
}
// Read filenames from stdin and execute back-to-back
scanner := bufio.NewScanner(os.Stdin)
@@ -101,7 +101,7 @@ func stateTestCmd(ctx *cli.Context) error {
if len(fname) == 0 {
return nil
}
- if err := runStateTest(ctx, fname, cfg, ctx.Bool(DumpFlag.Name), ctx.Bool(BenchFlag.Name)); err != nil {
+ if err := runStateTest(fname, cfg, ctx.Bool(DumpFlag.Name)); err != nil {
return err
}
}
@@ -142,7 +142,7 @@ func collectMatchedSubtests(ctx *cli.Context, testsByName map[string]tests.State
}
// runStateTest loads the state-test given by fname, and executes the test.
-func runStateTest(ctx *cli.Context, fname string, cfg vm.Config, dump bool, bench bool) error {
+func runStateTest(fname string, cfg vm.Config, dump bool) error {
src, err := os.ReadFile(fname)
if err != nil {
return err
@@ -155,20 +155,26 @@ func runStateTest(ctx *cli.Context, fname string, cfg vm.Config, dump bool, benc
matchingTests := collectMatchedSubtests(ctx, testsByName)
// Iterate over all the tests, run them and aggregate the results
- var results []StatetestResult
- for _, test := range matchingTests {
- // Run the test and aggregate the result
- result := &StatetestResult{Name: test.name, Fork: test.st.Fork, Pass: true}
- test.test.Run(test.st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
- var root common.Hash
- if tstate.StateDB != nil {
- root = tstate.StateDB.IntermediateRoot(false)
- result.Root = &root
- fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
- if dump { // Dump any state to aid debugging
- cpy, _ := state.New(root, tstate.StateDB.Database())
- dump := cpy.RawDump(nil)
- result.State = &dump
+ results := make([]StatetestResult, 0, len(testsByName))
+ for key, test := range testsByName {
+ for _, st := range test.Subtests() {
+ // Run the test and aggregate the result
+ result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
+ test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
+ var root common.Hash
+ if tstate.StateDB != nil {
+ root = tstate.StateDB.IntermediateRoot(false)
+ result.Root = &root
+ fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
+ if dump { // Dump any state to aid debugging
+ cpy, _ := state.New(root, tstate.StateDB.Database())
+ dump := cpy.RawDump(nil)
+ result.State = &dump
+ }
+ }
+ if err != nil {
+ // Test failed, mark as so
+ result.Pass, result.Error = false, err.Error()
}
}
if err != nil {
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index ab96a49e3d30..65e992400189 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -43,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/naoina/toml"
"github.com/urfave/cli/v2"
@@ -243,7 +244,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
// Start blsync mode.
srv := rpc.NewServer()
srv.RegisterName("engine", catalyst.NewConsensusAPI(eth))
- blsyncer := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
+ blsyncer := blsync.NewClient(ctx)
blsyncer.SetEngineRPC(rpc.DialInProc(srv))
stack.RegisterLifecycle(blsyncer)
} else {
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index b8c2c498a64d..2cbf4a6bb17b 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -129,7 +129,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
attach.SetTemplateFunc("gover", runtime.Version)
- attach.SetTemplateFunc("gethver", func() string { return version.WithCommit("", "") })
+ attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
attach.SetTemplateFunc("niltime", func() string {
return time.Unix(1695902100, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 7622246050df..99a958b73c0e 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -212,7 +212,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
Name: "inspect-history",
Usage: "Inspect the state history within block range",
ArgsUsage: "<address> [OPTIONAL <storage-key>]",
- Flags: slices.Concat([]cli.Flag{
+ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
&cli.Uint64Flag{
Name: "start",
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index fbbc3f3f6528..00612d3c14dd 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -144,7 +144,7 @@ var (
utils.GpoPercentileFlag,
utils.GpoMaxGasPriceFlag,
utils.GpoIgnoreGasPriceFlag,
- // CHANGE(taiko): use default gas price flag
+ // CHANGE(taiko): add `--gpo.defaultprice` flag
utils.GpoDefaultGasPriceFlag,
configFileFlag,
utils.LogDebugFlag,
@@ -356,7 +356,8 @@ func geth(ctx *cli.Context) error {
}
// startNode boots up the system node and all registered protocols, after which
-// it starts the RPC/IPC interfaces and the miner.
+// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
+// miner.
func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
// Start up the node itself
utils.StartNode(ctx, stack, isConsole)
@@ -430,3 +431,33 @@ func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
}()
}
}
+
+// unlockAccounts unlocks any account specifically requested.
+func unlockAccounts(ctx *cli.Context, stack *node.Node) {
+ var unlocks []string
+ inputs := strings.Split(ctx.String(utils.UnlockedAccountFlag.Name), ",")
+ for _, input := range inputs {
+ if trimmed := strings.TrimSpace(input); trimmed != "" {
+ unlocks = append(unlocks, trimmed)
+ }
+ }
+ // Short circuit if there is no account to unlock.
+ if len(unlocks) == 0 {
+ return
+ }
+ // Unlocking is forbidden (fatal error) when the node's APIs are exposed to
+ // external callers and insecure unlocking has not been explicitly allowed.
+ if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() {
+ utils.Fatalf("Account unlock with HTTP access is forbidden!")
+ }
+ backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(backends) == 0 {
+ log.Warn("Failed to unlock accounts, keystore is not available")
+ return
+ }
+ ks := backends[0].(*keystore.KeyStore)
+ passwords := utils.MakePasswordList(ctx)
+ for i, account := range unlocks {
+ unlockAccount(ks, account, i, passwords)
+ }
+}
diff --git a/cmd/geth/testdata/clique.json b/cmd/geth/testdata/clique.json
index d318f4c16612..ef5508e943b8 100644
--- a/cmd/geth/testdata/clique.json
+++ b/cmd/geth/testdata/clique.json
@@ -8,7 +8,7 @@
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
- "terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true,
"clique": {
"period": 5,
"epoch": 30000
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 67dcd67fa9e7..3e00c0743c9c 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -320,7 +320,7 @@ var (
Usage: "Target EL engine API URL",
Category: flags.BeaconCategory,
}
- BlsyncJWTSecretFlag = &flags.DirectoryFlag{
+ BlsyncJWTSecretFlag = &cli.StringFlag{
Name: "blsync.jwtsecret",
Usage: "Path to a JWT secret to use for target engine API endpoint",
Category: flags.BeaconCategory,
@@ -525,7 +525,6 @@ var (
VMTraceJsonConfigFlag = &cli.StringFlag{
Name: "vmtrace.jsonconfig",
Usage: "Tracer configuration (JSON)",
- Value: "{}",
Category: flags.VMCategory,
}
// API options.
@@ -840,7 +839,7 @@ var (
Value: ethconfig.Defaults.GPO.IgnorePrice.Int64(),
Category: flags.GasPriceCategory,
}
- // CHANGE(taiko): use default gas price flag
+ // CHANGE(taiko): add `--gpo.defaultprice` flag.
GpoDefaultGasPriceFlag = &cli.Int64Flag{
Name: "gpo.defaultprice",
Usage: "Default gas price",
@@ -1282,6 +1281,24 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b)
}
+// MakePasswordList reads password lines from the file specified by the global --password flag.
+func MakePasswordList(ctx *cli.Context) []string {
+ path := ctx.Path(PasswordFileFlag.Name)
+ if path == "" {
+ return nil
+ }
+ text, err := os.ReadFile(path)
+ if err != nil {
+ Fatalf("Failed to read password file: %v", err)
+ }
+ lines := strings.Split(string(text), "\n")
+ // Sanitise DOS line endings.
+ for i := range lines {
+ lines[i] = strings.TrimRight(lines[i], "\r")
+ }
+ return lines
+}
+
func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setNodeKey(ctx, cfg)
setNAT(ctx, cfg)
@@ -1428,10 +1445,6 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.IsSet(GpoIgnoreGasPriceFlag.Name) {
cfg.IgnorePrice = big.NewInt(ctx.Int64(GpoIgnoreGasPriceFlag.Name))
}
- // CHANGE(taiko): use flag
- if ctx.IsSet(GpoDefaultGasPriceFlag.Name) {
- cfg.Default = big.NewInt(ctx.Int64(GpoDefaultGasPriceFlag.Name))
- }
}
func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
@@ -1731,7 +1744,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
// Override any default configs for hard coded networks.
switch {
- // CHANGE(taiko): when --taiko flag is set, use the Taiko genesis.
+ // CHANGE(taiko): when `--taiko` flag is set, use the Taiko genesis.
case ctx.IsSet(TaikoFlag.Name):
cfg.Genesis = core.TaikoGenesisBlock(cfg.NetworkId)
case ctx.Bool(MainnetFlag.Name):
@@ -1814,6 +1827,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if err != nil {
Fatalf("Could not read genesis from database: %v", err)
}
+ if !genesis.Config.TerminalTotalDifficultyPassed {
+ Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true")
+ }
if genesis.Config.TerminalTotalDifficulty == nil {
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified")
} else if genesis.Config.TerminalTotalDifficulty.Cmp(big.NewInt(0)) != 0 {
@@ -1844,85 +1860,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// VM tracing config.
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- cfg.VMTrace = name
- cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
- }
- }
-}
+ var config string
+ if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
+ config = ctx.String(VMTraceJsonConfigFlag.Name)
+ }
-// MakeBeaconLightConfig constructs a beacon light client config based on the
-// related command line flags.
-func MakeBeaconLightConfig(ctx *cli.Context) bparams.ClientConfig {
- var config bparams.ClientConfig
- customConfig := ctx.IsSet(BeaconConfigFlag.Name)
- CheckExclusive(ctx, MainnetFlag, SepoliaFlag, HoleskyFlag, BeaconConfigFlag)
- switch {
- case ctx.Bool(MainnetFlag.Name):
- config.ChainConfig = *bparams.MainnetLightConfig
- case ctx.Bool(SepoliaFlag.Name):
- config.ChainConfig = *bparams.SepoliaLightConfig
- case ctx.Bool(HoleskyFlag.Name):
- config.ChainConfig = *bparams.HoleskyLightConfig
- default:
- if !customConfig {
- config.ChainConfig = *bparams.MainnetLightConfig
- }
- }
- // Genesis root and time should always be specified together with custom chain config
- if customConfig {
- if !ctx.IsSet(BeaconGenesisRootFlag.Name) {
- Fatalf("Custom beacon chain config is specified but genesis root is missing")
- }
- if !ctx.IsSet(BeaconGenesisTimeFlag.Name) {
- Fatalf("Custom beacon chain config is specified but genesis time is missing")
- }
- if !ctx.IsSet(BeaconCheckpointFlag.Name) {
- Fatalf("Custom beacon chain config is specified but checkpoint is missing")
- }
- config.ChainConfig = bparams.ChainConfig{
- GenesisTime: ctx.Uint64(BeaconGenesisTimeFlag.Name),
- }
- if c, err := hexutil.Decode(ctx.String(BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
- copy(config.GenesisValidatorsRoot[:len(c)], c)
- } else {
- Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(BeaconGenesisRootFlag.Name), "error", err)
- }
- configFile := ctx.String(BeaconConfigFlag.Name)
- if err := config.ChainConfig.LoadForks(configFile); err != nil {
- Fatalf("Could not load beacon chain config", "file", configFile, "error", err)
- }
- log.Info("Using custom beacon chain config", "file", configFile)
- } else {
- if ctx.IsSet(BeaconGenesisRootFlag.Name) {
- Fatalf("Genesis root is specified but custom beacon chain config is missing")
- }
- if ctx.IsSet(BeaconGenesisTimeFlag.Name) {
- Fatalf("Genesis time is specified but custom beacon chain config is missing")
- }
- }
- // Checkpoint is required with custom chain config and is optional with pre-defined config
- if ctx.IsSet(BeaconCheckpointFlag.Name) {
- if c, err := hexutil.Decode(ctx.String(BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
- copy(config.Checkpoint[:len(c)], c)
- } else {
- Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(BeaconCheckpointFlag.Name), "error", err)
- }
- }
- config.Apis = ctx.StringSlice(BeaconApiFlag.Name)
- if config.Apis == nil {
- Fatalf("Beacon node light client API URL not specified")
- }
- config.CustomHeader = make(map[string]string)
- for _, s := range ctx.StringSlice(BeaconApiHeaderFlag.Name) {
- kv := strings.Split(s, ":")
- if len(kv) != 2 {
- Fatalf("Invalid custom API header entry: %s", s)
+ cfg.VMTrace = name
+ cfg.VMTraceJsonConfig = config
}
- config.CustomHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
}
- config.Threshold = ctx.Int(BeaconThresholdFlag.Name)
- config.NoFilter = ctx.Bool(BeaconNoFilterFlag.Name)
- return config
}
// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
@@ -2201,7 +2147,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- config := json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
+ var config json.RawMessage
+ if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
+ config = json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
+ }
t, err := tracers.LiveDirectory.New(name, config)
if err != nil {
Fatalf("Failed to create tracer %q: %v", name, err)
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index ff63dd5685be..f145f605d6aa 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -153,23 +153,6 @@ var (
Usage: "Enable expensive metrics collection and reporting (deprecated)",
Category: flags.DeprecatedCategory,
}
- // Deprecated Oct 2024
- EnablePersonal = &cli.BoolFlag{
- Name: "rpc.enabledeprecatedpersonal",
- Usage: "This used to enable the 'personal' namespace.",
- Category: flags.DeprecatedCategory,
- }
- UnlockedAccountFlag = &cli.StringFlag{
- Name: "unlock",
- Usage: "Comma separated list of accounts to unlock (deprecated)",
- Value: "",
- Category: flags.DeprecatedCategory,
- }
- InsecureUnlockAllowedFlag = &cli.BoolFlag{
- Name: "allow-insecure-unlock",
- Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http (deprecated)",
- Category: flags.DeprecatedCategory,
- }
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
diff --git a/common/math/big.go b/common/math/big.go
index 825f4baec9ee..c6eec88c6758 100644
--- a/common/math/big.go
+++ b/common/math/big.go
@@ -177,3 +177,38 @@ func U256(x *big.Int) *big.Int {
func U256Bytes(n *big.Int) []byte {
return PaddedBigBytes(U256(n), 32)
}
+
+// S256 interprets x as a two's complement number.
+// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
+//
+// S256(0) = 0
+// S256(1) = 1
+// S256(2**255) = -2**255
+// S256(2**256-1) = -1
+func S256(x *big.Int) *big.Int {
+ if x.Cmp(tt255) < 0 {
+ return x
+ }
+ return new(big.Int).Sub(x, tt256)
+}
+
+// Exp implements exponentiation by squaring.
+// Exp returns a newly-allocated big integer and does not change
+// base or exponent. The result is truncated to 256 bits.
+//
+// Courtesy @karalabe and @chfast
+func Exp(base, exponent *big.Int) *big.Int {
+ copyBase := new(big.Int).Set(base)
+ result := big.NewInt(1)
+
+ for _, word := range exponent.Bits() {
+ for i := 0; i < wordBits; i++ {
+ if word&1 == 1 {
+ U256(result.Mul(result, copyBase))
+ }
+ U256(copyBase.Mul(copyBase, copyBase))
+ word >>= 1
+ }
+ }
+ return result
+}
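For reference, a standalone check of the S256 semantics documented above (two's-complement reading of a 256-bit unsigned value), re-implemented with only math/big so it runs without the geth module:

```go
package main

import (
	"fmt"
	"math/big"
)

var (
	tt255 = new(big.Int).Lsh(big.NewInt(1), 255) // 2**255
	tt256 = new(big.Int).Lsh(big.NewInt(1), 256) // 2**256
)

// s256 follows the doc comment above: values below 2**255 are non-negative,
// everything else wraps around by subtracting 2**256.
func s256(x *big.Int) *big.Int {
	if x.Cmp(tt255) < 0 {
		return x
	}
	return new(big.Int).Sub(x, tt256)
}

func main() {
	maxU256 := new(big.Int).Sub(tt256, big.NewInt(1))
	fmt.Println(s256(big.NewInt(1))) // 1
	fmt.Println(s256(tt255))         // -2**255
	fmt.Println(s256(maxU256))       // -1
}
```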
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index cdacf354a5e6..a54f9745f067 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -347,7 +347,7 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
// Finalize implements consensus.Engine and processes withdrawals on top.
-func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
+func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, body)
return
@@ -398,25 +398,21 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
if parent == nil {
return nil, fmt.Errorf("nil parent header for block %d", header.Number)
}
+
preTrie, err := state.Database().OpenTrie(parent.Root)
if err != nil {
return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
}
+
vktPreTrie, okpre := preTrie.(*trie.VerkleTrie)
vktPostTrie, okpost := state.GetTrie().(*trie.VerkleTrie)
-
- // The witness is only attached iff both parent and current block are
- // using verkle tree.
if okpre && okpost {
if len(keys) > 0 {
- verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys)
+ verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys, vktPreTrie.FlatdbNodeResolver)
if err != nil {
return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
}
- block = block.WithWitness(&types.ExecutionWitness{
- StateDiff: stateDiff,
- VerkleProof: verkleProof,
- })
+ block = block.WithWitness(&types.ExecutionWitness{StateDiff: stateDiff, VerkleProof: verkleProof})
}
}
}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index d31efd744510..b4f7c5be5cf2 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -574,7 +574,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
// Finalize implements consensus.Engine. There is no post-transaction
// consensus rules in clique, do nothing here.
-func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
+func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
// No block rewards in PoA, so the state remains as is
}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index ff76d31f551f..f596b5dd3dd8 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -89,7 +89,7 @@ type Engine interface {
//
// Note: The state database might be updated to reflect any consensus rules
// that happen at finalization (e.g. block rewards).
- Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body)
+ Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body)
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards or process withdrawals) and assembles the final block.
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 4f92f1282b9e..a3777e51e6a7 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -504,7 +504,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
// Finalize implements consensus.Engine, accumulating the block and uncle rewards.
-func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
+func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
// Accumulate any block and uncle rewards
accumulateRewards(chain.Config(), state, header, body.Uncles)
}
@@ -567,7 +567,7 @@ func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
-func accumulateRewards(config *params.ChainConfig, stateDB vm.StateDB, header *types.Header, uncles []*types.Header) {
+func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) {
// Select the correct block reward based on chain progression
blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) {
diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go
index b80c1b833a47..b9f5dee56e46 100644
--- a/consensus/misc/dao.go
+++ b/consensus/misc/dao.go
@@ -21,6 +21,7 @@ import (
"errors"
"math/big"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -81,8 +82,7 @@ func ApplyDAOHardFork(statedb vm.StateDB) {
// Move every DAO account and extra-balance account funds into the refund contract
for _, addr := range params.DAODrainList() {
- balance := statedb.GetBalance(addr)
- statedb.AddBalance(params.DAORefundContract, balance, tracing.BalanceIncreaseDaoContract)
- statedb.SubBalance(addr, balance, tracing.BalanceDecreaseDaoAccount)
+ statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr), tracing.BalanceIncreaseDaoContract)
+ statedb.SetBalance(addr, new(uint256.Int), tracing.BalanceDecreaseDaoAccount)
}
}
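A tiny standalone sketch of the drain semantics in the hunk above: the full balance of each drained account is credited to the refund contract, then the source account is zeroed. String keys and uint64 amounts stand in for addresses and uint256 balances:

```go
package main

import "fmt"

// applyDrain moves every drained account's balance to the refund account.
func applyDrain(balances map[string]uint64, drainList []string, refund string) {
	for _, addr := range drainList {
		balances[refund] += balances[addr]
		balances[addr] = 0
	}
}

func main() {
	balances := map[string]uint64{"dao-1": 10, "dao-2": 5, "refund": 0}
	applyDrain(balances, []string{"dao-1", "dao-2"}, "refund")
	fmt.Println(balances) // map[dao-1:0 dao-2:0 refund:15]
}
```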
diff --git a/consensus/taiko/consensus.go b/consensus/taiko/consensus.go
index f13c8388756d..3ec5255c134d 100644
--- a/consensus/taiko/consensus.go
+++ b/consensus/taiko/consensus.go
@@ -11,6 +11,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -201,13 +202,17 @@ func (t *Taiko) Prepare(chain consensus.ChainHeaderReader, header *types.Header)
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
-func (t *Taiko) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal) {
+func (t *Taiko) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
// no block rewards in l2
header.UncleHash = types.CalcUncleHash(nil)
header.Difficulty = common.Big0
// Withdrawals processing.
- for _, w := range withdrawals {
- state.AddBalance(w.Address, uint256.MustFromBig(new(big.Int).SetUint64(w.Amount)))
+ for _, w := range body.Withdrawals {
+ state.AddBalance(
+ w.Address,
+ uint256.MustFromBig(new(big.Int).SetUint64(w.Amount)),
+ tracing.BalanceIncreaseWithdrawal,
+ )
}
header.Root = state.IntermediateRoot(true)
}
@@ -217,14 +222,14 @@ func (t *Taiko) Finalize(chain consensus.ChainHeaderReader, header *types.Header
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
-func (t *Taiko) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) {
- if withdrawals == nil {
- withdrawals = make([]*types.Withdrawal, 0)
+func (t *Taiko) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
+ if body.Withdrawals == nil {
+ body.Withdrawals = make([]*types.Withdrawal, 0)
}
// Verify anchor transaction
- if len(txs) != 0 { // Transactions list might be empty when building empty payload.
- isAnchor, err := t.ValidateAnchorTx(txs[0], header)
+ if len(body.Transactions) != 0 { // Transactions list might be empty when building empty payload.
+ isAnchor, err := t.ValidateAnchorTx(body.Transactions[0], header)
if err != nil {
return nil, err
}
@@ -234,10 +239,8 @@ func (t *Taiko) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *t
}
// Finalize block
- t.Finalize(chain, header, state, txs, uncles, withdrawals)
- return types.NewBlockWithWithdrawals(
- header, txs, nil /* ignore uncles */, receipts, withdrawals, trie.NewStackTrie(nil),
- ), nil
+ t.Finalize(chain, header, state, body)
+ return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
}
// Seal generates a new sealing request for the given input block and pushes
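A minimal sketch of the withdrawal crediting the new Finalize signature performs above: every entry in body.Withdrawals adds its Amount to its Address. The real code goes through state.AddBalance with tracing.BalanceIncreaseWithdrawal; plain Go types stand in here:

```go
package main

import "fmt"

type withdrawal struct {
	Address string
	Amount  uint64
}

// creditWithdrawals accumulates each withdrawal amount onto its target address.
func creditWithdrawals(balances map[string]uint64, ws []withdrawal) {
	for _, w := range ws {
		balances[w.Address] += w.Amount
	}
}

func main() {
	balances := map[string]uint64{}
	creditWithdrawals(balances, []withdrawal{
		{Address: "0xabc", Amount: 5},
		{Address: "0xdef", Amount: 7},
		{Address: "0xabc", Amount: 1},
	})
	fmt.Println(balances) // map[0xabc:6 0xdef:7]
}
```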
diff --git a/core/asm/asm.go b/core/asm/asm.go
index f4b59f447191..70891951cf24 100644
--- a/core/asm/asm.go
+++ b/core/asm/asm.go
@@ -72,23 +72,9 @@ func (it *instructionIterator) Next() bool {
return false
}
it.op = vm.OpCode(it.code[it.pc])
- var a int
- if !it.eofEnabled { // Legacy code
- if it.op.IsPush() {
- a = int(it.op) - int(vm.PUSH0)
- }
- } else { // EOF code
- if it.op == vm.RJUMPV {
- // RJUMPV is unique as it has a variable sized operand. The total size is
- // determined by the count byte which immediately follows RJUMPV.
- maxIndex := int(it.code[it.pc+1])
- a = (maxIndex+1)*2 + 1
- } else {
- a = vm.Immediates(it.op)
- }
- }
- if a > 0 {
- u := it.pc + 1 + uint64(a)
+ if it.op.IsPush() {
+ a := uint64(it.op) - uint64(vm.PUSH0)
+ u := it.pc + 1 + a
if uint64(len(it.code)) <= it.pc || uint64(len(it.code)) < u {
it.error = fmt.Errorf("incomplete instruction at %v", it.pc)
return false
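The restored iterator above sizes a PUSH operand as opcode minus PUSH0 and refuses to step past the end of the code. A standalone sketch of that bounds check (0x5f is the EVM PUSH0 opcode value):

```go
package main

import (
	"errors"
	"fmt"
)

const push0 = 0x5f // PUSH0; PUSH1..PUSH32 follow it

// pushOperand returns the immediate bytes of a PUSH instruction at pc,
// or an error if the instruction runs past the end of the code.
func pushOperand(code []byte, pc int) ([]byte, error) {
	op := code[pc]
	if op < push0 || op > push0+32 {
		return nil, nil // not a PUSH instruction
	}
	size := int(op) - push0
	end := pc + 1 + size
	if end > len(code) {
		return nil, errors.New("incomplete instruction")
	}
	return code[pc+1 : end], nil
}

func main() {
	code := []byte{0x60, 0xaa, 0x61, 0xbb} // PUSH1 0xaa, then a truncated PUSH2
	fmt.Println(pushOperand(code, 0))      // [170] <nil>
	fmt.Println(pushOperand(code, 2))      // [] incomplete instruction
}
```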
diff --git a/core/bench_test.go b/core/bench_test.go
index 6d518e8d3b73..ac00655b8525 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -337,8 +337,6 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database: %v", err)
}
- db = rawdb.NewDatabase(pdb)
-
chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
diff --git a/core/block_validator.go b/core/block_validator.go
index 59783a040730..4f51f5dc1788 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -121,7 +121,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
if res == nil {
- return errors.New("nil ProcessResult value")
+ return fmt.Errorf("nil ProcessResult value")
}
header := block.Header()
if block.GasUsed() != res.GasUsed {
@@ -145,12 +145,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}
// Validate the parsed requests match the expected header value.
if header.RequestsHash != nil {
- reqhash := types.CalcRequestsHash(res.Requests)
- if reqhash != *header.RequestsHash {
- return fmt.Errorf("invalid requests hash (remote: %x local: %x)", *header.RequestsHash, reqhash)
+ depositSha := types.DeriveSha(res.Requests, trie.NewStackTrie(nil))
+ if depositSha != *header.RequestsHash {
+ return fmt.Errorf("invalid deposit root hash (remote: %x local: %x)", *header.RequestsHash, depositSha)
}
- } else if res.Requests != nil {
- return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
diff --git a/core/blockchain.go b/core/blockchain.go
index c3da61b28108..0d2d57cdfbc2 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1542,7 +1542,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Reorganise the chain if the parent is not the head block
if block.ParentHash() != currentBlock.Hash() {
- if err := bc.reorg(currentBlock, block.Header()); err != nil {
+ if err := bc.reorg(currentBlock, block); err != nil {
return NonStatTy, err
}
}
@@ -1550,7 +1550,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Set new head.
bc.writeHeadBlock(block)
- bc.chainFeed.Send(ChainEvent{Header: block.Header()})
+ bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
@@ -1560,7 +1560,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// we will fire an accumulated ChainHeadEvent and disable fire
// event here.
if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
}
return CanonStatTy, nil
}
@@ -1774,6 +1774,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if err != nil {
return nil, it.index, err
}
+ statedb.SetLogger(bc.logger)
// If we are past Byzantium, enable prefetching to pull in trie node paths
// while processing transactions. Before Byzantium the prefetcher is mostly
@@ -2258,8 +2259,21 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error
// reads should be blocked until the mutation is complete.
bc.txLookupLock.Lock()
- // Reorg can be executed, start reducing the chain's old blocks and appending
- // the new blocks
+ // Insert the new chain segment in incremental order, from the old
+ // to the new. The new chain head (newChain[0]) is not inserted here,
+ // as it will be handled separately outside of this function
+ for i := len(newChain) - 1; i >= 1; i-- {
+ // Insert the block in the canonical way, re-writing history
+ bc.writeHeadBlock(newChain[i])
+
+ // Collect the new added transactions.
+ for _, tx := range newChain[i].Transactions() {
+ addedTxs = append(addedTxs, tx.Hash())
+ }
+ }
+
+	// Delete the useless indexes right now, including the non-canonical
+	// transaction indexes and the canonical chain indexes above the head.
var (
deletedTxs []common.Hash
rebirthTxs []common.Hash
@@ -2285,7 +2299,32 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error
deletedLogs = nil
}
}
- if len(deletedLogs) > 0 {
+ rawdb.DeleteCanonicalHash(indexesBatch, i)
+ }
+ if err := indexesBatch.Write(); err != nil {
+ log.Crit("Failed to delete useless indexes", "err", err)
+ }
+ // Reset the tx lookup cache to clear stale txlookup cache.
+ bc.txLookupCache.Purge()
+
+ // Release the tx-lookup lock after mutation.
+ bc.txLookupLock.Unlock()
+
+ // Send out events for logs from the old canon chain, and 'reborn'
+ // logs from the new canon chain. The number of logs can be very
+ // high, so the events are sent in batches of size around 512.
+
+ // Deleted logs + blocks:
+ var deletedLogs []*types.Log
+ for i := len(oldChain) - 1; i >= 0; i-- {
+ // Also send event for blocks removed from the canon chain.
+ bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
+
+ // Collect deleted logs for notification
+ if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
+ deletedLogs = append(deletedLogs, logs...)
+ }
+ if len(deletedLogs) > 512 {
bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index dc391bb520c1..cef87ffb7527 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1333,6 +1333,85 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re
}
}
+func TestReorgSideEvent(t *testing.T) {
+ testReorgSideEvent(t, rawdb.HashScheme)
+ testReorgSideEvent(t, rawdb.PathScheme)
+}
+
+func testReorgSideEvent(t *testing.T, scheme string) {
+ var (
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
+ }
+ signer = types.LatestSigner(gspec.Config)
+ )
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
+ defer blockchain.Stop()
+
+ _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
+ if _, err := blockchain.InsertChain(chain); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+ _, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
+ if i == 2 {
+ gen.OffsetTime(-9)
+ }
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ })
+ chainSideCh := make(chan ChainSideEvent, 64)
+ blockchain.SubscribeChainSideEvent(chainSideCh)
+ if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+ expectedSideHashes := map[common.Hash]bool{
+ chain[0].Hash(): true,
+ chain[1].Hash(): true,
+ chain[2].Hash(): true,
+ }
+
+ i := 0
+
+ const timeoutDura = 10 * time.Second
+ timeout := time.NewTimer(timeoutDura)
+done:
+ for {
+ select {
+ case ev := <-chainSideCh:
+ block := ev.Block
+ if _, ok := expectedSideHashes[block.Hash()]; !ok {
+ t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
+ }
+ i++
+
+ if i == len(expectedSideHashes) {
+ timeout.Stop()
+
+ break done
+ }
+ timeout.Reset(timeoutDura)
+
+ case <-timeout.C:
+ t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
+ }
+ }
+
+ // make sure no more events are fired
+ select {
+ case e := <-chainSideCh:
+ t.Errorf("unexpected event fired: %v", e)
+ case <-time.After(250 * time.Millisecond):
+ }
+}
+
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
testCanonicalBlockRetrieval(t, rawdb.HashScheme)
@@ -2663,13 +2742,13 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
datadir := t.TempDir()
ancient := path.Join(datadir, "ancient")
- pdb, err := pebble.New(datadir, 0, 0, "", false)
- if err != nil {
- t.Fatalf("Failed to create persistent key-value database: %v", err)
- }
- db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
+ db, err := rawdb.Open(rawdb.OpenOptions{
+ Directory: datadir,
+ AncientsDirectory: ancient,
+ Ephemeral: true,
+ })
if err != nil {
- t.Fatalf("Failed to create persistent freezer database: %v", err)
+ t.Fatalf("Failed to create persistent database: %v", err)
}
defer db.Close()
@@ -4149,81 +4228,56 @@ func TestEIP3651(t *testing.T) {
}
}
-// Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
-var depositsGeneratorCode = common.FromHex("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033")
-
-// This is a smoke test for EIP-7685 requests added in the Prague fork. The test first
-// creates a block containing requests, and then inserts it into the chain to run
-// validation.
-func TestPragueRequests(t *testing.T) {
+func TestEIP6110(t *testing.T) {
var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- config = *params.MergedTestChainConfig
- signer = types.LatestSigner(&config)
- engine = beacon.NewFaker()
+ engine = beacon.NewFaker()
+
+ // A sender who makes transactions, has some funds
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+ funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
+ config = *params.AllEthashProtocolChanges
+ gspec = &Genesis{
+ Config: &config,
+ Alloc: types.GenesisAlloc{
+ addr: {Balance: funds},
+ config.DepositContractAddress: {
+ // Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
+ Code: common.Hex2Bytes("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033"),
+ Nonce: 0,
+ Balance: big.NewInt(0),
+ },
+ },
+ }
)
- gspec := &Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{
- addr1: {Balance: big.NewInt(9999900000000000)},
- config.DepositContractAddress: {Code: depositsGeneratorCode},
- params.WithdrawalQueueAddress: {Code: params.WithdrawalQueueCode},
- params.ConsolidationQueueAddress: {Code: params.ConsolidationQueueCode},
- },
- }
+
+ gspec.Config.BerlinBlock = common.Big0
+ gspec.Config.LondonBlock = common.Big0
+ gspec.Config.TerminalTotalDifficulty = common.Big0
+ gspec.Config.TerminalTotalDifficultyPassed = true
+ gspec.Config.ShanghaiTime = u64(0)
+ gspec.Config.CancunTime = u64(0)
+ gspec.Config.PragueTime = u64(0)
+ signer := types.LatestSigner(gspec.Config)
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- // create deposit
- depositTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 0,
- To: &config.DepositContractAddress,
- Gas: 500_000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- })
- b.AddTx(depositTx)
-
- // create withdrawal request
- withdrawalTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 1,
-		To:        &params.WithdrawalQueueAddress,
- Gas: 500_000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- Value: newGwei(1),
- Data: common.FromHex("b917cfdc0d25b72d55cf94db328e1629b7f4fde2c30cdacf873b664416f76a0c7f7cc50c9f72a3cb84be88144cde91250000000000000d80"),
- })
- b.AddTx(withdrawalTx)
-
- // create consolidation request
- consolidationTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 2,
-		To:        &params.ConsolidationQueueAddress,
- Gas: 500_000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- Value: newGwei(1),
- Data: common.FromHex("b917cfdc0d25b72d55cf94db328e1629b7f4fde2c30cdacf873b664416f76a0c7f7cc50c9f72a3cb84be88144cde9125b9812f7d0b1f2f969b52bbb2d316b0c2fa7c9dba85c428c5e6c27766bcc4b0c6e874702ff1eb1c7024b08524a9771601"),
- })
- b.AddTx(consolidationTx)
+ for i := 0; i < 5; i++ {
+ txdata := &types.DynamicFeeTx{
+ ChainID: gspec.Config.ChainID,
+ Nonce: uint64(i),
+ To: &config.DepositContractAddress,
+ Gas: 500000,
+ GasFeeCap: newGwei(5),
+ GasTipCap: big.NewInt(2),
+ AccessList: nil,
+ Data: []byte{},
+ }
+ tx := types.NewTx(txdata)
+ tx, _ = types.SignTx(tx, signer, key)
+ b.AddTx(tx)
+ }
})
-
- // Check block has the correct requests hash.
- rh := blocks[0].RequestsHash()
- if rh == nil {
- t.Fatal("block has nil requests hash")
- }
- expectedRequestsHash := common.HexToHash("0x06ffb72b9f0823510b128bca6cd4f96f59b745de6791e9fc350b596e7605101e")
- if *rh != expectedRequestsHash {
- t.Fatalf("block has wrong requestsHash %v, want %v", *rh, expectedRequestsHash)
- }
-
- // Insert block to check validation.
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{DisableStack: true}, os.Stderr).Hooks()}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4231,4 +4285,32 @@ func TestPragueRequests(t *testing.T) {
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
+
+ block := chain.GetBlockByNumber(1)
+ if len(block.Requests()) != 5 {
+ t.Fatalf("failed to retrieve deposits: have %d, want %d", len(block.Requests()), 5)
+ }
+
+ // Verify each index is correct.
+ for want, req := range block.Requests() {
+ d, ok := req.Inner().(*types.Deposit)
+ if !ok {
+ t.Fatalf("expected deposit object")
+ }
+ if got := int(d.PublicKey[0]); got != want {
+ t.Fatalf("invalid pubkey: have %d, want %d", got, want)
+ }
+ if got := int(d.WithdrawalCredentials[0]); got != want {
+ t.Fatalf("invalid withdrawal credentials: have %d, want %d", got, want)
+ }
+ if d.Amount != uint64(want) {
+			t.Fatalf("invalid amount: have %d, want %d", d.Amount, want)
+ }
+ if got := int(d.Signature[0]); got != want {
+ t.Fatalf("invalid signature: have %d, want %d", got, want)
+ }
+ if d.Index != uint64(want) {
+ t.Fatalf("invalid index: have %d, want %d", d.Index, want)
+ }
+ }
}
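The restored TestReorgSideEvent above drains side-chain events with a timeout that resets after every received event. A standalone sketch of that consume pattern, with a plain string channel standing in for SubscribeChainSideEvent:

```go
package main

import (
	"fmt"
	"time"
)

// collect reads events until the expected count is reached, resetting the
// quiet-period timer after each one, and fails if the channel goes silent.
func collect(events <-chan string, want int, quiet time.Duration) ([]string, error) {
	var got []string
	timer := time.NewTimer(quiet)
	defer timer.Stop()
	for len(got) < want {
		select {
		case ev := <-events:
			got = append(got, ev)
			timer.Reset(quiet)
		case <-timer.C:
			return got, fmt.Errorf("timed out after %d/%d events", len(got), want)
		}
	}
	return got, nil
}

func main() {
	ch := make(chan string, 3)
	ch <- "block-1"
	ch <- "block-2"
	ch <- "block-3"
	events, err := collect(ch, 3, 100*time.Millisecond)
	fmt.Println(events, err) // [block-1 block-2 block-3] <nil>
}
```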
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 586979e77237..8e75abdea0ad 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -346,34 +346,18 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
gen(i, b)
}
- var requests [][]byte
+ var requests types.Requests
if config.IsPrague(b.header.Number, b.header.Time) {
- // EIP-6110 deposits
- var blockLogs []*types.Log
for _, r := range b.receipts {
- blockLogs = append(blockLogs, r.Logs...)
- }
- depositRequests, err := ParseDepositLogs(blockLogs, config)
- if err != nil {
- panic(fmt.Sprintf("failed to parse deposit log: %v", err))
+ d, err := ParseDepositLogs(r.Logs, config)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse deposit log: %v", err))
+ }
+ requests = append(requests, d...)
}
- requests = append(requests, depositRequests)
- // create EVM for system calls
- blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{})
- // EIP-7002 withdrawals
- withdrawalRequests := ProcessWithdrawalQueue(vmenv, statedb)
- requests = append(requests, withdrawalRequests)
- // EIP-7251 consolidations
- consolidationRequests := ProcessConsolidationQueue(vmenv, statedb)
- requests = append(requests, consolidationRequests)
- }
- if requests != nil {
- reqHash := types.CalcRequestsHash(requests)
- b.header.RequestsHash = &reqHash
}
- body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
+ body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals, Requests: requests}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
if err != nil {
panic(err)
@@ -462,15 +446,16 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
// Save pre state for proof generation
// preState := statedb.Copy()
- // Pre-execution system calls.
- if config.IsPrague(b.header.Number, b.header.Time) {
- // EIP-2935
- blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{})
- ProcessParentBlockHash(b.header.ParentHash, vmenv, statedb)
- }
-
- // Execute any user modifications to the block.
+ // TODO uncomment when the 2935 PR is merged
+ // if config.IsPrague(b.header.Number, b.header.Time) {
+ // if !config.IsPrague(b.parent.Number(), b.parent.Time()) {
+ // Transition case: insert all 256 ancestors
+ // InsertBlockHashHistoryAtEip2935Fork(statedb, b.header.Number.Uint64()-1, b.header.ParentHash, chainreader)
+ // } else {
+ // ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash)
+ // }
+ // }
+ // Execute any user modifications to the block
if gen != nil {
gen(i, b)
}
@@ -484,7 +469,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
panic(err)
}
- // Write state changes to DB.
+ // Write state changes to db
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
diff --git a/core/genesis.go b/core/genesis.go
index eff92084ebad..31db49f527e4 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -449,6 +449,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
}
var (
withdrawals []*types.Withdrawal
+ requests types.Requests
)
if conf := g.Config; conf != nil {
num := big.NewInt(int64(g.Number))
@@ -472,12 +473,11 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
}
}
if conf.IsPrague(num, g.Timestamp) {
- emptyRequests := [][]byte{{0x00}, {0x01}, {0x02}}
- rhash := types.CalcRequestsHash(emptyRequests)
- head.RequestsHash = &rhash
+ head.RequestsHash = &types.EmptyRequestsHash
+ requests = make(types.Requests, 0)
}
}
- return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
+ return types.NewBlock(head, &types.Body{Withdrawals: withdrawals, Requests: requests}, nil, trie.NewStackTrie(nil))
}
// Commit writes the block and state of a genesis specification to the database.
@@ -588,11 +588,10 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
- // Pre-deploy system contracts
- params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
- params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
- params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0},
- params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0},
+ // Pre-deploy EIP-4788 system contract
+ params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
+ // Pre-deploy EIP-2935 history contract.
+ params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
},
}
if faucet != nil {
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index d6370cee33ae..5ba785c2d3d1 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -58,9 +58,8 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
// - The append-only nature ensures that disk writes are minimized.
// - The in-order data ensures that disk reads are always optimized.
type Freezer struct {
- datadir string
- frozen atomic.Uint64 // Number of items already frozen
- tail atomic.Uint64 // Number of the first stored item in the freezer
+ frozen atomic.Uint64 // Number of items already frozen
+ tail atomic.Uint64 // Number of the first stored item in the freezer
// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go
index 2d3dbb07ddee..cd3910151486 100644
--- a/core/rawdb/freezer_memory.go
+++ b/core/rawdb/freezer_memory.go
@@ -19,10 +19,10 @@ package rawdb
import (
"errors"
"fmt"
- "math"
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -419,9 +419,3 @@ func (f *MemoryFreezer) Reset() error {
f.items, f.tail = 0, 0
return nil
}
-
-// AncientDatadir returns the path of the ancient store.
-// Since the memory freezer is ephemeral, an empty string is returned.
-func (f *MemoryFreezer) AncientDatadir() (string, error) {
- return "", nil
-}
diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go
index 7c77a06efcda..b14799506615 100644
--- a/core/rawdb/freezer_resettable.go
+++ b/core/rawdb/freezer_resettable.go
@@ -202,14 +202,6 @@ func (f *resettableFreezer) Sync() error {
return f.freezer.Sync()
}
-// AncientDatadir returns the path of the ancient store.
-func (f *resettableFreezer) AncientDatadir() (string, error) {
- f.lock.RLock()
- defer f.lock.RUnlock()
-
- return f.freezer.AncientDatadir()
-}
-
// cleanup removes the directory located in the specified path
// has the name with deletion marker suffix.
func cleanup(path string) error {
diff --git a/core/state/access_events.go b/core/state/access_events.go
index b745c383b15c..7f67df64eb07 100644
--- a/core/state/access_events.go
+++ b/core/state/access_events.go
@@ -117,7 +117,7 @@ func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address)
return gas
}
-// ContractCreatePreCheckGas charges access costs before
+// ContractCreatePreCheck charges access costs before
// a contract creation is initiated. It is just reads, because the
// address collision is done before the transfer, and so no write
// are guaranteed to happen at this point.
diff --git a/core/state/database.go b/core/state/database.go
index 0d8acec35aaa..de61dee036eb 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -186,9 +186,9 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
// is optional and may be partially useful if it's not fully
// generated.
if db.snap != nil {
- snap := db.snap.Snapshot(stateRoot)
- if snap != nil {
- readers = append(readers, newStateReader(snap)) // snap reader is optional
+ sr, err := newStateReader(stateRoot, db.snap)
+ if err == nil {
+ readers = append(readers, sr) // snap reader is optional
}
}
// Set up the trie reader, which is expected to always be available
diff --git a/core/state/reader.go b/core/state/reader.go
index 85842adde85f..6bddefc2a7dd 100644
--- a/core/state/reader.go
+++ b/core/state/reader.go
@@ -21,13 +21,13 @@ import (
"maps"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
- "github.com/ethereum/go-ethereum/triedb/database"
)
// Reader defines the interface for accessing accounts and storage slots
@@ -52,18 +52,23 @@ type Reader interface {
Copy() Reader
}
-// stateReader wraps a database state reader.
+// stateReader is a wrapper over the state snapshot and implements the Reader
+// interface. It provides an efficient way to access flat state.
type stateReader struct {
- reader database.StateReader
- buff crypto.KeccakState
+ snap snapshot.Snapshot
+ buff crypto.KeccakState
}
-// newStateReader constructs a state reader with on the given state root.
-func newStateReader(reader database.StateReader) *stateReader {
- return &stateReader{
- reader: reader,
- buff: crypto.NewKeccakState(),
+// newStateReader constructs a flat state reader on the specified state root.
+func newStateReader(root common.Hash, snaps *snapshot.Tree) (*stateReader, error) {
+ snap := snaps.Snapshot(root)
+ if snap == nil {
+ return nil, errors.New("snapshot is not available")
}
+ return &stateReader{
+ snap: snap,
+ buff: crypto.NewKeccakState(),
+ }, nil
}
// Account implements Reader, retrieving the account specified by the address.
@@ -73,18 +78,18 @@ func newStateReader(reader database.StateReader) *stateReader {
//
// The returned account might be nil if it's not existent.
func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error) {
- account, err := r.reader.Account(crypto.HashData(r.buff, addr.Bytes()))
+ ret, err := r.snap.Account(crypto.HashData(r.buff, addr.Bytes()))
if err != nil {
return nil, err
}
- if account == nil {
+ if ret == nil {
return nil, nil
}
acct := &types.StateAccount{
- Nonce: account.Nonce,
- Balance: account.Balance,
- CodeHash: account.CodeHash,
- Root: common.BytesToHash(account.Root),
+ Nonce: ret.Nonce,
+ Balance: ret.Balance,
+ CodeHash: ret.CodeHash,
+ Root: common.BytesToHash(ret.Root),
}
if len(acct.CodeHash) == 0 {
acct.CodeHash = types.EmptyCodeHash.Bytes()
@@ -105,7 +110,7 @@ func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error)
func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
addrHash := crypto.HashData(r.buff, addr.Bytes())
slotHash := crypto.HashData(r.buff, key.Bytes())
- ret, err := r.reader.Storage(addrHash, slotHash)
+ ret, err := r.snap.Storage(addrHash, slotHash)
if err != nil {
return common.Hash{}, err
}
@@ -126,8 +131,8 @@ func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash
// Copy implements Reader, returning a deep-copied snap reader.
func (r *stateReader) Copy() Reader {
return &stateReader{
- reader: r.reader,
- buff: crypto.NewKeccakState(),
+ snap: r.snap,
+ buff: crypto.NewKeccakState(),
}
}
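As the restored reader above shows, the flat snapshot is keyed by keccak256 hashes rather than by raw addresses and slot keys. A small sketch of how those lookup keys are derived (assumes the go-ethereum module is on the module path for common and crypto):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	addr := common.HexToAddress("0x000000000000000000000000000000000000dEaD")
	slot := common.HexToHash("0x01")

	addrHash := crypto.Keccak256Hash(addr.Bytes()) // key used for snap.Account
	slotHash := crypto.Keccak256Hash(slot.Bytes()) // key used for snap.Storage

	fmt.Println("account key:", addrHash.Hex())
	fmt.Println("storage key:", slotHash.Hex())
}
```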
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 01fb55ea4cd6..8245298f4fd0 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -354,7 +354,12 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
if len(result.keys) > 0 {
-			tr := trie.NewEmpty(nil)
+			snapTrie := trie.NewEmpty(nil)
for i, key := range result.keys {
- tr.Update(key, result.vals[i])
+ snapTrie.Update(key, result.vals[i])
+ }
+ root, nodes := snapTrie.Commit(false)
+ if nodes != nil {
+ tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ tdb.Commit(root, false)
}
-	_, nodes := tr.Commit(false)
-	hashSet := nodes.HashSet()
diff --git a/core/state/snapshot/iterator_binary.go b/core/state/snapshot/iterator_binary.go
index 523c7b9946a9..db3d56748bd2 100644
--- a/core/state/snapshot/iterator_binary.go
+++ b/core/state/snapshot/iterator_binary.go
@@ -69,7 +69,7 @@ func (dl *diffLayer) initBinaryStorageIterator(account, seek common.Hash) Iterat
if !ok {
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
- a, destructed := dl.StorageIterator(account, seek)
+ a, destructed := dl.StorageIterator(account, common.Hash{})
if destructed {
l := &binaryIterator{
a: a,
@@ -93,7 +93,7 @@ func (dl *diffLayer) initBinaryStorageIterator(account, seek common.Hash) Iterat
}
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
- a, destructed := dl.StorageIterator(account, seek)
+ a, destructed := dl.StorageIterator(account, common.Hash{})
if destructed {
l := &binaryIterator{
a: a,
diff --git a/core/state/state_object.go b/core/state/state_object.go
index b659bf7ff208..19b8a1743b8e 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -23,6 +23,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -198,7 +199,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
// Schedule the resolved storage slots for prefetching if it's enabled.
if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash {
- if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil {
+ if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, [][]byte{key[:]}, true); err != nil {
log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err)
}
}
@@ -207,8 +208,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
}
// SetState updates a value in account storage.
-// It returns the previous value
-func (s *stateObject) SetState(key, value common.Hash) common.Hash {
+func (s *stateObject) SetState(key, value common.Hash) {
// If the new value is the same as old, don't set. Otherwise, track only the
// dirty changes, supporting reverting all of it back to no change.
prev, origin := s.getState(key)
@@ -218,7 +218,9 @@ func (s *stateObject) SetState(key, value common.Hash) common.Hash {
// New value is different, update and journal the change
s.db.journal.storageChange(s.address, key, prev, origin)
s.setState(key, value, origin)
- return prev
+ if s.db.logger != nil && s.db.logger.OnStorageChange != nil {
+ s.db.logger.OnStorageChange(s.address, key, prev, value)
+ }
}
// setState updates a value in account dirty storage. The dirtiness will be
@@ -235,7 +237,7 @@ func (s *stateObject) setState(key common.Hash, value common.Hash, origin common
// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
func (s *stateObject) finalise() {
- slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage))
+ slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
for key, value := range s.dirtyStorage {
if origin, exist := s.uncommittedStorage[key]; exist && origin == value {
// The slot is reverted to its original value, delete the entry
@@ -248,7 +250,7 @@ func (s *stateObject) finalise() {
// The slot is different from its original value and hasn't been
// tracked for commit yet.
s.uncommittedStorage[key] = s.GetCommittedState(key)
- slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure
+ slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
}
// Aggregate the dirty storage slots into the pending area. It might
// be possible that the value of tracked slot here is same with the
@@ -259,7 +261,7 @@ func (s *stateObject) finalise() {
s.pendingStorage[key] = value
}
if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
- if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
+ if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch, false); err != nil {
log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
}
}
@@ -321,7 +323,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
deletions []common.Hash
- used = make([]common.Hash, 0, len(s.uncommittedStorage))
+ used = make([][]byte, 0, len(s.uncommittedStorage))
)
for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes
@@ -344,7 +346,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
deletions = append(deletions, key)
}
// Cache the items for preloading
- used = append(used, key) // Copy needed for closure
+ used = append(used, common.CopyBytes(key[:])) // Copy needed for closure
}
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
@@ -354,7 +356,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
s.db.StorageDeleted.Add(1)
}
if s.db.prefetcher != nil {
- s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used)
+ s.db.prefetcher.used(s.addrHash, s.data.Root, used)
}
s.uncommittedStorage = make(Storage) // empties the commit markers
return tr, nil
@@ -446,8 +448,7 @@ func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) {
// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
-// returns the previous balance
-func (s *stateObject) AddBalance(amount *uint256.Int) uint256.Int {
+func (s *stateObject) AddBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
// EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
	if amount.IsZero() {
		if s.empty() {
			s.touch()
		}
-		return *(s.Balance())
+		return
	}
- return s.SetBalance(new(uint256.Int).Add(s.Balance(), amount))
+ s.SetBalance(new(uint256.Int).Add(s.Balance(), amount), reason)
+}
+
+// SubBalance removes amount from s's balance.
+// It is used to remove funds from the origin account of a transfer.
+func (s *stateObject) SubBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
+ if amount.IsZero() {
+ return
+ }
+ s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount), reason)
}
-// SetBalance sets the balance for the object, and returns the previous balance.
-func (s *stateObject) SetBalance(amount *uint256.Int) uint256.Int {
- prev := *s.data.Balance
+func (s *stateObject) SetBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
s.db.journal.balanceChange(s.address, s.data.Balance)
+ if s.db.logger != nil && s.db.logger.OnBalanceChange != nil {
+ s.db.logger.OnBalanceChange(s.address, s.Balance().ToBig(), amount.ToBig(), reason)
+ }
s.setBalance(amount)
-	return prev
}
@@ -537,6 +548,10 @@ func (s *stateObject) CodeSize() int {
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
s.db.journal.setCode(s.address)
+ if s.db.logger != nil && s.db.logger.OnCodeChange != nil {
+ // TODO remove prevcode from this callback
+ s.db.logger.OnCodeChange(s.address, common.BytesToHash(s.CodeHash()), nil, codeHash, code)
+ }
s.setCode(codeHash, code)
}
@@ -548,6 +563,9 @@ func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
func (s *stateObject) SetNonce(nonce uint64) {
s.db.journal.nonceChange(s.address, s.data.Nonce)
+ if s.db.logger != nil && s.db.logger.OnNonceChange != nil {
+ s.db.logger.OnNonceChange(s.address, s.data.Nonce, nonce)
+ }
s.setNonce(nonce)
}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 6f54300c37ad..9de50beb12dc 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/triedb"
@@ -47,11 +48,11 @@ func TestDump(t *testing.T) {
// generate a few entries
obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
- obj1.AddBalance(uint256.NewInt(22))
+ obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified)
obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
- obj3.SetBalance(uint256.NewInt(44))
+ obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified)
// write some of them to the trie
s.state.updateStateObject(obj1)
@@ -105,13 +106,13 @@ func TestIterativeDump(t *testing.T) {
// generate a few entries
obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
- obj1.AddBalance(uint256.NewInt(22))
+ obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified)
obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
- obj3.SetBalance(uint256.NewInt(44))
+ obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified)
obj4 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x00}))
- obj4.AddBalance(uint256.NewInt(1337))
+ obj4.AddBalance(uint256.NewInt(1337), tracing.BalanceChangeUnspecified)
// write some of them to the trie
s.state.updateStateObject(obj1)
@@ -199,7 +200,7 @@ func TestCreateObjectRevert(t *testing.T) {
state.CreateAccount(addr)
so0 := state.getStateObject(addr)
- so0.SetBalance(uint256.NewInt(42))
+ so0.SetBalance(uint256.NewInt(42), tracing.BalanceChangeUnspecified)
so0.SetNonce(43)
so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
state.setStateObject(so0)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 1d614a3114ed..7c131b7a751c 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"maps"
+ "math/big"
"slices"
"sync"
"sync/atomic"
@@ -37,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
@@ -80,6 +82,7 @@ type StateDB struct {
db Database
prefetcher *triePrefetcher
trie Trie
+ logger *tracing.Hooks
reader Reader
// originalRoot is the pre-state root, before any changes were made.
@@ -149,13 +152,12 @@ type StateDB struct {
SnapshotCommits time.Duration
TrieDBCommits time.Duration
- AccountUpdated int
- StorageUpdated int
- AccountDeleted int
- StorageDeleted int
-
- // Testing hooks
- onCommit func(states *triestate.Set) // Hook invoked when commit is performed
+ AccountLoaded int // Number of accounts retrieved from the database during the state transition
+ AccountUpdated int // Number of accounts updated during the state transition
+ AccountDeleted int // Number of accounts deleted during the state transition
+ StorageLoaded int // Number of storage slots retrieved from the database during the state transition
+ StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition
+ StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition
}
// New creates a new state from a given trie.
@@ -188,6 +190,11 @@ func New(root common.Hash, db Database) (*StateDB, error) {
return sdb, nil
}
+// SetLogger sets the logger for account update hooks.
+func (s *StateDB) SetLogger(l *tracing.Hooks) {
+ s.logger = l
+}
+
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@@ -208,7 +215,7 @@ func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness)
// the prefetcher is constructed. For more details, see:
// https://github.com/ethereum/go-ethereum/issues/29880
s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, witness == nil)
- if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, nil, false); err != nil {
+ if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, false); err != nil {
log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err)
}
}
@@ -241,6 +248,9 @@ func (s *StateDB) AddLog(log *types.Log) {
log.TxHash = s.thash
log.TxIndex = uint(s.txIndex)
log.Index = s.logSize
+ if s.logger != nil && s.logger.OnLog != nil {
+ s.logger.OnLog(log)
+ }
s.logs[s.thash] = append(s.logs[s.thash], log)
s.logSize++
}
@@ -406,19 +416,19 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
*/
// AddBalance adds amount to the account associated with addr.
-func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
+func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
stateObject := s.getOrNewStateObject(addr)
- if stateObject == nil {
- return uint256.Int{}
+ if stateObject != nil {
+ stateObject.AddBalance(amount, reason)
}
-	return stateObject.AddBalance(amount)
}
// SubBalance subtracts amount from the account associated with addr.
-func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
+func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
stateObject := s.getOrNewStateObject(addr)
- if stateObject == nil {
- return uint256.Int{}
+ if stateObject != nil {
+ stateObject.SubBalance(amount, reason)
}
- if amount.IsZero() {
- return *(stateObject.Balance())
- }
- return stateObject.SubBalance(amount)
 }
@@ -429,7 +439,7 @@ func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tr
func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
- stateObject.SetBalance(amount)
+ stateObject.SetBalance(amount, reason)
}
}
@@ -479,7 +489,7 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common
if obj != nil {
newObj.SetCode(common.BytesToHash(obj.CodeHash()), obj.code)
newObj.SetNonce(obj.Nonce())
- newObj.SetBalance(obj.Balance())
+ newObj.SetBalance(obj.Balance(), tracing.BalanceChangeUnspecified)
}
}
@@ -494,11 +504,10 @@ func (s *StateDB) SelfDestruct(addr common.Address) uint256.Int {
if stateObject == nil {
- return prevBalance
+ return
}
- prevBalance = *(stateObject.Balance())
// Regardless of whether it is already destructed or not, we do have to
// journal the balance-change, if we set it to zero here.
if !stateObject.Balance().IsZero() {
- stateObject.SetBalance(new(uint256.Int))
+ stateObject.SetBalance(new(uint256.Int), tracing.BalanceDecreaseSelfdestruct)
}
// If it is already marked as self-destructed, we do not need to add it
// for journalling a second time.
@@ -506,7 +515,6 @@ func (s *StateDB) SelfDestruct(addr common.Address) uint256.Int {
s.journal.destruct(addr)
stateObject.markSelfdestructed()
}
- return prevBalance
}
func (s *StateDB) SelfDestruct6780(addr common.Address) (uint256.Int, bool) {
@@ -515,7 +523,7 @@ func (s *StateDB) SelfDestruct6780(addr common.Address) (uint256.Int, bool) {
return uint256.Int{}, false
}
if stateObject.newContract {
- return s.SelfDestruct(addr), true
+ s.SelfDestruct(addr)
}
return *(stateObject.Balance()), false
}
@@ -593,7 +601,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
}
// Schedule the resolved account for prefetching if it's enabled.
if s.prefetcher != nil {
- if err = s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, []common.Address{addr}, nil, true); err != nil {
+ if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, [][]byte{addr[:]}, true); err != nil {
log.Error("Failed to prefetch account", "addr", addr, "err", err)
}
}
@@ -709,7 +717,7 @@ func (s *StateDB) Copy() *StateDB {
// CHANGE(taiko): RevisionId returns the latest snapshot id.
func (s *StateDB) RevisionId() int {
- return s.nextRevisionId
+ return s.journal.nextRevisionId
}
// Snapshot returns an identifier for the current revision of the state.
@@ -746,6 +754,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
delete(s.stateObjects, obj.address)
s.markDelete(addr)
+
+ // If ether was sent to account post-selfdestruct it is burnt.
+ if bal := obj.Balance(); s.logger != nil && s.logger.OnBalanceChange != nil && obj.selfDestructed && bal.Sign() != 0 {
+ s.logger.OnBalanceChange(obj.address, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
+ }
// We need to maintain account deletions explicitly (will remain
// set indefinitely). Note only the first occurred self-destruct
// event is tracked.
@@ -762,7 +775,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
- addressesToPrefetch = append(addressesToPrefetch, addr) // Copy needed for closure
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
}
if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, nil, false); err != nil {
+ if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, false); err != nil {
log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err)
}
}
@@ -883,7 +896,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
- usedAddrs []common.Address
+ usedAddrs [][]byte
deletedAddrs []common.Address
)
for addr, op := range s.mutations {
@@ -907,7 +920,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.AccountUpdates += time.Since(start)
if s.prefetcher != nil {
- s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs, nil)
+ s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
}
// Track the amount of time wasted on hashing the account trie
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
@@ -1074,8 +1087,7 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
deletes[addrHash] = op
// Short circuit if the origin storage was empty.
-
- if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
+ if prev.Root == types.EmptyRootHash {
continue
}
// Remove storage slots belonging to the account.
@@ -1288,7 +1300,8 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
// If trie database is enabled, commit the state update as a new layer
if db := s.db.TrieDB(); db != nil {
start := time.Now()
- if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet()); err != nil {
+ set := triestate.New(ret.accountsOrigin, ret.storagesOrigin)
+ if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, set); err != nil {
return nil, err
}
s.TrieDBCommits += time.Since(start)
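
The statedb.go hunks above switch AddBalance/SubBalance/SetBalance to take a tracing.BalanceChangeReason (and drop the returned previous balance), and add SetLogger so hooks receive OnLog and OnBalanceChange callbacks. A minimal sketch of a caller against the post-patch API; the package and helper name are illustrative, not part of the diff:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/holiman/uint256"
)

// creditReward credits a reward to addr, tagging the change with a reason as
// required by the new AddBalance signature. If hooks is non-nil, logs added
// later via sdb.AddLog are also forwarded to hooks.OnLog.
func creditReward(sdb *state.StateDB, hooks *tracing.Hooks, addr common.Address, amount *uint256.Int) {
	sdb.SetLogger(hooks)
	sdb.AddBalance(addr, amount, tracing.BalanceIncreaseRewardTransactionFee)
}
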
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 3647397df6f8..3c19ec0591f5 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -170,7 +170,7 @@ func TestCopy(t *testing.T) {
for i := byte(0); i < 255; i++ {
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- obj.AddBalance(uint256.NewInt(uint64(i)))
+ obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
orig.updateStateObject(obj)
}
orig.Finalise(false)
@@ -187,9 +187,9 @@ func TestCopy(t *testing.T) {
copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- origObj.AddBalance(uint256.NewInt(2 * uint64(i)))
- copyObj.AddBalance(uint256.NewInt(3 * uint64(i)))
- ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i)))
+ origObj.AddBalance(uint256.NewInt(2*uint64(i)), tracing.BalanceChangeUnspecified)
+ copyObj.AddBalance(uint256.NewInt(3*uint64(i)), tracing.BalanceChangeUnspecified)
+ ccopyObj.AddBalance(uint256.NewInt(4*uint64(i)), tracing.BalanceChangeUnspecified)
orig.updateStateObject(origObj)
copy.updateStateObject(copyObj)
@@ -236,7 +236,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
// Fill up the initial states
for i := byte(0); i < 255; i++ {
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- obj.AddBalance(uint256.NewInt(uint64(i)))
+ obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
obj.data.Root = common.HexToHash("0xdeadbeef")
orig.updateStateObject(obj)
}
@@ -246,9 +246,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
// modify all in memory without finalizing
for i := byte(0); i < 255; i++ {
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- amount := uint256.NewInt(uint64(i))
- obj.SetBalance(new(uint256.Int).Sub(obj.Balance(), amount))
-
+ obj.SubBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
orig.updateStateObject(obj)
}
cpy := orig.Copy()
@@ -282,7 +280,7 @@ func TestCopyObjectState(t *testing.T) {
// Fill up the initial states
for i := byte(0); i < 5; i++ {
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
- obj.AddBalance(uint256.NewInt(uint64(i)))
+ obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
obj.data.Root = common.HexToHash("0xdeadbeef")
orig.updateStateObject(obj)
}
diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go
index c9231f0526b3..f3e6af997e44 100644
--- a/core/state/stateupdate.go
+++ b/core/state/stateupdate.go
@@ -20,7 +20,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie/trienode"
- "github.com/ethereum/go-ethereum/triedb"
)
// contractCode represents a contract code with associated metadata.
@@ -132,17 +131,3 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
nodes: nodes,
}
}
-
-// stateSet converts the current stateUpdate object into a triedb.StateSet
-// object. This function extracts the necessary data from the stateUpdate
-// struct and formats it into the StateSet structure consumed by the triedb
-// package.
-func (sc *stateUpdate) stateSet() *triedb.StateSet {
- return &triedb.StateSet{
- Destructs: sc.destructs,
- Accounts: sc.accounts,
- AccountsOrigin: sc.accountsOrigin,
- Storages: sc.storages,
- StoragesOrigin: sc.storagesOrigin,
- }
-}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index b2c75e72fe33..2416cda873db 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -61,7 +62,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
obj := state.getOrNewStateObject(common.BytesToAddress([]byte{i}))
acc := &testAccount{address: common.BytesToAddress([]byte{i})}
- obj.AddBalance(uint256.NewInt(uint64(11 * i)))
+ obj.AddBalance(uint256.NewInt(uint64(11*i)), tracing.BalanceChangeUnspecified)
acc.balance = uint256.NewInt(uint64(11 * i))
obj.SetNonce(uint64(42 * i))
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 5b64583432a4..29dfdf04fa6f 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -118,31 +118,31 @@ func (p *triePrefetcher) report() {
fetcher.wait() // ensure the fetcher's idle before poking in its internals
if fetcher.root == p.root {
- p.accountLoadReadMeter.Mark(int64(len(fetcher.seenReadAddr)))
- p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWriteAddr)))
+ p.accountLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
+ p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
p.accountDupReadMeter.Mark(int64(fetcher.dupsRead))
p.accountDupWriteMeter.Mark(int64(fetcher.dupsWrite))
p.accountDupCrossMeter.Mark(int64(fetcher.dupsCross))
- for _, key := range fetcher.usedAddr {
- delete(fetcher.seenReadAddr, key)
- delete(fetcher.seenWriteAddr, key)
+ for _, key := range fetcher.used {
+ delete(fetcher.seenRead, string(key))
+ delete(fetcher.seenWrite, string(key))
}
- p.accountWasteMeter.Mark(int64(len(fetcher.seenReadAddr) + len(fetcher.seenWriteAddr)))
+ p.accountWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
} else {
- p.storageLoadReadMeter.Mark(int64(len(fetcher.seenReadSlot)))
- p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWriteSlot)))
+ p.storageLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
+ p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
p.storageDupReadMeter.Mark(int64(fetcher.dupsRead))
p.storageDupWriteMeter.Mark(int64(fetcher.dupsWrite))
p.storageDupCrossMeter.Mark(int64(fetcher.dupsCross))
- for _, key := range fetcher.usedSlot {
- delete(fetcher.seenReadSlot, key)
- delete(fetcher.seenWriteSlot, key)
+ for _, key := range fetcher.used {
+ delete(fetcher.seenRead, string(key))
+ delete(fetcher.seenWrite, string(key))
}
- p.storageWasteMeter.Mark(int64(len(fetcher.seenReadSlot) + len(fetcher.seenWriteSlot)))
+ p.storageWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
}
}
}
@@ -158,7 +158,7 @@ func (p *triePrefetcher) report() {
// upon the same contract, the parameters invoking this method may be
// repeated.
// 2. Finalize of the main account trie. This happens only once per block.
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, addrs []common.Address, slots []common.Hash, read bool) error {
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte, read bool) error {
// If the state item is only being read, but reads are disabled, return
if read && p.noreads {
return nil
@@ -175,7 +175,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm
fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
p.fetchers[id] = fetcher
}
- return fetcher.schedule(addrs, slots, read)
+ return fetcher.schedule(keys, read)
}
// trie returns the trie matching the root hash, blocking until the fetcher of
@@ -195,12 +195,10 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
// used marks a batch of state items used to allow creating statistics as to
// how useful or wasteful the fetcher is.
-func (p *triePrefetcher) used(owner common.Hash, root common.Hash, usedAddr []common.Address, usedSlot []common.Hash) {
+func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
fetcher.wait() // ensure the fetcher's idle before poking in its internals
-
- fetcher.usedAddr = append(fetcher.usedAddr, usedAddr...)
- fetcher.usedSlot = append(fetcher.usedSlot, usedSlot...)
+ fetcher.used = append(fetcher.used, used...)
}
}
@@ -237,50 +235,44 @@ type subfetcher struct {
stop chan struct{} // Channel to interrupt processing
term chan struct{} // Channel to signal interruption
- seenReadAddr map[common.Address]struct{} // Tracks the accounts already loaded via read operations
- seenWriteAddr map[common.Address]struct{} // Tracks the accounts already loaded via write operations
- seenReadSlot map[common.Hash]struct{} // Tracks the storage already loaded via read operations
- seenWriteSlot map[common.Hash]struct{} // Tracks the storage already loaded via write operations
+ seenRead map[string]struct{} // Tracks the entries already loaded via read operations
+ seenWrite map[string]struct{} // Tracks the entries already loaded via write operations
dupsRead int // Number of duplicate preload tasks via reads only
dupsWrite int // Number of duplicate preload tasks via writes only
dupsCross int // Number of duplicate preload tasks via read-write-crosses
- usedAddr []common.Address // Tracks the accounts used in the end
- usedSlot []common.Hash // Tracks the storage used in the end
+ used [][]byte // Tracks the entries used in the end
}
// subfetcherTask is a trie path to prefetch, tagged with whether it originates
// from a read or a write request.
type subfetcherTask struct {
read bool
- addr *common.Address
- slot *common.Hash
+ key []byte
}
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
sf := &subfetcher{
- db: db,
- state: state,
- owner: owner,
- root: root,
- addr: addr,
- wake: make(chan struct{}, 1),
- stop: make(chan struct{}),
- term: make(chan struct{}),
- seenReadAddr: make(map[common.Address]struct{}),
- seenWriteAddr: make(map[common.Address]struct{}),
- seenReadSlot: make(map[common.Hash]struct{}),
- seenWriteSlot: make(map[common.Hash]struct{}),
+ db: db,
+ state: state,
+ owner: owner,
+ root: root,
+ addr: addr,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ seenRead: make(map[string]struct{}),
+ seenWrite: make(map[string]struct{}),
}
go sf.loop()
return sf
}
// schedule adds a batch of trie keys to the queue to prefetch.
-func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read bool) error {
+func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
// Ensure the subfetcher is still alive
select {
case <-sf.term:
@@ -289,11 +281,9 @@ func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read
}
// Append the tasks to the current queue
sf.lock.Lock()
- for _, addr := range addrs {
- sf.tasks = append(sf.tasks, &subfetcherTask{read: read, addr: &addr})
- }
- for _, slot := range slots {
- sf.tasks = append(sf.tasks, &subfetcherTask{read: read, slot: &slot})
+ for _, key := range keys {
+ key := key // closure for the append below
+ sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
}
sf.lock.Unlock()
@@ -389,66 +379,35 @@ func (sf *subfetcher) loop() {
sf.lock.Unlock()
for _, task := range tasks {
- if task.addr != nil {
- key := *task.addr
- if task.read {
- if _, ok := sf.seenReadAddr[key]; ok {
- sf.dupsRead++
- continue
- }
- if _, ok := sf.seenWriteAddr[key]; ok {
- sf.dupsCross++
- continue
- }
- } else {
- if _, ok := sf.seenReadAddr[key]; ok {
- sf.dupsCross++
- continue
- }
- if _, ok := sf.seenWriteAddr[key]; ok {
- sf.dupsWrite++
- continue
- }
+ key := string(task.key)
+ if task.read {
+ if _, ok := sf.seenRead[key]; ok {
+ sf.dupsRead++
+ continue
+ }
+ if _, ok := sf.seenWrite[key]; ok {
+ sf.dupsCross++
+ continue
}
} else {
- key := *task.slot
- if task.read {
- if _, ok := sf.seenReadSlot[key]; ok {
- sf.dupsRead++
- continue
- }
- if _, ok := sf.seenWriteSlot[key]; ok {
- sf.dupsCross++
- continue
- }
- } else {
- if _, ok := sf.seenReadSlot[key]; ok {
- sf.dupsCross++
- continue
- }
- if _, ok := sf.seenWriteSlot[key]; ok {
- sf.dupsWrite++
- continue
- }
+ if _, ok := sf.seenRead[key]; ok {
+ sf.dupsCross++
+ continue
+ }
+ if _, ok := sf.seenWrite[key]; ok {
+ sf.dupsWrite++
+ continue
}
}
- if task.addr != nil {
- sf.trie.GetAccount(*task.addr)
+ if len(task.key) == common.AddressLength {
+ sf.trie.GetAccount(common.BytesToAddress(task.key))
} else {
- sf.trie.GetStorage(sf.addr, (*task.slot)[:])
+ sf.trie.GetStorage(sf.addr, task.key)
}
if task.read {
- if task.addr != nil {
- sf.seenReadAddr[*task.addr] = struct{}{}
- } else {
- sf.seenReadSlot[*task.slot] = struct{}{}
- }
+ sf.seenRead[key] = struct{}{}
} else {
- if task.addr != nil {
- sf.seenWriteAddr[*task.addr] = struct{}{}
- } else {
- sf.seenWriteSlot[*task.slot] = struct{}{}
- }
+ sf.seenWrite[key] = struct{}{}
}
}
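
With the prefetcher reworked above to take opaque [][]byte keys, account addresses are scheduled as 20-byte keys and storage slots as 32-byte keys, and the subfetcher dispatches on len(key) == common.AddressLength. A rough, package-internal sketch of both call shapes (it would live in package state next to the prefetcher and assumes the existing common and crypto imports; it mirrors the test changes that follow):

// prefetchBoth schedules one account read in the main trie and one storage
// slot read in addr's storage trie, using the byte-key API from the hunk above.
func prefetchBoth(p *triePrefetcher, stateRoot, storageRoot common.Hash, addr common.Address, slot common.Hash) error {
	// 20-byte key: resolved via trie.GetAccount in the subfetcher loop.
	if err := p.prefetch(common.Hash{}, stateRoot, common.Address{}, [][]byte{addr.Bytes()}, true); err != nil {
		return err
	}
	// 32-byte key: resolved via trie.GetStorage in the subfetcher loop.
	return p.prefetch(crypto.Keccak256Hash(addr.Bytes()), storageRoot, addr, [][]byte{slot.Bytes()}, true)
}
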
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index d96727704cdd..529b42d39cdb 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -53,12 +53,12 @@ func TestUseAfterTerminate(t *testing.T) {
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", true)
skey := common.HexToHash("aaa")
- if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, nil, []common.Hash{skey}, false); err != nil {
+ if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}, false); err != nil {
t.Errorf("Prefetch failed before terminate: %v", err)
}
prefetcher.terminate(false)
- if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, nil, []common.Hash{skey}, false); err == nil {
+ if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}, false); err == nil {
t.Errorf("Prefetch succeeded after terminate: %v", err)
}
if tr := prefetcher.trie(common.Hash{}, db.originalRoot); tr == nil {
@@ -90,10 +90,14 @@ func TestVerklePrefetcher(t *testing.T) {
fetcher := newTriePrefetcher(sdb, root, "", false)
// Read account
- fetcher.prefetch(common.Hash{}, root, common.Address{}, []common.Address{addr}, nil, false)
+ fetcher.prefetch(common.Hash{}, root, common.Address{}, [][]byte{
+ addr.Bytes(),
+ }, false)
// Read storage slot
- fetcher.prefetch(crypto.Keccak256Hash(addr.Bytes()), sRoot, addr, nil, []common.Hash{skey}, false)
+ fetcher.prefetch(crypto.Keccak256Hash(addr.Bytes()), sRoot, addr, [][]byte{
+ skey.Bytes(),
+ }, false)
fetcher.terminate(false)
accountTrie := fetcher.trie(common.Hash{}, root)
diff --git a/core/state_processor.go b/core/state_processor.go
index 6cf0494e87ae..56b929f5cd39 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -71,29 +71,22 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
var (
context vm.BlockContext
signer = types.MakeSigner(p.config, header.Number, header.Time)
+ err error
)
-
- // Apply pre-execution system calls.
context = NewEVMBlockContext(header, p.chain, nil)
-
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg)
- var tracingStateDB = vm.StateDB(statedb)
- if hooks := cfg.Tracer; hooks != nil {
- tracingStateDB = state.NewHookedState(statedb, hooks)
- }
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
- ProcessBeaconBlockRoot(*beaconRoot, vmenv, tracingStateDB)
+ ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
}
if p.config.IsPrague(block.Number(), block.Time()) {
- ProcessParentBlockHash(block.ParentHash(), vmenv, tracingStateDB)
+ ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
}
-
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
// CHANGE(taiko): mark the first transaction as anchor transaction.
if i == 0 && p.config.Taiko {
if err := tx.MarkAsAnchor(); err != nil {
- return nil, nil, 0, err
+ return nil, err
}
}
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
@@ -113,24 +106,16 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
allLogs = append(allLogs, receipt.Logs...)
}
// Read requests if Prague is enabled.
- var requests [][]byte
+ var requests types.Requests
if p.config.IsPrague(block.Number(), block.Time()) {
- // EIP-6110 deposits
- depositRequests, err := ParseDepositLogs(allLogs, p.config)
+ requests, err = ParseDepositLogs(allLogs, p.config)
if err != nil {
return nil, err
}
- requests = append(requests, depositRequests)
- // EIP-7002 withdrawals
- withdrawalRequests := ProcessWithdrawalQueue(vmenv, tracingStateDB)
- requests = append(requests, withdrawalRequests)
- // EIP-7251 consolidations
- consolidationRequests := ProcessConsolidationQueue(vmenv, tracingStateDB)
- requests = append(requests, consolidationRequests)
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
- p.chain.engine.Finalize(p.chain, header, tracingStateDB, block.Body())
+ p.chain.engine.Finalize(p.chain, header, statedb, block.Body())
return &ProcessResult{
Receipts: receipts,
@@ -144,17 +129,14 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// and uses the input parameters for its environment similar to ApplyTransaction. However,
// this method takes an already created EVM instance as input.
func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (receipt *types.Receipt, err error) {
- var tracingStateDB = vm.StateDB(statedb)
- if hooks := evm.Config.Tracer; hooks != nil {
- tracingStateDB = state.NewHookedState(statedb, hooks)
- if hooks.OnTxStart != nil {
- hooks.OnTxStart(evm.GetVMContext(), tx, msg.From)
- }
- if hooks.OnTxEnd != nil {
- defer func() { hooks.OnTxEnd(receipt, err) }()
+ if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil {
+ evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
+ if evm.Config.Tracer.OnTxEnd != nil {
+ defer func() {
+ evm.Config.Tracer.OnTxEnd(receipt, err)
+ }()
}
}
-
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
- evm.Reset(txContext, tracingStateDB)
+ evm.Reset(txContext, statedb)
@@ -238,7 +220,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests.
-func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.StateDB) {
+func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
if tracer := vmenv.Config.Tracer; tracer != nil {
if tracer.OnSystemCallStart != nil {
tracer.OnSystemCallStart()
@@ -247,6 +229,9 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.St
defer tracer.OnSystemCallEnd()
}
}
+
+ // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
+ // the new root
msg := &Message{
From: params.SystemAddress,
GasLimit: 30_000_000,
@@ -264,7 +249,7 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.St
// ProcessParentBlockHash stores the parent block hash in the history storage contract
// as per EIP-2935.
-func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.StateDB) {
+func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
if tracer := vmenv.Config.Tracer; tracer != nil {
if tracer.OnSystemCallStart != nil {
tracer.OnSystemCallStart()
@@ -273,6 +258,7 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.Stat
defer tracer.OnSystemCallEnd()
}
}
+
msg := &Message{
From: params.SystemAddress,
GasLimit: 30_000_000,
@@ -288,59 +274,17 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.Stat
statedb.Finalise(true)
}
-// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
-// It returns the opaque request data returned by the contract.
-func ProcessWithdrawalQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte {
- return processRequestsSystemCall(vmenv, statedb, 0x01, params.WithdrawalQueueAddress)
-}
-
-// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
-// It returns the opaque request data returned by the contract.
-func ProcessConsolidationQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte {
- return processRequestsSystemCall(vmenv, statedb, 0x02, params.ConsolidationQueueAddress)
-}
-
-func processRequestsSystemCall(vmenv *vm.EVM, statedb vm.StateDB, requestType byte, addr common.Address) []byte {
- if tracer := vmenv.Config.Tracer; tracer != nil {
- if tracer.OnSystemCallStart != nil {
- tracer.OnSystemCallStart()
- }
- if tracer.OnSystemCallEnd != nil {
- defer tracer.OnSystemCallEnd()
- }
- }
-
- msg := &Message{
- From: params.SystemAddress,
- GasLimit: 30_000_000,
- GasPrice: common.Big0,
- GasFeeCap: common.Big0,
- GasTipCap: common.Big0,
- To: &addr,
- }
- vmenv.Reset(NewEVMTxContext(msg), statedb)
- statedb.AddAddressToAccessList(addr)
- ret, _, _ := vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
- statedb.Finalise(true)
-
- // Create withdrawals requestsData with prefix 0x01
- requestsData := make([]byte, len(ret)+1)
- requestsData[0] = requestType
- copy(requestsData[1:], ret)
- return requestsData
-}
-
// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
// BeaconDepositContract.
-func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) ([]byte, error) {
- deposits := make([]byte, 1) // note: first byte is 0x00 (== deposit request type)
+func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) (types.Requests, error) {
+ deposits := make(types.Requests, 0)
for _, log := range logs {
if log.Address == config.DepositContractAddress {
- request, err := types.DepositLogToRequest(log.Data)
+ d, err := types.UnpackIntoDeposit(log.Data)
if err != nil {
return nil, fmt.Errorf("unable to parse deposit data: %v", err)
}
- deposits = append(deposits, request...)
+ deposits = append(deposits, types.NewRequest(d))
}
}
return deposits, nil
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index f3d230469006..8b02816f267c 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -18,7 +18,7 @@ package core
import (
"crypto/ecdsa"
- "math"
+ "encoding/binary"
"math/big"
"testing"
@@ -29,11 +29,14 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-verkle"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -422,3 +425,196 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
}
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
}
+
+var (
+ code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`)
+ intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true)
+ // A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness
+ // will not contain that copied data.
+ // Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985
+ codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`)
+ intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true)
+)
+
+func TestProcessVerkle(t *testing.T) {
+ var (
+ config = &params.ChainConfig{
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ Ethash: new(params.EthashConfig),
+ ShanghaiTime: u64(0),
+ VerkleTime: u64(0),
+ TerminalTotalDifficulty: common.Big0,
+ TerminalTotalDifficultyPassed: true,
+ // TODO uncomment when proof generation is merged
+ // ProofInBlocks: true,
+ }
+ signer = types.LatestSigner(config)
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain
+ coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
+ gspec = &Genesis{
+ Config: config,
+ Alloc: GenesisAlloc{
+ coinbase: GenesisAccount{
+ Balance: big.NewInt(1000000000000000000), // 1 ether
+ Nonce: 0,
+ },
+ },
+ }
+ )
+ // Verkle trees use the snapshot, which must be enabled before the
+ // data is saved into the tree+database.
+ // genesis := gspec.MustCommit(bcdb, triedb)
+ cacheConfig := DefaultCacheConfigWithScheme("path")
+ cacheConfig.SnapshotLimit = 0
+ blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
+ defer blockchain.Stop()
+
+ txCost1 := params.TxGas
+ txCost2 := params.TxGas
+ contractCreationCost := intrinsicContractCreationGas +
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */
+ 739 /* execution costs */
+ codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas +
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
+ params.WitnessChunkReadCost + /* SLOAD in constructor */
+ params.WitnessChunkWriteCost + /* SSTORE in constructor */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
+ params.WitnessChunkReadCost + /* SLOAD in constructor */
+ params.WitnessChunkWriteCost + /* SSTORE in constructor */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */
+ 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */
+ 4844 /* execution costs */
+ blockGasUsagesExpected := []uint64{
+ txCost1*2 + txCost2,
+ txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas,
+ }
+ _, chain, _, proofs, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) {
+ gen.SetPoS()
+
+ // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over)
+ tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
+ gen.AddTx(tx)
+ tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
+ gen.AddTx(tx)
+ tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey)
+ gen.AddTx(tx)
+
+ // Add two contract creations in block #2
+ if i == 1 {
+ tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey)
+ gen.AddTx(tx)
+
+ tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(0), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey)
+ gen.AddTx(tx)
+ }
+ })
+
+ // Check proof for both blocks
+ err := verkle.Verify(proofs[0], gspec.ToBlock().Root().Bytes(), chain[0].Root().Bytes(), statediffs[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = verkle.Verify(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), statediffs[1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Log("verified verkle proof, inserting blocks into the chain")
+
+ endnum, err := blockchain.InsertChain(chain)
+ if err != nil {
+ t.Fatalf("block %d imported with error: %v", endnum, err)
+ }
+
+ for i := 0; i < 2; i++ {
+ b := blockchain.GetBlockByNumber(uint64(i) + 1)
+ if b == nil {
+ t.Fatalf("expected block %d to be present in chain", i+1)
+ }
+ if b.Hash() != chain[i].Hash() {
+ t.Fatalf("block #%d not found at expected height", b.NumberU64())
+ }
+ if b.GasUsed() != blockGasUsagesExpected[i] {
+ t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed())
+ }
+ }
+}
+
+func TestProcessParentBlockHash(t *testing.T) {
+ var (
+ chainConfig = params.MergedTestChainConfig
+ hashA = common.Hash{0x01}
+ hashB = common.Hash{0x02}
+ header = &types.Header{ParentHash: hashA, Number: big.NewInt(2), Difficulty: big.NewInt(0)}
+ parent = &types.Header{ParentHash: hashB, Number: big.NewInt(1), Difficulty: big.NewInt(0)}
+ coinbase = common.Address{}
+ )
+ test := func(statedb *state.StateDB) {
+ statedb.SetNonce(params.HistoryStorageAddress, 1)
+ statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode)
+ statedb.IntermediateRoot(true)
+
+ vmContext := NewEVMBlockContext(header, nil, &coinbase)
+ evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
+ ProcessParentBlockHash(header.ParentHash, evm, statedb)
+
+ vmContext = NewEVMBlockContext(parent, nil, &coinbase)
+ evm = vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
+ ProcessParentBlockHash(parent.ParentHash, evm, statedb)
+
+ // make sure that the state is correct
+ if have := getParentBlockHash(statedb, 1); have != hashA {
+ t.Errorf("want parent hash %v, have %v", hashA, have)
+ }
+ if have := getParentBlockHash(statedb, 0); have != hashB {
+ t.Errorf("want parent hash %v, have %v", hashB, have)
+ }
+ }
+ t.Run("MPT", func(t *testing.T) {
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+ test(statedb)
+ })
+ t.Run("Verkle", func(t *testing.T) {
+ db := rawdb.NewMemoryDatabase()
+ cacheConfig := DefaultCacheConfigWithScheme(rawdb.PathScheme)
+ cacheConfig.SnapshotLimit = 0
+ triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true))
+ statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil))
+ test(statedb)
+ })
+}
+
+func getParentBlockHash(statedb *state.StateDB, number uint64) common.Hash {
+ ringIndex := number % params.HistoryServeWindow
+ var key common.Hash
+ binary.BigEndian.PutUint64(key[24:], ringIndex)
+ return statedb.GetState(params.HistoryStorageAddress, key)
+}
diff --git a/core/state_transition.go b/core/state_transition.go
index 1ae76543a338..b6f7651606ba 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -23,6 +23,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common"
+ cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -145,7 +146,10 @@ type Message struct {
// When SkipNonceChecks is true, the message nonce is not checked against the
// account nonce in state.
// This field will be set to true for operations like RPC eth_call.
- SkipAccountChecks bool
+ SkipNonceChecks bool
+
+ // When SkipFromEOACheck is true, the message sender is not checked to be an EOA.
+ SkipFromEOACheck bool
// CHANGE(taiko): whether the current transaction is the first TaikoL2.anchor transaction in a block.
IsAnchor bool
@@ -157,19 +161,20 @@ type Message struct {
// TransactionToMessage converts a transaction into a Message.
func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.Int) (*Message, error) {
msg := &Message{
- Nonce: tx.Nonce(),
- GasLimit: tx.Gas(),
- GasPrice: new(big.Int).Set(tx.GasPrice()),
- GasFeeCap: new(big.Int).Set(tx.GasFeeCap()),
- GasTipCap: new(big.Int).Set(tx.GasTipCap()),
- To: tx.To(),
- Value: tx.Value(),
- Data: tx.Data(),
- AccessList: tx.AccessList(),
- SkipAccountChecks: false,
- BlobHashes: tx.BlobHashes(),
- BlobGasFeeCap: tx.BlobGasFeeCap(),
- IsAnchor: tx.IsAnchor(),
+ Nonce: tx.Nonce(),
+ GasLimit: tx.Gas(),
+ GasPrice: new(big.Int).Set(tx.GasPrice()),
+ GasFeeCap: new(big.Int).Set(tx.GasFeeCap()),
+ GasTipCap: new(big.Int).Set(tx.GasTipCap()),
+ To: tx.To(),
+ Value: tx.Value(),
+ Data: tx.Data(),
+ AccessList: tx.AccessList(),
+ SkipNonceChecks: false,
+ SkipFromEOACheck: false,
+ BlobHashes: tx.BlobHashes(),
+ BlobGasFeeCap: tx.BlobGasFeeCap(),
+ IsAnchor: tx.IsAnchor(),
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
if baseFee != nil {
@@ -269,6 +274,7 @@ func (st *StateTransition) buyGas() error {
if overflow {
return fmt.Errorf("%w: address %v required balance exceeds 256 bits", ErrInsufficientFunds, st.msg.From.Hex())
}
+ // CHANGE(taiko): if the transaction is an anchor transaction, the balance check is skipped.
if st.msg.IsAnchor {
balanceCheckU256 = common.U2560
mgval = common.Big0
@@ -486,7 +492,8 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
} else {
fee := new(uint256.Int).SetUint64(st.gasUsed())
fee.Mul(fee, effectiveTipU256)
- st.state.AddBalance(st.evm.Context.Coinbase, fee)
+ st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee)
+
// CHANGE(taiko): basefee is not burnt, but sent to a treasury and block.coinbase instead.
if st.evm.ChainConfig().Taiko && st.evm.Context.BaseFee != nil && !st.msg.IsAnchor {
totalFee := new(big.Int).Mul(st.evm.Context.BaseFee, new(big.Int).SetUint64(st.gasUsed()))
@@ -495,8 +502,12 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
new(big.Int).SetUint64(100),
)
feeTreasury := new(big.Int).Sub(totalFee, feeCoinbase)
- st.state.AddBalance(st.getTreasuryAddress(), uint256.MustFromBig(feeTreasury))
- st.state.AddBalance(st.evm.Context.Coinbase, uint256.MustFromBig(feeCoinbase))
+ st.state.AddBalance(st.getTreasuryAddress(), uint256.MustFromBig(feeTreasury), tracing.BalanceIncreaseTreasury)
+ st.state.AddBalance(st.evm.Context.Coinbase, uint256.MustFromBig(feeCoinbase), tracing.BalanceIncreaseBaseFeeSharing)
+ }
+ // add the coinbase to the witness iff the fee is greater than 0
+ if rules.IsEIP4762 && fee.Sign() != 0 {
+ st.evm.AccessEvents.AddAccount(st.evm.Context.Coinbase, true)
}
}
@@ -520,15 +531,17 @@ func (st *StateTransition) refundGas(refundQuotient uint64) uint64 {
}
st.gasRemaining += refund
-
- // Do not change the balance in anchor transactions.
+ // CHANGE(taiko): do not change the balance in anchor transactions.
if !st.msg.IsAnchor {
// Return ETH for remaining gas, exchanged at the original rate.
remaining := uint256.NewInt(st.gasRemaining)
- remaining = remaining.Mul(remaining, uint256.MustFromBig(st.msg.GasPrice))
- st.state.AddBalance(st.msg.From, remaining)
- }
+ remaining.Mul(remaining, uint256.MustFromBig(st.msg.GasPrice))
+ st.state.AddBalance(st.msg.From, remaining, tracing.BalanceIncreaseGasReturn)
+ if st.evm.Config.Tracer != nil && st.evm.Config.Tracer.OnGasChange != nil && st.gasRemaining > 0 {
+ st.evm.Config.Tracer.OnGasChange(st.gasRemaining, 0, tracing.GasChangeTxLeftOverReturned)
+ }
+ }
// Also return remaining gas to the block gas counter so it is
// available for the next transaction.
st.gp.AddGas(st.gasRemaining)
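
The CHANGE(taiko) block above splits the base fee between the treasury and the block coinbase instead of burning it. The numerator of the feeCoinbase division is elided in the hunk, so the sketch below treats the sharing percentage as a hypothetical parameter: totalFee = baseFee * gasUsed, feeCoinbase = totalFee * pctg / 100, feeTreasury = totalFee - feeCoinbase.

package main

import (
	"fmt"
	"math/big"
)

// splitBaseFee mirrors the arithmetic of the hunk above under the stated
// assumption about the elided numerator.
func splitBaseFee(baseFee *big.Int, gasUsed, basefeeSharingPctg uint64) (feeCoinbase, feeTreasury *big.Int) {
	totalFee := new(big.Int).Mul(baseFee, new(big.Int).SetUint64(gasUsed))
	feeCoinbase = new(big.Int).Div(
		new(big.Int).Mul(totalFee, new(big.Int).SetUint64(basefeeSharingPctg)),
		new(big.Int).SetUint64(100),
	)
	feeTreasury = new(big.Int).Sub(totalFee, feeCoinbase)
	return feeCoinbase, feeTreasury
}

func main() {
	// e.g. baseFee = 10 gwei, 21000 gas, 25% shared with the coinbase.
	c, t := splitBaseFee(big.NewInt(10_000_000_000), 21000, 25)
	fmt.Println("coinbase:", c, "treasury:", t) // 52500000000000 and 157500000000000
}
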
diff --git a/core/tracing/gen_balance_change_reason_stringer.go b/core/tracing/gen_balance_change_reason_stringer.go
index d3a515a12d37..a78c8ff5df51 100644
--- a/core/tracing/gen_balance_change_reason_stringer.go
+++ b/core/tracing/gen_balance_change_reason_stringer.go
@@ -23,15 +23,28 @@ func _() {
_ = x[BalanceIncreaseSelfdestruct-12]
_ = x[BalanceDecreaseSelfdestruct-13]
_ = x[BalanceDecreaseSelfdestructBurn-14]
+ _ = x[BalanceIncreaseTreasury-99]
+ _ = x[BalanceIncreaseBaseFeeSharing-100]
}
-const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn"
+const (
+ _BalanceChangeReason_name_0 = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn"
+ _BalanceChangeReason_name_1 = "BalanceIncreaseTreasuryBalanceIncreaseBaseFeeSharing"
+)
-var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400}
+var (
+ _BalanceChangeReason_index_0 = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400}
+ _BalanceChangeReason_index_1 = [...]uint8{0, 23, 52}
+)
func (i BalanceChangeReason) String() string {
- if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) {
+ switch {
+ case i <= 14:
+ return _BalanceChangeReason_name_0[_BalanceChangeReason_index_0[i]:_BalanceChangeReason_index_0[i+1]]
+ case 99 <= i && i <= 100:
+ i -= 99
+ return _BalanceChangeReason_name_1[_BalanceChangeReason_index_1[i]:_BalanceChangeReason_index_1[i+1]]
+ default:
return "BalanceChangeReason(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _BalanceChangeReason_name[_BalanceChangeReason_index[i]:_BalanceChangeReason_index[i+1]]
}
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index a21bb1577b08..e11b90034148 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -43,7 +43,6 @@ type StateDB interface {
GetNonce(common.Address) uint64
GetCode(common.Address) []byte
GetState(common.Address, common.Hash) common.Hash
- GetTransientState(common.Address, common.Hash) common.Hash
Exist(common.Address) bool
GetRefund() uint64
}
@@ -55,8 +54,9 @@ type VMContext struct {
Time uint64
Random *common.Hash
// Effective tx gas price
- GasPrice *big.Int
- StateDB StateDB
+ GasPrice *big.Int
+ ChainConfig *params.ChainConfig
+ StateDB StateDB
}
// BlockEvent is emitted upon tracing an incoming block.
@@ -245,6 +245,11 @@ const (
// account within the same tx (captured at end of tx).
// Note it doesn't account for a self-destruct which appoints itself as recipient.
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
+
+ // CHANGE(taiko)
+ // BalanceIncreaseTreasury is the base fee portion sent to the Taiko treasury;
+ // BalanceIncreaseBaseFeeSharing is the portion shared with the block coinbase.
+ BalanceIncreaseTreasury BalanceChangeReason = 99
+ BalanceIncreaseBaseFeeSharing BalanceChangeReason = 100
)
// GasChangeReason is used to indicate the reason for a gas change, useful
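
hooks.go above adds two Taiko-specific BalanceChangeReason values (99 and 100). A minimal sketch of a tracer that watches for them through the standard OnBalanceChange hook; the logging body and function name are illustrative:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/tracing"
)

// newTreasuryWatcher returns hooks that report only the Taiko fee-sharing credits.
func newTreasuryWatcher() *tracing.Hooks {
	return &tracing.Hooks{
		OnBalanceChange: func(addr common.Address, prev, cur *big.Int, reason tracing.BalanceChangeReason) {
			switch reason {
			case tracing.BalanceIncreaseTreasury, tracing.BalanceIncreaseBaseFeeSharing:
				fmt.Printf("%s credited %s wei (%s)\n", addr, new(big.Int).Sub(cur, prev), reason)
			}
		},
	}
}

func main() {
	_ = newTreasuryWatcher() // wire into vm.Config{Tracer: ...} in real use
}
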
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index e4441bec5dad..47b7ef3a4904 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
@@ -45,28 +46,12 @@ import (
)
var (
- testBlobs []*kzg4844.Blob
- testBlobCommits []kzg4844.Commitment
- testBlobProofs []kzg4844.Proof
- testBlobVHashes [][32]byte
+ emptyBlob = new(kzg4844.Blob)
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobVHash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
)
-func init() {
- for i := 0; i < 10; i++ {
- testBlob := &kzg4844.Blob{byte(i)}
- testBlobs = append(testBlobs, testBlob)
-
- testBlobCommit, _ := kzg4844.BlobToCommitment(testBlob)
- testBlobCommits = append(testBlobCommits, testBlobCommit)
-
- testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
- testBlobProofs = append(testBlobProofs, testBlobProof)
-
- testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
- testBlobVHashes = append(testBlobVHashes, testBlobVHash)
- }
-}
-
// testBlockChain is a mock of the live chain for testing the pool.
type testBlockChain struct {
config *params.ChainConfig
@@ -213,9 +198,9 @@ func makeUnsignedTxWithTestBlob(nonce uint64, gasTipCap uint64, gasFeeCap uint64
- BlobHashes: []common.Hash{testBlobVHashes[blobIdx]},
+ BlobHashes: []common.Hash{emptyBlobVHash},
Value: uint256.NewInt(100),
Sidecar: &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{*testBlobs[blobIdx]},
- Commitments: []kzg4844.Commitment{testBlobCommits[blobIdx]},
- Proofs: []kzg4844.Proof{testBlobProofs[blobIdx]},
+ Blobs: []kzg4844.Blob{*emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
},
}
}
@@ -245,32 +230,6 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
for hash := range seen {
t.Errorf("indexed transaction hash #%x missing from tx lookup table", hash)
}
- // Verify that all blobs in the index are present in the blob lookup and nothing more
- blobs := make(map[common.Hash]map[common.Hash]struct{})
- for _, txs := range pool.index {
- for _, tx := range txs {
- for _, vhash := range tx.vhashes {
- if blobs[vhash] == nil {
- blobs[vhash] = make(map[common.Hash]struct{})
- }
- blobs[vhash][tx.hash] = struct{}{}
- }
- }
- }
- for vhash, txs := range pool.lookup.blobIndex {
- for txhash := range txs {
- if _, ok := blobs[vhash][txhash]; !ok {
- t.Errorf("blob lookup entry missing from transaction index: blob hash #%x, tx hash #%x", vhash, txhash)
- }
- delete(blobs[vhash], txhash)
- if len(blobs[vhash]) == 0 {
- delete(blobs, vhash)
- }
- }
- }
- for vhash := range blobs {
- t.Errorf("indexed transaction blob hash #%x missing from blob lookup table", vhash)
- }
// Verify that transactions are sorted per account and contain no nonce gaps,
// and that the first nonce is the next expected one based on the state.
for addr, txs := range pool.index {
@@ -1078,7 +1037,7 @@ func TestAdd(t *testing.T) {
adds: []addtx{
{ // New account, 1 tx pending: reject duplicate nonce 0
from: "alice",
- tx: makeUnsignedTxWithTestBlob(0, 1, 1, 1, 0),
+ tx: makeUnsignedTx(0, 1, 1, 1),
err: txpool.ErrAlreadyKnown,
},
{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
@@ -1108,7 +1067,7 @@ func TestAdd(t *testing.T) {
},
{ // Old account, 1 tx in chain, 1 tx pending: reject duplicate nonce 1
from: "bob",
- tx: makeUnsignedTxWithTestBlob(1, 1, 1, 1, 1),
+ tx: makeUnsignedTx(1, 1, 1, 1),
err: txpool.ErrAlreadyKnown,
},
{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index a23093c81034..d562759e0692 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -36,6 +36,8 @@ var (
// blobTxMinBlobGasPrice is the big.Int version of the configured protocol
// parameter to avoid constructing a new big integer for every transaction.
blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
+ // CHANGE(taiko): the minimum baseFee defined in TaikoL2 (0.008847185 GWei).
+ minL2BaseFee = new(big.Int).SetUint64(8847185)
)
// ValidationOptions define certain differences between transaction validation
@@ -104,8 +106,12 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
return core.ErrTipAboveFeeCap
}
// CHANGE(taiko): check gasFeeCap.
- if os.Getenv("TAIKO_TEST") == "" && tx.GasFeeCap().Cmp(common.Big0) == 0 {
- return errors.New("max fee per gas is 0")
+ if os.Getenv("TAIKO_TEST") == "" {
+ if opts.Config.IsOntake(head.Number) && tx.GasFeeCap().Cmp(minL2BaseFee) < 0 {
+ return errors.New("max fee per gas is less than the minimum base fee (0.008847185 GWei)")
+ } else if tx.GasFeeCap().Cmp(common.Big0) == 0 {
+ return errors.New("max fee per gas is zero")
+ }
}
// Make sure the transaction is signed properly
if _, err := types.Sender(signer, tx); err != nil {
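
validation.go above introduces a post-Ontake floor on tx.GasFeeCap(). A tiny standalone illustration of the comparison and the unit conversion (8,847,185 wei == 0.008847185 gwei); the fee-cap value is made up:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	minL2BaseFee := big.NewInt(8847185) // same constant as in the hunk above
	feeCap := big.NewInt(8_000_000)     // hypothetical tx.GasFeeCap()
	if feeCap.Cmp(minL2BaseFee) < 0 {
		fmt.Println("reject: max fee per gas is less than the minimum base fee (0.008847185 GWei)")
	}
}
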
diff --git a/core/types.go b/core/types.go
index bed20802ab51..65cd4973e488 100644
--- a/core/types.go
+++ b/core/types.go
@@ -54,7 +54,7 @@ type Processor interface {
// ProcessResult contains the values computed by Process.
type ProcessResult struct {
Receipts types.Receipts
- Requests [][]byte
+ Requests types.Requests
Logs []*types.Log
GasUsed uint64
}
diff --git a/core/types/block.go b/core/types/block.go
index f20fc7d7785b..d6930558a531 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -169,8 +169,9 @@ func (h *Header) SanityCheck() error {
func (h *Header) EmptyBody() bool {
var (
emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash
+ emptyRequests = h.RequestsHash == nil || *h.RequestsHash == EmptyRequestsHash
)
- return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals
+ return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals && emptyRequests
}
// EmptyReceipts returns true if there are no receipts for this header/block.
@@ -184,6 +185,7 @@ type Body struct {
Transactions []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
+ Requests []*Request `rlp:"optional"`
}
// Block represents an Ethereum block.
@@ -208,6 +210,12 @@ type Block struct {
uncles []*Header
transactions Transactions
withdrawals Withdrawals
+ requests Requests
+
// witness is not an encoded part of the block body.
// It is held in Block in order for easy relaying to the places
@@ -230,6 +238,7 @@ type extblock struct {
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
+ Requests []*Request `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@@ -246,6 +255,7 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
txs = body.Transactions
uncles = body.Uncles
withdrawals = body.Withdrawals
+ requests = body.Requests
)
if len(txs) == 0 {
@@ -284,6 +294,17 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
b.withdrawals = slices.Clone(withdrawals)
}
+ if requests == nil {
+ b.header.RequestsHash = nil
+ } else if len(requests) == 0 {
+ b.header.RequestsHash = &EmptyRequestsHash
+ b.requests = Requests{}
+ } else {
+ h := DeriveSha(Requests(requests), hasher)
+ b.header.RequestsHash = &h
+ b.requests = slices.Clone(requests)
+ }
+
return b
}
@@ -333,7 +354,7 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&eb); err != nil {
return err
}
- b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
+ b.header, b.uncles, b.transactions, b.withdrawals, b.requests = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.Requests
b.size.Store(rlp.ListSize(size))
return nil
}
@@ -345,13 +366,14 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
+ Requests: b.requests,
})
}
// Body returns the non-header content of the block.
// Note the returned data is not an independent copy.
func (b *Block) Body() *Body {
- return &Body{b.transactions, b.uncles, b.withdrawals}
+ return &Body{b.transactions, b.uncles, b.withdrawals, b.requests}
}
// Accessors for body data. These do not return a copy because the content
@@ -360,6 +382,7 @@ func (b *Block) Body() *Body {
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
+func (b *Block) Requests() Requests { return b.requests }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
@@ -498,6 +521,7 @@ func (b *Block) WithBody(body Body) *Block {
transactions: slices.Clone(body.Transactions),
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
+ requests: slices.Clone(body.Requests),
witness: b.witness,
}
for i := range body.Uncles {
@@ -512,6 +536,7 @@ func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
+ requests: b.requests,
witness: witness,
}
}
diff --git a/core/types/deposit.go b/core/types/deposit.go
index 3bba2c7aa4fb..172acc36ed3e 100644
--- a/core/types/deposit.go
+++ b/core/types/deposit.go
@@ -17,27 +17,52 @@
package types
import (
+ "bytes"
+ "encoding/binary"
"fmt"
-)
-const (
- depositRequestSize = 192
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rlp"
)
+//go:generate go run github.com/fjl/gencodec -type Deposit -field-override depositMarshaling -out gen_deposit_json.go
+
+// Deposit contains EIP-6110 deposit data.
+type Deposit struct {
+ PublicKey [48]byte `json:"pubkey"` // public key of validator
+ WithdrawalCredentials common.Hash `json:"withdrawalCredentials"` // beneficiary of the validator funds
+ Amount uint64 `json:"amount"` // deposit size in Gwei
+ Signature [96]byte `json:"signature"` // signature over deposit msg
+ Index uint64 `json:"index"` // deposit count value
+}
+
+// field type overrides for gencodec
+type depositMarshaling struct {
+ PublicKey hexutil.Bytes
+ WithdrawalCredentials hexutil.Bytes
+ Amount hexutil.Uint64
+ Signature hexutil.Bytes
+ Index hexutil.Uint64
+}
+
+// Deposits implements DerivableList for requests.
+type Deposits []*Deposit
+
+// Len returns the length of s.
+func (s Deposits) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th deposit to s.
+func (s Deposits) EncodeIndex(i int, w *bytes.Buffer) {
+ rlp.Encode(w, s[i])
+}
+
// UnpackIntoDeposit unpacks a serialized DepositEvent.
-func DepositLogToRequest(data []byte) ([]byte, error) {
+func UnpackIntoDeposit(data []byte) (*Deposit, error) {
if len(data) != 576 {
return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data))
}
-
- request := make([]byte, depositRequestSize)
- const (
- pubkeyOffset = 0
- withdrawalCredOffset = pubkeyOffset + 48
- amountOffset = withdrawalCredOffset + 32
- signatureOffset = amountOffset + 8
- indexOffset = signatureOffset + 96
- )
+ var d Deposit
// The ABI encodes the position of dynamic elements first. Since there are 5
// elements, skip over the positional data. The first 32 bytes of dynamic
// elements also encode their actual length. Skip over that value too.
@@ -45,20 +70,34 @@ func DepositLogToRequest(data []byte) ([]byte, error) {
// PublicKey is the first element. ABI encoding pads values to 32 bytes, so
// despite BLS public keys being length 48, the value length here is 64. Then
// skip over the next length value.
- copy(request[pubkeyOffset:], data[b:b+48])
+ copy(d.PublicKey[:], data[b:b+48])
b += 48 + 16 + 32
// WithdrawalCredentials is 32 bytes. Read that value then skip over next
// length.
- copy(request[withdrawalCredOffset:], data[b:b+32])
+ copy(d.WithdrawalCredentials[:], data[b:b+32])
b += 32 + 32
// Amount is 8 bytes, but it is padded to 32. Skip over it and the next
// length.
- copy(request[amountOffset:], data[b:b+8])
+ d.Amount = binary.LittleEndian.Uint64(data[b : b+8])
b += 8 + 24 + 32
// Signature is 96 bytes. Skip over it and the next length.
- copy(request[signatureOffset:], data[b:b+96])
+ copy(d.Signature[:], data[b:b+96])
b += 96 + 32
- // Index is 8 bytes.
- copy(request[indexOffset:], data[b:b+8])
- return request, nil
+ // Index is 8 bytes.
+ d.Index = binary.LittleEndian.Uint64(data[b : b+8])
+
+ return &d, nil
+}
+
+func (d *Deposit) requestType() byte { return DepositRequestType }
+func (d *Deposit) encode(b *bytes.Buffer) error { return rlp.Encode(b, d) }
+func (d *Deposit) decode(input []byte) error { return rlp.DecodeBytes(input, d) }
+func (d *Deposit) copy() RequestData {
+ return &Deposit{
+ PublicKey: d.PublicKey,
+ WithdrawalCredentials: d.WithdrawalCredentials,
+ Amount: d.Amount,
+ Signature: d.Signature,
+ Index: d.Index,
+ }
}
diff --git a/core/types/deposit_test.go b/core/types/deposit_test.go
index 0648920ac9a3..ed2e18445d3f 100644
--- a/core/types/deposit_test.go
+++ b/core/types/deposit_test.go
@@ -17,7 +17,8 @@
package types
import (
- "bytes"
+ "encoding/binary"
+ "reflect"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
@@ -70,26 +71,23 @@ func FuzzUnpackIntoDeposit(f *testing.F) {
copy(sig[:], s)
copy(index[:], i)
- var enc []byte
- enc = append(enc, pubkey[:]...)
- enc = append(enc, wxCred[:]...)
- enc = append(enc, amount[:]...)
- enc = append(enc, sig[:]...)
- enc = append(enc, index[:]...)
-
- out, err := depositABI.Pack("DepositEvent", pubkey[:], wxCred[:], amount[:], sig[:], index[:])
+ want := Deposit{
+ PublicKey: pubkey,
+ WithdrawalCredentials: wxCred,
+ Amount: binary.LittleEndian.Uint64(amount[:]),
+ Signature: sig,
+ Index: binary.LittleEndian.Uint64(index[:]),
+ }
+ out, err := depositABI.Pack("DepositEvent", want.PublicKey[:], want.WithdrawalCredentials[:], amount[:], want.Signature[:], index[:])
if err != nil {
t.Fatalf("error packing deposit: %v", err)
}
- got, err := DepositLogToRequest(out[4:])
+ got, err := UnpackIntoDeposit(out[4:])
if err != nil {
t.Errorf("error unpacking deposit: %v", err)
}
- if len(got) != depositRequestSize {
- t.Errorf("wrong output size: %d, want %d", len(got), depositRequestSize)
- }
- if !bytes.Equal(enc, got) {
- t.Errorf("roundtrip failed: want %x, got %x", enc, got)
+ if !reflect.DeepEqual(want, *got) {
+ t.Errorf("roundtrip failed: want %v, got %v", want, got)
}
})
}
diff --git a/core/types/gen_deposit_json.go b/core/types/gen_deposit_json.go
new file mode 100644
index 000000000000..a65691188f58
--- /dev/null
+++ b/core/types/gen_deposit_json.go
@@ -0,0 +1,70 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+var _ = (*depositMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (d Deposit) MarshalJSON() ([]byte, error) {
+ type Deposit struct {
+ PublicKey hexutil.Bytes `json:"pubkey"`
+ WithdrawalCredentials hexutil.Bytes `json:"withdrawalCredentials"`
+ Amount hexutil.Uint64 `json:"amount"`
+ Signature hexutil.Bytes `json:"signature"`
+ Index hexutil.Uint64 `json:"index"`
+ }
+ var enc Deposit
+ enc.PublicKey = d.PublicKey[:]
+ enc.WithdrawalCredentials = d.WithdrawalCredentials[:]
+ enc.Amount = hexutil.Uint64(d.Amount)
+ enc.Signature = d.Signature[:]
+ enc.Index = hexutil.Uint64(d.Index)
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (d *Deposit) UnmarshalJSON(input []byte) error {
+ type Deposit struct {
+ PublicKey *hexutil.Bytes `json:"pubkey"`
+ WithdrawalCredentials *hexutil.Bytes `json:"withdrawalCredentials"`
+ Amount *hexutil.Uint64 `json:"amount"`
+ Signature *hexutil.Bytes `json:"signature"`
+ Index *hexutil.Uint64 `json:"index"`
+ }
+ var dec Deposit
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.PublicKey != nil {
+ if len(*dec.PublicKey) != len(d.PublicKey) {
+ return errors.New("field 'pubkey' has wrong length, need 48 items")
+ }
+ copy(d.PublicKey[:], *dec.PublicKey)
+ }
+ if dec.WithdrawalCredentials != nil {
+ if len(*dec.WithdrawalCredentials) != len(d.WithdrawalCredentials) {
+ return errors.New("field 'withdrawalCredentials' has wrong length, need 32 items")
+ }
+ copy(d.WithdrawalCredentials[:], *dec.WithdrawalCredentials)
+ }
+ if dec.Amount != nil {
+ d.Amount = uint64(*dec.Amount)
+ }
+ if dec.Signature != nil {
+ if len(*dec.Signature) != len(d.Signature) {
+ return errors.New("field 'signature' has wrong length, need 96 items")
+ }
+ copy(d.Signature[:], *dec.Signature)
+ }
+ if dec.Index != nil {
+ d.Index = uint64(*dec.Index)
+ }
+ return nil
+}
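A short usage sketch of the generated marshaling, with illustrative values only; it assumes the `Deposit` type and generated code above are in the build:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Illustrative deposit; real values come from UnpackIntoDeposit.
	d := &types.Deposit{Amount: 32_000_000_000, Index: 7}
	d.PublicKey[0] = 0xaa
	d.WithdrawalCredentials[0] = 0x01

	enc, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(enc)) // pubkey/signature as hex bytes, amount/index as hex quantities

	var back types.Deposit
	if err := json.Unmarshal(enc, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Amount == d.Amount && back.Index == d.Index) // true
}
```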
diff --git a/core/types/hashes.go b/core/types/hashes.go
index 43e9130fd170..cbd197072e5e 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -41,6 +41,9 @@ var (
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ // EmptyRequestsHash is the known hash of the empty requests set.
+ EmptyRequestsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
)
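The value reused here for `EmptyRequestsHash` is the empty trie root, i.e. the Keccak-256 of an RLP-encoded empty byte string. A quick check, illustrative and outside the patch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	enc, _ := rlp.EncodeToBytes([]byte{}) // empty byte string encodes to 0x80
	fmt.Printf("%x\n", crypto.Keccak256(enc))
	// 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
}
```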
diff --git a/core/types/request.go b/core/types/request.go
new file mode 100644
index 000000000000..7b1cade26e75
--- /dev/null
+++ b/core/types/request.go
@@ -0,0 +1,157 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+ ErrRequestTypeNotSupported = errors.New("request type not supported")
+ errShortTypedRequest = errors.New("typed request too short")
+)
+
+// Request types.
+const (
+ DepositRequestType = 0x00
+)
+
+// Request is an EIP-7685 request object. It represents execution layer
+// triggered messages bound for the consensus layer.
+type Request struct {
+ inner RequestData
+}
+
+// Type returns the EIP-7685 type of the request.
+func (r *Request) Type() byte {
+ return r.inner.requestType()
+}
+
+// Inner returns the inner request data.
+func (r *Request) Inner() RequestData {
+ return r.inner
+}
+
+// NewRequest creates a new request.
+func NewRequest(inner RequestData) *Request {
+ req := new(Request)
+ req.inner = inner.copy()
+ return req
+}
+
+// Requests implements DerivableList for requests.
+type Requests []*Request
+
+// Len returns the length of s.
+func (s Requests) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th request to s.
+func (s Requests) EncodeIndex(i int, w *bytes.Buffer) {
+ s[i].encode(w)
+}
+
+// RequestData is the underlying data of a request.
+type RequestData interface {
+ requestType() byte
+ encode(*bytes.Buffer) error
+ decode([]byte) error
+ copy() RequestData // creates a deep copy and initializes all fields
+}
+
+// EncodeRLP implements rlp.Encoder
+func (r *Request) EncodeRLP(w io.Writer) error {
+ buf := encodeBufferPool.Get().(*bytes.Buffer)
+ defer encodeBufferPool.Put(buf)
+ buf.Reset()
+ if err := r.encode(buf); err != nil {
+ return err
+ }
+ return rlp.Encode(w, buf.Bytes())
+}
+
+// encode writes the canonical encoding of a request to w.
+func (r *Request) encode(w *bytes.Buffer) error {
+ w.WriteByte(r.Type())
+ return r.inner.encode(w)
+}
+
+// MarshalBinary returns the canonical encoding of the request.
+func (r *Request) MarshalBinary() ([]byte, error) {
+ var buf bytes.Buffer
+ err := r.encode(&buf)
+ return buf.Bytes(), err
+}
+
+// DecodeRLP implements rlp.Decoder
+func (r *Request) DecodeRLP(s *rlp.Stream) error {
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case kind == rlp.List:
+ return fmt.Errorf("untyped request")
+ case kind == rlp.Byte:
+ return errShortTypedRequest
+ default:
+ // First read the request payload bytes into a temporary buffer.
+ b, buf, err := getPooledBuffer(size)
+ if err != nil {
+ return err
+ }
+ defer encodeBufferPool.Put(buf)
+ if err := s.ReadBytes(b); err != nil {
+ return err
+ }
+ // Now decode the inner request.
+ inner, err := r.decode(b)
+ if err == nil {
+ r.inner = inner
+ }
+ return err
+ }
+}
+
+// UnmarshalBinary decodes the canonical encoding of requests.
+func (r *Request) UnmarshalBinary(b []byte) error {
+ inner, err := r.decode(b)
+ if err != nil {
+ return err
+ }
+ r.inner = inner
+ return nil
+}
+
+// decode decodes a request from the canonical format.
+func (r *Request) decode(b []byte) (RequestData, error) {
+ if len(b) <= 1 {
+ return nil, errShortTypedRequest
+ }
+ var inner RequestData
+ switch b[0] {
+ case DepositRequestType:
+ inner = new(Deposit)
+ default:
+ return nil, ErrRequestTypeNotSupported
+ }
+ err := inner.decode(b[1:])
+ return inner, err
+}
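A minimal sketch of how the new envelope is used: wrap a deposit, encode it to the canonical type-prefixed form, and decode it back. Illustrative only, assuming the types defined above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Wrap a deposit in the typed-request envelope; NewRequest deep-copies the inner data.
	dep := &types.Deposit{Amount: 32_000_000_000, Index: 1}
	req := types.NewRequest(dep)

	// The canonical encoding is the one-byte type followed by the RLP of the inner data.
	enc, err := req.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=0x%02x, %d bytes\n", req.Type(), len(enc)) // type=0x00 (DepositRequestType)

	var back types.Request
	if err := back.UnmarshalBinary(enc); err != nil {
		panic(err)
	}
	got := back.Inner().(*types.Deposit)
	fmt.Println(got.Amount == dep.Amount && got.Index == dep.Index) // true
}
```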
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index f54d5ab86e66..ade2506cad7f 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"maps"
- "math"
"math/big"
"github.com/consensys/gnark-crypto/ecc"
@@ -30,6 +29,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/blake2b"
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 71d51f81efe0..edd6ec8d0a2c 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -533,173 +533,3 @@ func enable4762(jt *JumpTable) {
}
}
}
-
-// enableEOF applies the EOF changes.
-// OBS! For EOF, there are two changes:
-// 1. Two separate jumptables are required. One, EOF-jumptable, is used by
-// eof contracts. This one contains things like RJUMP.
-// 2. The regular non-eof jumptable also needs to be modified, specifically to
-// modify how EXTCODECOPY works under the hood.
-//
-// This method _only_ deals with case 1.
-func enableEOF(jt *JumpTable) {
- // Deprecate opcodes
- undefined := &operation{
- execute: opUndefined,
- constantGas: 0,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- undefined: true,
- }
- jt[CALL] = undefined
- jt[CALLCODE] = undefined
- jt[DELEGATECALL] = undefined
- jt[STATICCALL] = undefined
- jt[SELFDESTRUCT] = undefined
- jt[JUMP] = undefined
- jt[JUMPI] = undefined
- jt[PC] = undefined
- jt[CREATE] = undefined
- jt[CREATE2] = undefined
- jt[CODESIZE] = undefined
- jt[CODECOPY] = undefined
- jt[EXTCODESIZE] = undefined
- jt[EXTCODECOPY] = undefined
- jt[EXTCODEHASH] = undefined
- jt[GAS] = undefined
- // Allow 0xFE to terminate sections
- jt[INVALID] = &operation{
- execute: opUndefined,
- constantGas: 0,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
-
- // New opcodes
- jt[RJUMP] = &operation{
- execute: opRjump,
- constantGas: GasQuickStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[RJUMPI] = &operation{
- execute: opRjumpi,
- constantGas: GasFastishStep,
- minStack: minStack(1, 0),
- maxStack: maxStack(1, 0),
- }
- jt[RJUMPV] = &operation{
- execute: opRjumpv,
- constantGas: GasFastishStep,
- minStack: minStack(1, 0),
- maxStack: maxStack(1, 0),
- }
- jt[CALLF] = &operation{
- execute: opCallf,
- constantGas: GasFastStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[RETF] = &operation{
- execute: opRetf,
- constantGas: GasFastestStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[JUMPF] = &operation{
- execute: opJumpf,
- constantGas: GasFastStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[EOFCREATE] = &operation{
- execute: opEOFCreate,
- constantGas: params.Create2Gas,
- dynamicGas: gasEOFCreate,
- minStack: minStack(4, 1),
- maxStack: maxStack(4, 1),
- memorySize: memoryEOFCreate,
- }
- jt[RETURNCONTRACT] = &operation{
- execute: opReturnContract,
- // returncontract has zero constant gas cost
- dynamicGas: pureMemoryGascost,
- minStack: minStack(2, 0),
- maxStack: maxStack(2, 0),
- memorySize: memoryReturnContract,
- }
- jt[DATALOAD] = &operation{
- execute: opDataLoad,
- constantGas: GasFastishStep,
- minStack: minStack(1, 1),
- maxStack: maxStack(1, 1),
- }
- jt[DATALOADN] = &operation{
- execute: opDataLoadN,
- constantGas: GasFastestStep,
- minStack: minStack(0, 1),
- maxStack: maxStack(0, 1),
- }
- jt[DATASIZE] = &operation{
- execute: opDataSize,
- constantGas: GasQuickStep,
- minStack: minStack(0, 1),
- maxStack: maxStack(0, 1),
- }
- jt[DATACOPY] = &operation{
- execute: opDataCopy,
- constantGas: GasFastestStep,
- dynamicGas: memoryCopierGas(2),
- minStack: minStack(3, 0),
- maxStack: maxStack(3, 0),
- memorySize: memoryDataCopy,
- }
- jt[DUPN] = &operation{
- execute: opDupN,
- constantGas: GasFastestStep,
- minStack: minStack(0, 1),
- maxStack: maxStack(0, 1),
- }
- jt[SWAPN] = &operation{
- execute: opSwapN,
- constantGas: GasFastestStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[EXCHANGE] = &operation{
- execute: opExchange,
- constantGas: GasFastestStep,
- minStack: minStack(0, 0),
- maxStack: maxStack(0, 0),
- }
- jt[RETURNDATALOAD] = &operation{
- execute: opReturnDataLoad,
- constantGas: GasFastestStep,
- minStack: minStack(1, 1),
- maxStack: maxStack(1, 1),
- }
- jt[EXTCALL] = &operation{
- execute: opExtCall,
- constantGas: params.WarmStorageReadCostEIP2929,
- dynamicGas: makeCallVariantGasCallEIP2929(gasExtCall, 0),
- minStack: minStack(4, 1),
- maxStack: maxStack(4, 1),
- memorySize: memoryExtCall,
- }
- jt[EXTDELEGATECALL] = &operation{
- execute: opExtDelegateCall,
- dynamicGas: makeCallVariantGasCallEIP2929(gasExtDelegateCall, 0),
- constantGas: params.WarmStorageReadCostEIP2929,
- minStack: minStack(3, 1),
- maxStack: maxStack(3, 1),
- memorySize: memoryExtCall,
- }
- jt[EXTSTATICCALL] = &operation{
- execute: opExtStaticCall,
- constantGas: params.WarmStorageReadCostEIP2929,
- dynamicGas: makeCallVariantGasCallEIP2929(gasExtStaticCall, 0),
- minStack: minStack(3, 1),
- maxStack: maxStack(3, 1),
- memorySize: memoryExtCall,
- }
-}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 0593a32a3e09..616668d565cc 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -213,6 +213,9 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
code := evm.StateDB.GetCode(addr)
+ if witness := evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(code)
+ }
if len(code) == 0 {
ret, err = nil, nil // gas is unchanged
} else {
@@ -280,6 +283,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, AccountRef(caller.Address()), value, gas)
+ if witness := evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(evm.StateDB.GetCode(addrCopy))
+ }
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
ret, err = evm.interpreter.Run(contract, input, false)
gas = contract.Gas
@@ -327,6 +333,9 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
addrCopy := addr
// Initialise a new contract and make initialise the delegate values
contract := NewContract(caller, AccountRef(caller.Address()), nil, gas).AsDelegate()
+ if witness := evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(evm.StateDB.GetCode(addrCopy))
+ }
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
ret, err = evm.interpreter.Run(contract, input, false)
gas = contract.Gas
@@ -382,6 +391,9 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas)
+ if witness := evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(evm.StateDB.GetCode(addrCopy))
+ }
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
@@ -601,6 +613,7 @@ func (evm *EVM) GetVMContext() *tracing.VMContext {
Time: evm.Context.Time,
Random: evm.Context.Random,
GasPrice: evm.TxContext.GasPrice,
+ ChainConfig: evm.ChainConfig(),
StateDB: evm.StateDB,
}
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 47eb62be08d5..35d6393fba0e 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -340,6 +340,10 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
+ address := slot.Bytes20()
+ if witness := interpreter.evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(interpreter.evm.StateDB.GetCode(address))
+ }
slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
return nil, nil
}
@@ -379,6 +383,9 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
addr := common.Address(a.Bytes20())
code := interpreter.evm.StateDB.GetCode(addr)
+ if witness := interpreter.evm.StateDB.Witness(); witness != nil {
+ witness.AddCode(code)
+ }
codeCopy := getData(code, uint64CodeOffset, length.Uint64())
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
@@ -912,7 +919,7 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
- interpreter.evm.StateDB.SelfDestruct6780(scope.Contract.Address())
+ interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address())
if tracer := interpreter.evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
@@ -977,13 +984,13 @@ func makePush(size uint64, pushByteSize int) executionFunc {
start = min(codeLen, int(*pc+1))
end = min(codeLen, start+pushByteSize)
)
- a := new(uint256.Int).SetBytes(scope.Contract.Code[start:end])
+ scope.Stack.push(new(uint256.Int).SetBytes(
+ common.RightPadBytes(
+ scope.Contract.Code[start:end],
+ pushByteSize,
+ )),
+ )
- // Missing bytes: pushByteSize - len(pushData)
- if missing := pushByteSize - (end - start); missing > 0 {
- a.Lsh(a, uint(8*missing))
- }
- scope.Stack.push(a)
*pc += size
return nil, nil
}
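The rewritten `makePush` body is behaviour-preserving; a small sketch showing that right-padding truncated push data is equivalent to the old left-shift approach (illustrative, outside the patch):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/holiman/uint256"
)

func main() {
	// Truncated PUSH2 payload: only one byte of push data remains at the end of the code.
	pushData := []byte{0xAA}
	pushByteSize := 2

	// Old approach: set the bytes, then left-shift by the number of missing bytes.
	old := new(uint256.Int).SetBytes(pushData)
	if missing := pushByteSize - len(pushData); missing > 0 {
		old.Lsh(old, uint(8*missing))
	}

	// New approach from the hunk above: right-pad the slice before SetBytes.
	repl := new(uint256.Int).SetBytes(common.RightPadBytes(pushData, pushByteSize))

	fmt.Println(old.Eq(repl), old.Hex()) // true 0xaa00
}
```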
diff --git a/core/vm/interface.go b/core/vm/interface.go
index 9229f4d2cd95..0f50cc742fd0 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -33,8 +33,8 @@ type StateDB interface {
CreateAccount(common.Address)
CreateContract(common.Address)
- SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
- AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
+ SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason)
+ AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason)
GetBalance(common.Address) *uint256.Int
GetNonce(common.Address) uint64
@@ -51,7 +51,7 @@ type StateDB interface {
GetCommittedState(common.Address, common.Hash) common.Hash
GetState(common.Address, common.Hash) common.Hash
- SetState(common.Address, common.Hash, common.Hash) common.Hash
+ SetState(common.Address, common.Hash, common.Hash)
GetStorageRoot(addr common.Address) common.Hash
GetTransientState(addr common.Address, key common.Hash) common.Hash
@@ -95,9 +95,6 @@ type StateDB interface {
AddPreimage(common.Hash, []byte)
Witness() *stateless.Witness
-
- // Finalise must be invoked at the end of a transaction
- Finalise(bool)
}
// CallContext provides a basic interface for the EVM calling conventions. The EVM
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index c40899440174..8611f2650aef 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -249,8 +249,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
} else if sLen > operation.maxStack {
return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
}
- // for tracing: this gas consumption event is emitted below in the debug section.
- if contract.Gas < cost {
+ if !contract.UseGas(cost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
return nil, ErrOutOfGas
} else {
contract.Gas -= cost
@@ -282,11 +281,8 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
}
- // for tracing: this gas consumption event is emitted below in the debug section.
- if contract.Gas < dynamicCost {
+ if !contract.UseGas(dynamicCost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
return nil, ErrOutOfGas
- } else {
- contract.Gas -= dynamicCost
}
// Do tracing before memory expansion
diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go
index 7d4b2ddf36d4..a67bd92a5485 100644
--- a/core/vm/interpreter_test.go
+++ b/core/vm/interpreter_test.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 658014f24ca4..99f97048e60f 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -61,7 +61,6 @@ var (
shanghaiInstructionSet = newShanghaiInstructionSet()
cancunInstructionSet = newCancunInstructionSet()
verkleInstructionSet = newVerkleInstructionSet()
- pragueEOFInstructionSet = newPragueEOFInstructionSet()
)
// JumpTable contains the EVM opcodes supported at a given fork.
@@ -91,16 +90,6 @@ func newVerkleInstructionSet() JumpTable {
return validate(instructionSet)
}
-func NewPragueEOFInstructionSetForTesting() JumpTable {
- return newPragueEOFInstructionSet()
-}
-
-func newPragueEOFInstructionSet() JumpTable {
- instructionSet := newCancunInstructionSet()
- enableEOF(&instructionSet)
- return validate(instructionSet)
-}
-
func newCancunInstructionSet() JumpTable {
instructionSet := newShanghaiInstructionSet()
enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
diff --git a/core/vm/operations_verkle.go b/core/vm/operations_verkle.go
index 349299477864..722d5ed2ce09 100644
--- a/core/vm/operations_verkle.go
+++ b/core/vm/operations_verkle.go
@@ -17,8 +17,6 @@
package vm
import (
- gomath "math"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/params"
@@ -128,7 +126,7 @@ func gasCodeCopyEip4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory,
)
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
if overflow {
- uint64CodeOffset = gomath.MaxUint64
+ uint64CodeOffset = math.MaxUint64
}
_, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(contract.Code, uint64CodeOffset, length.Uint64())
if !contract.IsDeployment {
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 2243e14b65a9..2e044286c73e 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -61,26 +61,27 @@ func setDefaults(cfg *Config) {
cancunTime = uint64(0)
)
cfg.ChainConfig = ¶ms.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: new(big.Int),
- DAOForkBlock: new(big.Int),
- DAOForkSupport: false,
- EIP150Block: new(big.Int),
- EIP155Block: new(big.Int),
- EIP158Block: new(big.Int),
- ByzantiumBlock: new(big.Int),
- ConstantinopleBlock: new(big.Int),
- PetersburgBlock: new(big.Int),
- IstanbulBlock: new(big.Int),
- MuirGlacierBlock: new(big.Int),
- BerlinBlock: new(big.Int),
- LondonBlock: new(big.Int),
- ArrowGlacierBlock: nil,
- GrayGlacierBlock: nil,
- TerminalTotalDifficulty: big.NewInt(0),
- MergeNetsplitBlock: nil,
- ShanghaiTime: &shanghaiTime,
- CancunTime: &cancunTime}
+ ChainID: big.NewInt(1),
+ HomesteadBlock: new(big.Int),
+ DAOForkBlock: new(big.Int),
+ DAOForkSupport: false,
+ EIP150Block: new(big.Int),
+ EIP155Block: new(big.Int),
+ EIP158Block: new(big.Int),
+ ByzantiumBlock: new(big.Int),
+ ConstantinopleBlock: new(big.Int),
+ PetersburgBlock: new(big.Int),
+ IstanbulBlock: new(big.Int),
+ MuirGlacierBlock: new(big.Int),
+ BerlinBlock: new(big.Int),
+ LondonBlock: new(big.Int),
+ ArrowGlacierBlock: nil,
+ GrayGlacierBlock: nil,
+ TerminalTotalDifficulty: big.NewInt(0),
+ TerminalTotalDifficultyPassed: true,
+ MergeNetsplitBlock: nil,
+ ShanghaiTime: &shanghaiTime,
+ CancunTime: &cancunTime}
}
if cfg.Difficulty == nil {
cfg.Difficulty = new(big.Int)
@@ -108,7 +109,10 @@ func setDefaults(cfg *Config) {
if cfg.BlobBaseFee == nil {
cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice)
}
- cfg.Random = &(common.Hash{})
+ // Merge indicators
+ if t := cfg.ChainConfig.ShanghaiTime; cfg.ChainConfig.TerminalTotalDifficultyPassed || (t != nil && *t == 0) {
+ cfg.Random = &(common.Hash{})
+ }
}
// Execute executes the code using the input as call data during the execution.
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 97234368ee0c..ce31f141c14f 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -934,7 +934,7 @@ func TestJSTracerCreateTx(t *testing.T) {
code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
+ tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
if err != nil {
t.Fatal(err)
}
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index 16a785a18600..c18692c80448 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -88,7 +88,7 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
return nil, errors.New("invalid private key")
}
defer priv.Zero()
- sig := decred_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
+ sig := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27
copy(sig, sig[1:])
@@ -157,11 +157,11 @@ func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
// S256 returns an instance of the secp256k1 curve.
func S256() EllipticCurve {
- return btCurve{secp256k1.S256()}
+ return btCurve{btcec.S256()}
}
type btCurve struct {
- *secp256k1.KoblitzCurve
+ *btcec.KoblitzCurve
}
// Marshal converts a point given as (x, y) into a byte slice.
diff --git a/eth/backend.go b/eth/backend.go
index ccfe650f41c6..a8f1c960a82e 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -199,7 +199,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
)
if config.VMTrace != "" {
- traceConfig := json.RawMessage("{}")
+ var traceConfig json.RawMessage
if config.VMTraceJsonConfig != "" {
traceConfig = json.RawMessage(config.VMTraceJsonConfig)
}
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index d9476214a51b..5b2b1f298580 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"math/big"
+ "strconv"
"sync"
"time"
@@ -92,7 +93,6 @@ var caps = []string{
"engine_getPayloadV2",
"engine_getPayloadV3",
"engine_getPayloadV4",
- "engine_getBlobsV1",
"engine_newPayloadV1",
"engine_newPayloadV2",
"engine_newPayloadV3",
@@ -386,7 +386,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
}
}
- // CHANGE(taiko): check whether --taiko flag is set.
+ // CHANGE(taiko): check whether `--taiko` flag is set.
isTaiko := api.eth.BlockChain().Config().Taiko
if rawdb.ReadCanonicalHash(api.eth.ChainDb(), block.NumberU64()) != update.HeadBlockHash {
@@ -469,7 +469,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
Version: payloadVersion,
}
id := args.Id()
- payload, err := api.eth.Miner().BuildPayload(args)
+ payload, err := api.eth.Miner().BuildPayload(args, false)
if err != nil {
log.Error("Failed to build payload", "err", err)
return valid(nil), engine.InvalidPayloadAttributes.With(err)
@@ -618,7 +618,7 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.newPayload(params, nil, nil, nil, false)
+ return api.newPayload(params, nil, nil, false)
}
// NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@@ -642,7 +642,7 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.newPayload(params, nil, nil, nil, false)
+ return api.newPayload(params, nil, nil, false)
}
// NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@@ -667,11 +667,12 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 must only be called for cancun payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, nil, false)
+ return api.newPayload(params, versionedHashes, beaconRoot, false)
}
// NewPayloadV4 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
-func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -681,6 +682,9 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
+ if params.Deposits == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
+ }
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -688,15 +692,11 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
- if executionRequests == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
- }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
}
- requests := convertRequests(executionRequests)
- return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
+ return api.newPayload(params, versionedHashes, beaconRoot, false)
}
// NewPayloadWithWitnessV1 is analogous to NewPayloadV1, only it also generates
@@ -705,7 +705,7 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV1(params engine.ExecutableData) (
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.newPayload(params, nil, nil, nil, true)
+ return api.newPayload(params, nil, nil, true)
}
// NewPayloadWithWitnessV2 is analogous to NewPayloadV2, only it also generates
@@ -729,7 +729,7 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV2(params engine.ExecutableData) (
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.newPayload(params, nil, nil, nil, true)
+ return api.newPayload(params, nil, nil, true)
}
// NewPayloadWithWitnessV3 is analogous to NewPayloadV3, only it also generates
@@ -755,12 +755,12 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV3(params engine.ExecutableData, v
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV3 must only be called for cancun payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, nil, true)
+ return api.newPayload(params, versionedHashes, beaconRoot, true)
}
// NewPayloadWithWitnessV4 is analogous to NewPayloadV4, only it also generates
// and returns a stateless witness after running the payload.
-func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -770,6 +770,9 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
+ if params.Deposits == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
+ }
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -777,15 +780,11 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
- if executionRequests == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
- }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads"))
}
- requests := convertRequests(executionRequests)
- return api.newPayload(params, versionedHashes, beaconRoot, requests, true)
+ return api.newPayload(params, versionedHashes, beaconRoot, true)
}
// ExecuteStatelessPayloadV1 is analogous to NewPayloadV1, only it operates in
@@ -794,7 +793,7 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV1(params engine.ExecutableData,
if params.Withdrawals != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.executeStatelessPayload(params, nil, nil, nil, opaqueWitness)
+ return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV2 is analogous to NewPayloadV2, only it operates in
@@ -818,7 +817,7 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV2(params engine.ExecutableData,
if params.BlobGasUsed != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.executeStatelessPayload(params, nil, nil, nil, opaqueWitness)
+ return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV3 is analogous to NewPayloadV3, only it operates in
@@ -844,12 +843,12 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV3(params engine.ExecutableData,
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV3 must only be called for cancun payloads"))
}
- return api.executeStatelessPayload(params, versionedHashes, beaconRoot, nil, opaqueWitness)
+ return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
}
// ExecuteStatelessPayloadV4 is analogous to NewPayloadV4, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
-func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
+func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -859,6 +858,9 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
if params.BlobGasUsed == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
+ if params.Deposits == nil {
+ return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
+ }
if versionedHashes == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -866,18 +868,14 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
if beaconRoot == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
- if executionRequests == nil {
- return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
- }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV4 must only be called for prague payloads"))
}
- requests := convertRequests(executionRequests)
- return api.executeStatelessPayload(params, versionedHashes, beaconRoot, requests, opaqueWitness)
+ return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
}
-func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, witness bool) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//
// 1. NewPayload( execdata-N ) is invoked from the CL. It goes all the way down to
@@ -923,10 +921,38 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
} else {
block, err = engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot)
if err != nil {
- log.Debug("Invalid NewPayload params", "params", params, "error", err)
- return engine.PayloadStatusV1{Status: engine.INVALID}, nil
+ bgu := "nil"
+ if params.BlobGasUsed != nil {
+ bgu = strconv.Itoa(int(*params.BlobGasUsed))
+ }
+ ebg := "nil"
+ if params.ExcessBlobGas != nil {
+ ebg = strconv.Itoa(int(*params.ExcessBlobGas))
+ }
+ log.Warn("Invalid NewPayload params",
+ "params.Number", params.Number,
+ "params.ParentHash", params.ParentHash,
+ "params.BlockHash", params.BlockHash,
+ "params.StateRoot", params.StateRoot,
+ "params.FeeRecipient", params.FeeRecipient,
+ "params.LogsBloom", common.PrettyBytes(params.LogsBloom),
+ "params.Random", params.Random,
+ "params.GasLimit", params.GasLimit,
+ "params.GasUsed", params.GasUsed,
+ "params.Timestamp", params.Timestamp,
+ "params.ExtraData", common.PrettyBytes(params.ExtraData),
+ "params.BaseFeePerGas", params.BaseFeePerGas,
+ "params.BlobGasUsed", bgu,
+ "params.ExcessBlobGas", ebg,
+ "len(params.Transactions)", len(params.Transactions),
+ "len(params.Withdrawals)", len(params.Withdrawals),
+ "len(params.Deposits)", len(params.Deposits),
+ "beaconRoot", beaconRoot,
+ "error", err)
+ return api.invalid(err, nil), nil
}
}
+
// Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock()
api.lastNewPayloadUpdate = time.Now()
@@ -1016,9 +1042,10 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
return engine.PayloadStatusV1{Status: engine.VALID, Witness: ow, LatestValidHash: &hash}, nil
}
-func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
+func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
log.Trace("Engine API request received", "method", "ExecuteStatelessPayload", "number", params.Number, "hash", params.BlockHash)
- block, err := engine.ExecutableDataToBlockNoHash(params, versionedHashes, beaconRoot, requests)
+
+ block, err := engine.ExecutableDataToBlockNoHash(params, versionedHashes, beaconRoot)
if err != nil {
bgu := "nil"
if params.BlobGasUsed != nil {
@@ -1045,8 +1072,8 @@ func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, v
"params.ExcessBlobGas", ebg,
"len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals),
+ "len(params.Deposits)", len(params.Deposits),
"beaconRoot", beaconRoot,
- "len(requests)", len(requests),
"error", err)
errorMsg := err.Error()
return engine.StatelessPayloadStatusV1{Status: engine.INVALID, ValidationError: &errorMsg}, nil
@@ -1273,7 +1300,13 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engin
bodies := make([]*engine.ExecutionPayloadBody, len(hashes))
for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash)
- bodies[i] = getBody(block)
+ body := getBody(block)
+ if body != nil {
+ // Nil out the V2 values; clients should know not to request V1 objects
+ // after Prague.
+ body.Deposits = nil
+ }
+ bodies[i] = body
}
return bodies
}
@@ -1292,7 +1325,18 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV2(hashes []common.Hash) []*engin
// GetPayloadBodiesByRangeV1 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range
// of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBody, error) {
- return api.getBodiesByRange(start, count)
+ bodies, err := api.getBodiesByRange(start, count)
+ if err != nil {
+ return nil, err
+ }
+ // Nil out the V2 values; clients should know not to request V1 objects
+ // after Prague.
+ for i := range bodies {
+ if bodies[i] != nil {
+ bodies[i].Deposits = nil
+ }
+ }
+ return bodies, nil
}
// GetPayloadBodiesByRangeV2 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range
@@ -1327,7 +1371,12 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBody {
return nil
}
- var result engine.ExecutionPayloadBody
+ var (
+ body = block.Body()
+ txs = make([]hexutil.Bytes, len(body.Transactions))
+ withdrawals = body.Withdrawals
+ depositRequests types.Deposits
+ )
result.TransactionData = make([]hexutil.Bytes, len(block.Transactions()))
for j, tx := range block.Transactions() {
@@ -1340,17 +1389,20 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBody {
result.Withdrawals = []*types.Withdrawal{}
}
- return &result
-}
-
-// convertRequests converts a hex requests slice to plain [][]byte.
-func convertRequests(hex []hexutil.Bytes) [][]byte {
- if hex == nil {
- return nil
+ if block.Header().RequestsHash != nil {
+ // TODO: this isn't future proof because we can't determine if a request
+ // type has activated yet or if there are just no requests of that type from
+ // only the block.
+ for _, req := range block.Requests() {
+ if d, ok := req.Inner().(*types.Deposit); ok {
+ depositRequests = append(depositRequests, d)
+ }
+ }
}
- req := make([][]byte, len(hex))
- for i := range hex {
- req[i] = hex[i]
+
+ return &engine.ExecutionPayloadBody{
+ TransactionData: txs,
+ Withdrawals: withdrawals,
+ Deposits: depositRequests,
}
- return req
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 3ac719c23ee1..033185efb6d6 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -452,6 +452,7 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block)
}
mcfg := miner.DefaultConfig
+ mcfg.PendingFeeRecipient = testAddr
ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256, Miner: mcfg}
ethservice, err := eth.New(n, ethcfg)
if err != nil {
@@ -503,8 +504,8 @@ func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.He
h = &beaconRoots[i]
}
- envelope := getNewEnvelope(t, api, parent, w, h)
- execResp, err := api.newPayload(*envelope.ExecutionPayload, []common.Hash{}, h, envelope.Requests, false)
+ payload := getNewPayload(t, api, parent, w, h)
+ execResp, err := api.newPayload(*payload, []common.Hash{}, h, false)
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
@@ -756,7 +757,7 @@ func TestEmptyBlocks(t *testing.T) {
}
}
-func getNewEnvelope(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutionPayloadEnvelope {
+func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutableData {
params := engine.PayloadAttributes{
Timestamp: parent.Time + 1,
Random: crypto.Keccak256Hash([]byte{byte(1)}),
@@ -1558,7 +1559,7 @@ func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) {
}
}
-func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
+func equalBody(a *types.Body, b *engine.ExecutionPayloadBody) bool {
if a == nil && b == nil {
return nil
} else if a == nil || b == nil {
@@ -1573,10 +1574,23 @@ func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
return fmt.Errorf("transaction %d mismatch", i)
}
}
+
if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
- return fmt.Errorf("withdrawals mismatch")
+ return false
+ }
+
+ var deposits types.Deposits
+ if a.Requests != nil {
+ // If requests is non-nil, deposits are available in the block, so return an
+ // empty slice instead of nil when there are none.
+ deposits = make(types.Deposits, 0)
}
- return nil
+ for _, r := range a.Requests {
+ if d, ok := r.Inner().(*types.Deposit); ok {
+ deposits = append(deposits, d)
+ }
+ }
+ return reflect.DeepEqual(deposits, b.Deposits)
}
func TestBlockToPayloadWithBlobs(t *testing.T) {
@@ -1597,7 +1611,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) {
}
block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil))
- envelope := engine.BlockToExecutableData(block, nil, sidecars, nil)
+ envelope := engine.BlockToExecutableData(block, nil, sidecars)
var want int
for _, tx := range txs {
want += len(tx.BlobHashes())
@@ -1701,7 +1715,7 @@ func TestParentBeaconBlockRoot(t *testing.T) {
}
func TestWitnessCreationAndConsumption(t *testing.T) {
- //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
+ log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
genesis, blocks := generateMergeChain(10, true)
@@ -1733,6 +1747,9 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
if err != nil {
t.Fatalf("error preparing payload, err=%v", err)
}
+ // Give the payload some time to be built
+ time.Sleep(100 * time.Millisecond)
+
payloadID := (&miner.BuildPayloadArgs{
Parent: fcState.HeadBlockHash,
Timestamp: blockParams.Timestamp,
@@ -1742,7 +1759,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
BeaconRoot: blockParams.BeaconRoot,
Version: engine.PayloadV3,
}).Id()
- envelope, err := api.getPayload(payloadID, true)
+ envelope, err := api.GetPayloadV3(payloadID)
if err != nil {
t.Fatalf("error getting payload, err=%v", err)
}
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
index a24ff5210119..e7df22fc7c47 100644
--- a/eth/catalyst/simulated_beacon.go
+++ b/eth/catalyst/simulated_beacon.go
@@ -21,6 +21,7 @@ import (
"crypto/sha256"
"errors"
"fmt"
+ "math/big"
"sync"
"time"
@@ -218,8 +219,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
}
}
// Mark the payload as canon
- _, err = c.engineAPI.newPayload(*payload, blobHashes, &common.Hash{}, envelope.Requests, false)
- if err != nil {
+ if _, err = c.engineAPI.NewPayloadV3(*payload, blobHashes, &common.Hash{}); err != nil {
return err
}
c.setCurrentState(payload.BlockHash, finalizedHash)
diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go
index 7e9fd7b32453..152b374e2dbc 100644
--- a/eth/catalyst/simulated_beacon_test.go
+++ b/eth/catalyst/simulated_beacon_test.go
@@ -186,12 +186,11 @@ func TestOnDemandSpam(t *testing.T) {
)
for {
select {
- case ev := <-chainHeadCh:
- block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
- for _, itx := range block.Transactions() {
+ case evt := <-chainHeadCh:
+ for _, itx := range evt.Block.Transactions() {
includedTxs[itx.Hash()] = struct{}{}
}
- for _, iwx := range block.Withdrawals() {
+ for _, iwx := range evt.Block.Withdrawals() {
includedWxs = append(includedWxs, iwx.Index)
}
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
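
The rewritten select above relies on core.ChainHeadEvent carrying the whole block on this branch, so the test no longer needs a GetBlock lookup. A small sketch of that event shape under the same assumption (watchHeads is a hypothetical helper, not part of the patch):

    package watcher

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core"
    )

    // watchHeads prints transaction and withdrawal counts straight off the
    // chain-head events, without a separate chain lookup.
    func watchHeads(chain *core.BlockChain, done <-chan struct{}) {
        heads := make(chan core.ChainHeadEvent, 16)
        sub := chain.SubscribeChainHeadEvent(heads)
        defer sub.Unsubscribe()

        for {
            select {
            case evt := <-heads:
                fmt.Printf("head %d: %d txs, %d withdrawals\n",
                    evt.Block.NumberU64(), len(evt.Block.Transactions()), len(evt.Block.Withdrawals()))
            case <-done:
                return
            }
        }
    }
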
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 47c89bf768fa..0f81e152ef4d 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -230,6 +230,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
txsHashes = make([]common.Hash, len(bodies))
uncleHashes = make([]common.Hash, len(bodies))
withdrawalHashes = make([]common.Hash, len(bodies))
+ requestsHashes = make([]common.Hash, len(bodies))
)
hasher := trie.NewStackTrie(nil)
for i, body := range bodies {
@@ -248,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
res := &eth.Response{
Req: req,
Res: (*eth.BlockBodiesResponse)(&bodies),
- Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
+ Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes},
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
}
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
index 56359b33c94e..709df7757507 100644
--- a/eth/downloader/fetchers_concurrent_bodies.go
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
- hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
+ txs, uncles, withdrawals, requests := packet.Res.(*eth.BlockBodiesResponse).Unpack()
+ hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, requests hashes}
- accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
+ accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], requests, hashsets[3])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index a2f916ebbcc7..adad45020040 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -785,7 +785,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []comm
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
-) (int, error) {
+ requestsLists [][]*types.Request, requestsListHashes []common.Hash) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@@ -809,6 +809,19 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
+ if header.RequestsHash == nil {
+ // nil hash means that requests should not be present in body
+ if requestsLists[index] != nil {
+ return errInvalidBody
+ }
+ } else { // non-nil hash: body must have requests
+ if requestsLists[index] == nil {
+ return errInvalidBody
+ }
+ if requestsListHashes[index] != *header.RequestsHash {
+ return errInvalidBody
+ }
+ }
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int
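
The requests check added to DeliverBodies above encodes a simple rule: a header with a nil RequestsHash must come with no requests in the body, while a non-nil hash requires a request list whose derived hash matches. A standalone sketch of that rule under the same header layout (validateRequests is a hypothetical helper; the derived hash is assumed to be computed with types.DeriveSha as in the handler code further below):

    package main

    import (
        "errors"
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // validateRequests applies the same body-validation rule as the hunk above.
    func validateRequests(header *types.Header, requests types.Requests, derived common.Hash) error {
        if header.RequestsHash == nil {
            // nil hash: requests must not be present in the body at all.
            if requests != nil {
                return errors.New("unexpected requests in body")
            }
            return nil
        }
        // non-nil hash: the body must carry requests and their hash must match.
        if requests == nil {
            return errors.New("missing requests in body")
        }
        if derived != *header.RequestsHash {
            return errors.New("requests hash mismatch")
        }
        return nil
    }

    func main() {
        // Header without a requests hash: an empty body is valid.
        fmt.Println(validateRequests(&types.Header{}, nil, common.Hash{}))
    }
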
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index 857ac4813a7d..e29d23f80b7a 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -341,7 +341,7 @@ func XTestDelivery(t *testing.T) {
uncleHashes[i] = types.CalcUncleHash(uncles)
}
time.Sleep(100 * time.Millisecond)
- _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil)
+ _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil, nil)
if err != nil {
fmt.Printf("delivered %d bodies %v\n", len(txset), err)
}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index bd3b6f274d0d..21ba2475f8b4 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -139,6 +139,9 @@ type Config struct {
VMTrace string
VMTraceJsonConfig string
+ // Miscellaneous options
+ DocRoot string `toml:"-"`
+
// RPCGasCap is the global gas cap for eth-call variants.
RPCGasCap uint64
@@ -164,7 +167,12 @@ func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (conse
if config.Taiko {
return taiko.New(config), nil
}
- // If proof-of-authority is requested, set it up
+ // Geth v1.14.0 dropped support for non-merged networks in any consensus
+ // mode. If such a network is requested, reject startup.
+ if !config.TerminalTotalDifficultyPassed {
+ return nil, errors.New("only PoS networks are supported, please transition old ones with Geth v1.13.x")
+ }
+ // Wrap previously supported consensus engines into their post-merge counterpart
if config.Clique != nil {
return beacon.New(clique.New(config.Clique, db)), nil
}
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 0ec0eaddebb4..d96ba0ccb734 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -46,6 +46,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
EnablePreimageRecording bool
VMTrace string
VMTraceJsonConfig string
+ DocRoot string `toml:"-"`
RPCGasCap uint64
RPCEVMTimeout time.Duration
RPCTxFeeCap float64
@@ -82,6 +83,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.VMTrace = c.VMTrace
enc.VMTraceJsonConfig = c.VMTraceJsonConfig
+ enc.DocRoot = c.DocRoot
enc.RPCGasCap = c.RPCGasCap
enc.RPCEVMTimeout = c.RPCEVMTimeout
enc.RPCTxFeeCap = c.RPCTxFeeCap
@@ -122,6 +124,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
EnablePreimageRecording *bool
VMTrace *string
VMTraceJsonConfig *string
+ DocRoot *string `toml:"-"`
RPCGasCap *uint64
RPCEVMTimeout *time.Duration
RPCTxFeeCap *float64
@@ -219,6 +222,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.VMTraceJsonConfig != nil {
c.VMTraceJsonConfig = *dec.VMTraceJsonConfig
}
+ if dec.DocRoot != nil {
+ c.DocRoot = *dec.DocRoot
+ }
if dec.RPCGasCap != nil {
c.RPCGasCap = *dec.RPCGasCap
}
diff --git a/eth/handler.go b/eth/handler.go
index b28081eef0ec..ea3f9a2e8fe7 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -40,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
const (
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index 55f7da87dde0..c41c9abc267f 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -390,6 +390,8 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
}
// Interconnect all the sink handlers with the source handler
for i, sink := range sinks {
+ sink := sink // Closure for goroutine below
+
sourcePipe, sinkPipe := p2p.MsgPipe()
defer sourcePipe.Close()
defer sinkPipe.Close()
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index b3886270f3dd..951352319ffc 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -316,6 +316,7 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ requestsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@@ -324,8 +325,11 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
+ if body.Requests != nil {
+ requestsHashes[i] = types.DeriveSha(types.Requests(body.Requests), hasher)
+ }
}
- return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
+ return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index aeef4330ff4e..cbc895eabb8e 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -224,20 +224,22 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
+ Requests []*types.Request `rlp:"optional"` // Requests contained within a block
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
-func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
+func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, [][]*types.Request) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
+ requestset = make([][]*types.Request, len(*p))
)
for i, body := range *p {
- txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
+ txset[i], uncleset[i], withdrawalset[i], requestset[i] = body.Transactions, body.Uncles, body.Withdrawals, body.Requests
}
- return txset, uncleset, withdrawalset
+ return txset, uncleset, withdrawalset, requestset
}
// GetReceiptsRequest represents a block receipts query.
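
The new Requests field above rides on the same rlp:"optional" mechanism already used for Withdrawals: trailing optional fields may be absent on the wire and simply decode to nil. A minimal sketch of that behaviour with stand-in types (demoBody is hypothetical and unrelated to the eth package):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    // demoBody mimics the shape of BlockBody: two mandatory lists followed by
    // two optional trailing ones.
    type demoBody struct {
        Transactions []string
        Uncles       []string
        Withdrawals  []string `rlp:"optional"`
        Requests     []string `rlp:"optional"`
    }

    func main() {
        // Encode only the two mandatory fields, as an older peer would.
        old, err := rlp.EncodeToBytes(&struct {
            Transactions []string
            Uncles       []string
        }{Transactions: []string{"tx1"}})
        if err != nil {
            panic(err)
        }
        // Decoding into the extended structure still works; the optional
        // trailing fields stay nil.
        var body demoBody
        if err := rlp.DecodeBytes(old, &body); err != nil {
            panic(err)
        }
        fmt.Println(body.Withdrawals == nil, body.Requests == nil) // true true
    }
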
diff --git a/eth/protocols/snap/gentrie.go b/eth/protocols/snap/gentrie.go
index 8ef1a007530e..5126d26777c8 100644
--- a/eth/protocols/snap/gentrie.go
+++ b/eth/protocols/snap/gentrie.go
@@ -31,6 +31,9 @@ type genTrie interface {
// update inserts the state item into generator trie.
update(key, value []byte) error
+ // delete removes the state item from the generator trie.
+ delete(key []byte) error
+
// commit flushes the right boundary nodes if complete flag is true. This
// function must be called before flushing the associated database batch.
commit(complete bool) common.Hash
@@ -113,7 +116,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
// removed because it's a sibling of the nodes we want to commit, not
// the parent or ancestor.
for i := 0; i < len(path); i++ {
- t.delete(path[:i], false)
+ t.deleteNode(path[:i], false)
}
}
return
@@ -132,11 +135,11 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
//
// The extension node is detected if its path is the prefix of last committed
// one and path gap is larger than one. If the path gap is only one byte,
- // the current node could either be a full node, or a extension with single
+ // the current node could either be a full node, or an extension with single
// byte key. In either case, no gaps will be left in the path.
if t.last != nil && bytes.HasPrefix(t.last, path) && len(t.last)-len(path) > 1 {
for i := len(path) + 1; i < len(t.last); i++ {
- t.delete(t.last[:i], true)
+ t.deleteNode(t.last[:i], true)
}
}
t.write(path, blob)
@@ -164,7 +167,7 @@ func (t *pathTrie) deleteAccountNode(path []byte, inner bool) {
} else {
accountOuterLookupGauge.Inc(1)
}
- if !rawdb.ExistsAccountTrieNode(t.db, path) {
+ if !rawdb.HasAccountTrieNode(t.db, path) {
return
}
if inner {
@@ -181,7 +184,7 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) {
} else {
storageOuterLookupGauge.Inc(1)
}
- if !rawdb.ExistsStorageTrieNode(t.db, t.owner, path) {
+ if !rawdb.HasStorageTrieNode(t.db, t.owner, path) {
return
}
if inner {
@@ -192,8 +195,8 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) {
rawdb.DeleteStorageTrieNode(t.batch, t.owner, path)
}
-// delete commits the node deletion to provided database batch in path mode.
-func (t *pathTrie) delete(path []byte, inner bool) {
+// deleteNode commits the node deletion to provided database batch in path mode.
+func (t *pathTrie) deleteNode(path []byte, inner bool) {
if t.owner == (common.Hash{}) {
t.deleteAccountNode(path, inner)
} else {
@@ -207,6 +210,34 @@ func (t *pathTrie) update(key, value []byte) error {
return t.tr.Update(key, value)
}
+// delete implements genTrie interface, deleting the item from the stack trie.
+func (t *pathTrie) delete(key []byte) error {
+ // Commit the trie since the right boundary is incomplete because
+ // of the deleted item. This will implicitly discard the last inserted
+ // item and clean some ancestor trie nodes of the last committed
+ // item in the database.
+ t.commit(false)
+
+ // Reset the trie and all the internal trackers
+ t.first = nil
+ t.last = nil
+ t.tr.Reset()
+
+ // Explicitly mark the left boundary as incomplete, as the left-side
+ // item of the next one has been deleted. Be aware that the next item
+ // to be inserted will be ignored from committing as well as it's on
+ // the left boundary.
+ t.skipLeftBoundary = true
+
+ // Explicitly delete the potential leftover nodes on the specific
+ // path from the database.
+ tkey := t.tr.TrieKey(key)
+ for i := 0; i <= len(tkey); i++ {
+ t.deleteNode(tkey[:i], false)
+ }
+ return nil
+}
+
// commit implements genTrie interface, flushing the right boundary if it's
// considered as complete. Otherwise, the nodes on the right boundary are
// discarded and cleaned up.
@@ -255,7 +286,7 @@ func (t *pathTrie) commit(complete bool) common.Hash {
// with no issues as they are actually complete. Also, from a database
// perspective, first deleting and then rewriting is a valid data update.
for i := 0; i < len(t.last); i++ {
- t.delete(t.last[:i], false)
+ t.deleteNode(t.last[:i], false)
}
return common.Hash{} // the hash is meaningless for incomplete commit
}
@@ -278,6 +309,9 @@ func (t *hashTrie) update(key, value []byte) error {
return t.tr.Update(key, value)
}
+// delete implements genTrie interface, ignoring the state item for deleting.
+func (t *hashTrie) delete(key []byte) error { return nil }
+
// commit implements genTrie interface, committing the nodes on right boundary.
func (t *hashTrie) commit(complete bool) common.Hash {
if !complete {
diff --git a/eth/protocols/snap/gentrie_test.go b/eth/protocols/snap/gentrie_test.go
index 1fb2dbce7568..2da4f3c866e6 100644
--- a/eth/protocols/snap/gentrie_test.go
+++ b/eth/protocols/snap/gentrie_test.go
@@ -551,3 +551,145 @@ func TestTinyPartialTree(t *testing.T) {
}
}
}
+
+func TestTrieDelete(t *testing.T) {
+ var entries []*kv
+ for i := 0; i < 1024; i++ {
+ entries = append(entries, &kv{
+ k: testrand.Bytes(32),
+ v: testrand.Bytes(32),
+ })
+ }
+ slices.SortFunc(entries, (*kv).cmp)
+
+ nodes := make(map[string]common.Hash)
+ tr := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ nodes[string(path)] = hash
+ })
+ for i := 0; i < len(entries); i++ {
+ tr.Update(entries[i].k, entries[i].v)
+ }
+ tr.Hash()
+
+ check := func(index []int) {
+ var (
+ db = rawdb.NewMemoryDatabase()
+ batch = db.NewBatch()
+ marks = map[int]struct{}{}
+ neighbors = map[int]struct{}{}
+ )
+ for _, n := range index {
+ marks[n] = struct{}{}
+ }
+ for _, n := range index {
+ if n != 0 {
+ if _, ok := marks[n-1]; !ok {
+ neighbors[n-1] = struct{}{}
+ }
+ }
+ if n != len(entries)-1 {
+ if _, ok := neighbors[n+1]; !ok {
+ neighbors[n+1] = struct{}{}
+ }
+ }
+ }
+ // Write the junk nodes as the dangling
+ var injects []string
+ for _, n := range index {
+ nibbles := byteToHex(entries[n].k)
+ for i := 0; i <= len(nibbles); i++ {
+ injects = append(injects, string(nibbles[:i]))
+ }
+ }
+ for _, path := range injects {
+ rawdb.WriteAccountTrieNode(db, []byte(path), testrand.Bytes(32))
+ }
+ tr := newPathTrie(common.Hash{}, false, db, batch)
+ for i := 0; i < len(entries); i++ {
+ if _, ok := marks[i]; ok {
+ tr.delete(entries[i].k)
+ } else {
+ tr.update(entries[i].k, entries[i].v)
+ }
+ }
+ tr.commit(true)
+
+ r := newBatchReplay()
+ batch.Replay(r)
+ batch.Write()
+
+ for _, path := range injects {
+ if rawdb.HasAccountTrieNode(db, []byte(path)) {
+ t.Fatalf("Unexpected leftover node %v", []byte(path))
+ }
+ }
+
+ // ensure all the written nodes match with the complete tree
+ set := make(map[string]common.Hash)
+ for path, hash := range r.modifies() {
+ if hash == (common.Hash{}) {
+ continue
+ }
+ n, ok := nodes[path]
+ if !ok {
+ t.Fatalf("Unexpected trie node: %v", []byte(path))
+ }
+ if n != hash {
+ t.Fatalf("Unexpected trie node content: %v, want: %x, got: %x", []byte(path), n, hash)
+ }
+ set[path] = hash
+ }
+
+ // ensure all the missing nodes either on the deleted path, or
+ // on the neighbor paths.
+ isMissing := func(path []byte) bool {
+ for n := range marks {
+ key := byteToHex(entries[n].k)
+ if bytes.HasPrefix(key, path) {
+ return true
+ }
+ }
+ for n := range neighbors {
+ key := byteToHex(entries[n].k)
+ if bytes.HasPrefix(key, path) {
+ return true
+ }
+ }
+ return false
+ }
+ for path := range nodes {
+ if _, ok := set[path]; ok {
+ continue
+ }
+ if !isMissing([]byte(path)) {
+ t.Fatalf("Missing node %v", []byte(path))
+ }
+ }
+ }
+ var cases = []struct {
+ index []int
+ }{
+ // delete the first
+ {[]int{0}},
+
+ // delete the last
+ {[]int{len(entries) - 1}},
+
+ // delete the first two
+ {[]int{0, 1}},
+
+ // delete the last two
+ {[]int{len(entries) - 2, len(entries) - 1}},
+
+ {[]int{
+ 0, 2, 4, 6,
+ len(entries) - 1,
+ len(entries) - 3,
+ len(entries) - 5,
+ len(entries) - 7,
+ }},
+ }
+ for _, c := range cases {
+ check(c.index)
+ }
+}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 4f309a20f6e4..9e079f540f07 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -2427,6 +2427,13 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
panic(err) // Really shouldn't ever happen
}
task.genTrie.update(hash[:], full)
+ } else {
+ // If the storage task is incomplete, explicitly delete the corresponding
+ // account item from the account trie to ensure that all nodes along the
+ // path to the incomplete storage trie are cleaned up.
+ if err := task.genTrie.delete(hash[:]); err != nil {
+ panic(err) // Really shouldn't ever happen
+ }
}
}
// Flush anything written just now and update the stats
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index d318077d99a8..cca0bcc860e1 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1515,7 +1515,7 @@ func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv)
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1577,7 +1577,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index a2733bfc61f3..b9ec92fcb89d 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -615,17 +615,6 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
return nil, err
}
defer release()
-
- blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
- if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
- vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
- core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
- }
- if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) {
- vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
- core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
- }
-
// JS tracers have high overhead. In this case run a parallel
// process that generates states in one thread and traces txes
// in separate worker threads.
@@ -638,9 +627,18 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
var (
txs = block.Transactions()
blockHash = block.Hash()
+ blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
results = make([]*txTraceResult, len(txs))
)
+ if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
+ vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
+ core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
+ }
+ if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) {
+ vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
+ core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
+ }
for i, tx := range txs {
if i == 0 && api.backend.ChainConfig().Taiko {
if err := tx.MarkAsAnchor(); err != nil {
@@ -1041,13 +1039,14 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
Stop: logger.Stop,
}
} else {
- tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig, api.backend.ChainConfig())
+ tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig)
if err != nil {
return nil, err
}
}
// The actual TxContext will be created as part of ApplyTransactionWithEVM.
vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true})
+ statedb.SetLogger(tracer.Hooks)
// Define a meaningful timeout of a single transaction trace
if config.Timeout != nil {
diff --git a/eth/tracers/dir.go b/eth/tracers/dir.go
index 55bcb44d23ad..99324bb7f91b 100644
--- a/eth/tracers/dir.go
+++ b/eth/tracers/dir.go
@@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
- "github.com/ethereum/go-ethereum/params"
)
// Context contains some contextual infos for a transaction execution that is not
@@ -45,8 +44,8 @@ type Tracer struct {
Stop func(err error)
}
-type ctorFn func(*Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
-type jsCtorFn func(string, *Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
+type ctorFn func(*Context, json.RawMessage) (*Tracer, error)
+type jsCtorFn func(string, *Context, json.RawMessage) (*Tracer, error)
type elem struct {
ctor ctorFn
@@ -79,10 +78,7 @@ func (d *directory) RegisterJSEval(f jsCtorFn) {
// New returns a new instance of a tracer, by iterating through the
// registered lookups. Name is either name of an existing tracer
// or an arbitrary JS code.
-func (d *directory) New(name string, ctx *Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*Tracer, error) {
- if len(cfg) == 0 {
- cfg = json.RawMessage("{}")
- }
+func (d *directory) New(name string, ctx *Context, cfg json.RawMessage) (*Tracer, error) {
if elem, ok := d.elems[name]; ok {
return elem.ctor(ctx, cfg)
}
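
With the chainConfig parameter gone from the constructor signatures above, instantiating a registered tracer takes only a name, an optional context and a raw JSON config. A small usage sketch assuming the built-in callTracer registered by eth/tracers/native (onlyTopCall is that tracer's own config knob):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/eth/tracers"
        // Blank import so the built-in native tracers register themselves in
        // DefaultDirectory.
        _ "github.com/ethereum/go-ethereum/eth/tracers/native"
    )

    func main() {
        cfg := json.RawMessage(`{"onlyTopCall": true}`)
        tracer, err := tracers.DefaultDirectory.New("callTracer", nil, cfg)
        if err != nil {
            panic(err)
        }
        // The returned Tracer bundles the tracing hooks plus GetResult/Stop.
        fmt.Println(tracer.Hooks != nil)
    }
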
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index cb635e7127e8..dc402b56dd65 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -116,7 +116,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
var (
signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
context = test.Context.toBlockContext(test.Genesis)
- st = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ state = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
state.Close()
@@ -124,15 +124,13 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
- logState := vm.StateDB(st.StateDB)
- if tracer.Hooks != nil {
- logState = state.NewHookedState(st.StateDB, tracer.Hooks)
- }
+
+ state.StateDB.SetLogger(tracer.Hooks)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
- evm := vm.NewEVM(context, core.NewEVMTxContext(msg), logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
+ evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
@@ -258,7 +256,7 @@ func TestInternals(t *testing.T) {
}
)
mkTracer := func(name string, cfg json.RawMessage) *tracers.Tracer {
- tr, err := tracers.DefaultDirectory.New(name, nil, cfg, config)
+ tr, err := tracers.DefaultDirectory.New(name, nil, cfg)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
@@ -352,13 +350,8 @@ func TestInternals(t *testing.T) {
Balance: big.NewInt(500000000000000),
},
}, false, rawdb.HashScheme)
- defer st.Close()
-
- logState := vm.StateDB(st.StateDB)
- if hooks := tc.tracer.Hooks; hooks != nil {
- logState = state.NewHookedState(st.StateDB, hooks)
- }
-
+ defer state.Close()
+ state.StateDB.SetLogger(tc.tracer.Hooks)
tx, err := types.SignNewTx(key, signer, &types.LegacyTx{
To: &to,
Value: big.NewInt(0),
@@ -372,7 +365,7 @@ func TestInternals(t *testing.T) {
Origin: origin,
GasPrice: tx.GasPrice(),
}
- evm := vm.NewEVM(context, txContext, logState, config, vm.Config{Tracer: tc.tracer.Hooks})
+ evm := vm.NewEVM(context, txContext, state.StateDB, config, vm.Config{Tracer: tc.tracer.Hooks})
msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0))
if err != nil {
t.Fatalf("test %v: failed to create message: %v", tc.name, err)
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index 0ec3c367bc5b..7a6e1751e87d 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -94,6 +94,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
return fmt.Errorf("failed to create call tracer: %v", err)
}
+ state.StateDB.SetLogger(tracer.Hooks)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index c6cf10a48346..90f59225dfd0 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -102,6 +102,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
t.Fatalf("failed to create call tracer: %v", err)
}
+ state.StateDB.SetLogger(tracer.Hooks)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go
index 2391add91b95..5c11b5e47296 100644
--- a/eth/tracers/internal/tracetest/supply_test.go
+++ b/eth/tracers/internal/tracetest/supply_test.go
@@ -86,7 +86,7 @@ func TestSupplyOmittedFields(t *testing.T) {
expected := supplyInfo{
Number: 0,
- Hash: common.HexToHash("0xc02ee8ee5b54a40e43f0fa827d431e1bd4f217e941790dda10b2521d1925a20b"),
+ Hash: common.HexToHash("0x52f276d96f0afaaf2c3cb358868bdc2779c4b0cb8de3e7e5302e247c0b66a703"),
ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}
actual := out[expected.Number]
@@ -597,7 +597,6 @@ func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockG
}
func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) {
- t.Helper()
want, err := json.Marshal(expected)
if err != nil {
t.Fatalf("failed to marshal expected value to JSON: %v", err)
@@ -609,6 +608,6 @@ func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) {
}
if !bytes.Equal(want, have) {
- t.Fatalf("incorrect supply info:\nwant %s\nhave %s", string(want), string(have))
+ t.Fatalf("incorrect supply info: expected %s, got %s", string(want), string(have))
}
}
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json
index 05da3b42e194..b974151c1b36 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json
@@ -41,7 +41,8 @@
"grayGlacierBlock": 0,
"shanghaiTime": 0,
"cancunTime": 0,
- "terminalTotalDifficulty": 0
+ "terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true
}
},
"context": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
index a098b5702953..0dbffe9cc058 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
@@ -67,7 +67,8 @@
"transactionPosition": 74,
"transactionHash": "0x5ef60b27ac971c22a7d484e546e50093ca62300c8986d165154e47773764b6a4",
"blockNumber": 1555279,
- "blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46"
+ "blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46",
+ "time": "209.346µs"
},
{
"action": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
index 41199e90e398..da9e906c0ab9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
@@ -73,11 +73,11 @@
"transactionPosition": 26,
"transactionHash": "0xcb1090fa85d2a3da8326b75333e92b3dca89963c895d9c981bfdaa64643135e4",
"blockNumber": 839247,
- "blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c"
+ "blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c",
+ "time": "182.267µs"
},
{
"action": {
- "creationMethod": "create",
"from": "0x76554b33410b6d90b7dc889bfed0451ad195f27e",
"gas": "0x25a18",
"init": "0x0000000000000000000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
index d7b4a22cf5b2..812dd5f8d9ce 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
@@ -83,6 +83,19 @@
"traceAddress": [0],
"type": "create"
},
+ {
+ "action": {
+ "from": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca",
+ "gas": "0x50ac",
+ "init": "0x5a",
+ "value": "0x1"
+ },
+ "error": "insufficient balance for transfer",
+ "result": {},
+ "subtraces": 0,
+ "traceAddress": [0],
+ "type": "create"
+ },
{
"type": "suicide",
"action": {
@@ -91,7 +104,9 @@
"balance": "0x0"
},
"result": null,
- "traceAddress": [1],
+ "traceAddress": [
+ 1
+ ],
"subtraces": 0,
"transactionPosition": 14,
"transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
index 6e020fe2b744..b11e3e779768 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
@@ -63,11 +63,11 @@
"transactionPosition": 16,
"transactionHash": "0x384487e5ae8d2997aece8e28403d393cb9752425e6de358891bed981c5af1c05",
"blockNumber": 1555285,
- "blockHash": "0x93231d8e9662adb4c5c703583a92c7b3112cd5448f43ab4fa1f0f00a0183ed3f"
+ "blockHash": "0x93231d8e9662adb4c5c703583a92c7b3112cd5448f43ab4fa1f0f00a0183ed3f",
+ "time": "665.278µs"
},
{
"action": {
- "creationMethod": "create",
"from": "0xf84bf5189ccd19f5897739756d214fa0dc099e0d",
"gas": "0x1d5c",
"init": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json
index e562affb1576..049f24d9328f 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json
@@ -60,6 +60,7 @@
"grayGlacierBlock": 15050000,
"shanghaiTime": 1681338455,
"terminalTotalDifficulty": 7797655526461000,
+ "terminalTotalDifficultyPassed": true,
"ethash": {}
}
},
@@ -185,4 +186,4 @@
"value": "0x0",
"type": "CREATE"
}
-}
+}
\ No newline at end of file
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json
index f8adbabf6377..444eba450bec 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json
@@ -41,7 +41,8 @@
"grayGlacierBlock": 0,
"shanghaiTime": 0,
"cancunTime": 0,
- "terminalTotalDifficulty": 0
+ "terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true
}
},
"context": {
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_create.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_create.json
index 489a1ae6b538..b013d520c1ad 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_create.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_create.json
@@ -38,6 +38,7 @@
"grayGlacierBlock": 0,
"shanghaiTime": 0,
"terminalTotalDifficulty": 0,
+ "terminalTotalDifficultyPassed": true,
"isDev": true
}
},
@@ -58,4 +59,4 @@
"balance": "0x8ac7230489e80000"
}
}
-}
+}
\ No newline at end of file
diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go
index abc2699498d4..a74a96f8a489 100644
--- a/eth/tracers/internal/tracetest/util.go
+++ b/eth/tracers/internal/tracetest/util.go
@@ -47,11 +47,6 @@ func (c *callContext) toBlockContext(genesis *core.Genesis) vm.BlockContext {
if genesis.Config.IsLondon(context.BlockNumber) {
context.BaseFee = (*big.Int)(c.BaseFee)
}
-
- if genesis.Config.TerminalTotalDifficulty != nil && genesis.Config.TerminalTotalDifficulty.Sign() == 0 {
- context.Random = &genesis.Mixhash
- }
-
if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil {
excessBlobGas := eip4844.CalcExcessBlobGas(*genesis.ExcessBlobGas, *genesis.BlobGasUsed)
context.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index 35abd00017b7..c6653fa6bfe9 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -22,14 +22,12 @@ import (
"fmt"
"math/big"
"slices"
- "sync"
"github.com/dop251/goja"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
- "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common"
@@ -48,10 +46,10 @@ func init() {
if err != nil {
panic(err)
}
- type ctorFn = func(*tracers.Context, json.RawMessage, *params.ChainConfig) (*tracers.Tracer, error)
+ type ctorFn = func(*tracers.Context, json.RawMessage) (*tracers.Tracer, error)
lookup := func(code string) ctorFn {
- return func(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
- return newJsTracer(code, ctx, cfg, chainConfig)
+ return func(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+ return newJsTracer(code, ctx, cfg)
}
}
for name, code := range assetTracers {
@@ -112,7 +110,6 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
type jsTracer struct {
vm *goja.Runtime
env *tracing.VMContext
- chainConfig *params.ChainConfig
toBig toBigFn // Converts a hex string into a JS bigint
toBuf toBufFn // Converts a []byte into a JS buffer
fromBuf fromBufFn // Converts an array, hex string or Uint8Array to a []byte
@@ -149,7 +146,7 @@ type jsTracer struct {
// The methods `result` and `fault` are required to be present.
// The methods `step`, `enter`, and `exit` are optional, but note that
// `enter` and `exit` always go together.
-func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
vm := goja.New()
// By default field names are exported to JS as is, i.e. capitalized.
vm.SetFieldNameMapper(goja.UncapFieldNameMapper())
@@ -256,7 +253,7 @@ func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from
db := &dbObj{db: env.StateDB, vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf}
t.dbValue = db.setupObject()
// Update list of precompiles based on current block
- rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber.Uint64())
t.ctx["gas"] = t.vm.ToValue(tx.Gas())
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index ed2789d70dde..d61ffeb93250 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -191,7 +191,7 @@ func TestHaltBetweenSteps(t *testing.T) {
scope := &vm.ScopeContext{
Contract: vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0),
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 0, big.NewInt(0))
tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
@@ -214,7 +214,7 @@ func TestNoStepExec(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 1000, big.NewInt(0))
tracer.OnExit(0, nil, 0, nil, false)
diff --git a/eth/tracers/live.go b/eth/tracers/live.go
index 8b222d2e6cdf..ffb2303af4f1 100644
--- a/eth/tracers/live.go
+++ b/eth/tracers/live.go
@@ -1,19 +1,3 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
package tracers
import (
@@ -40,9 +24,6 @@ func (d *liveDirectory) Register(name string, f ctorFunc) {
// New instantiates a tracer by name.
func (d *liveDirectory) New(name string, config json.RawMessage) (*tracing.Hooks, error) {
- if len(config) == 0 {
- config = json.RawMessage("{}")
- }
if f, ok := d.elems[name]; ok {
return f(config)
}
diff --git a/eth/tracers/live/noop.go b/eth/tracers/live/noop.go
index 46c5700d2515..7433c288408f 100644
--- a/eth/tracers/live/noop.go
+++ b/eth/tracers/live/noop.go
@@ -1,19 +1,3 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
package live
import (
diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go
index fa4e5b190431..96f70594548c 100644
--- a/eth/tracers/live/supply.go
+++ b/eth/tracers/live/supply.go
@@ -1,19 +1,3 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
package live
import (
@@ -35,7 +19,7 @@ import (
)
func init() {
- tracers.LiveDirectory.Register("supply", newSupplyTracer)
+ tracers.LiveDirectory.Register("supply", newSupply)
}
type supplyInfoIssuance struct {
@@ -79,7 +63,7 @@ type supplyTxCallstack struct {
burn *big.Int
}
-type supplyTracer struct {
+type supply struct {
delta supplyInfo
txCallstack []supplyTxCallstack // Callstack for current transaction
logger *lumberjack.Logger
@@ -90,10 +74,12 @@ type supplyTracerConfig struct {
MaxSize int `json:"maxSize"` // MaxSize is the maximum size in megabytes of the tracer log file before it gets rotated. It defaults to 100 megabytes.
}
-func newSupplyTracer(cfg json.RawMessage) (*tracing.Hooks, error) {
+func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) {
var config supplyTracerConfig
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, fmt.Errorf("failed to parse config: %v", err)
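+ // Only decode the supply tracer config when one was supplied; unmarshalling a nil message would fail.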
+ if cfg != nil {
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, fmt.Errorf("failed to parse config: %v", err)
+ }
}
if config.Path == "" {
return nil, errors.New("supply tracer output path is required")
@@ -107,19 +93,19 @@ func newSupplyTracer(cfg json.RawMessage) (*tracing.Hooks, error) {
logger.MaxSize = config.MaxSize
}
- t := &supplyTracer{
+ t := &supply{
delta: newSupplyInfo(),
logger: logger,
}
return &tracing.Hooks{
- OnBlockStart: t.onBlockStart,
- OnBlockEnd: t.onBlockEnd,
- OnGenesisBlock: t.onGenesisBlock,
- OnTxStart: t.onTxStart,
- OnBalanceChange: t.onBalanceChange,
- OnEnter: t.onEnter,
- OnExit: t.onExit,
- OnClose: t.onClose,
+ OnBlockStart: t.OnBlockStart,
+ OnBlockEnd: t.OnBlockEnd,
+ OnGenesisBlock: t.OnGenesisBlock,
+ OnTxStart: t.OnTxStart,
+ OnBalanceChange: t.OnBalanceChange,
+ OnEnter: t.OnEnter,
+ OnExit: t.OnExit,
+ OnClose: t.OnClose,
}, nil
}
@@ -142,11 +128,11 @@ func newSupplyInfo() supplyInfo {
}
}
-func (s *supplyTracer) resetDelta() {
+func (s *supply) resetDelta() {
s.delta = newSupplyInfo()
}
-func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) {
+func (s *supply) OnBlockStart(ev tracing.BlockEvent) {
s.resetDelta()
s.delta.Number = ev.Block.NumberU64()
@@ -169,11 +155,11 @@ func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) {
}
}
-func (s *supplyTracer) onBlockEnd(err error) {
+func (s *supply) OnBlockEnd(err error) {
s.write(s.delta)
}
-func (s *supplyTracer) onGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
+func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
s.resetDelta()
s.delta.Number = b.NumberU64()
@@ -188,7 +174,7 @@ func (s *supplyTracer) onGenesisBlock(b *types.Block, alloc types.GenesisAlloc)
s.write(s.delta)
}
-func (s *supplyTracer) onBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
+func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
diff := new(big.Int).Sub(newBalance, prevBalance)
// NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock
@@ -207,12 +193,12 @@ func (s *supplyTracer) onBalanceChange(a common.Address, prevBalance, newBalance
}
}
-func (s *supplyTracer) onTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
+func (s *supply) OnTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
s.txCallstack = make([]supplyTxCallstack, 0, 1)
}
// internalTxsHandler handles internal transactions burned amount
-func (s *supplyTracer) internalTxsHandler(call *supplyTxCallstack) {
+func (s *supply) internalTxsHandler(call *supplyTxCallstack) {
// Handle Burned amount
if call.burn != nil {
s.delta.Burn.Misc.Add(s.delta.Burn.Misc, call.burn)
@@ -225,7 +211,7 @@ func (s *supplyTracer) internalTxsHandler(call *supplyTxCallstack) {
}
}
-func (s *supplyTracer) onEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
call := supplyTxCallstack{
calls: make([]supplyTxCallstack, 0),
}
@@ -240,7 +226,7 @@ func (s *supplyTracer) onEnter(depth int, typ byte, from common.Address, to comm
s.txCallstack = append(s.txCallstack, call)
}
-func (s *supplyTracer) onExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
if depth == 0 {
// No need to handle Burned amount if transaction is reverted
if !reverted {
@@ -266,13 +252,13 @@ func (s *supplyTracer) onExit(depth int, output []byte, gasUsed uint64, err erro
s.txCallstack[size-1].calls = append(s.txCallstack[size-1].calls, call)
}
-func (s *supplyTracer) onClose() {
+func (s *supply) OnClose() {
if err := s.logger.Close(); err != nil {
log.Warn("failed to close supply tracer log file", "error", err)
}
}
-func (s *supplyTracer) write(data any) {
+func (s *supply) write(data any) {
supply, ok := data.(supplyInfo)
if !ok {
log.Warn("failed to cast supply tracer data on write to log file")
diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go
index cec45a1e7a58..907df44181aa 100644
--- a/eth/tracers/native/4byte.go
+++ b/eth/tracers/native/4byte.go
@@ -49,16 +49,15 @@ func init() {
// 0xc281d19e-0: 1
// }
type fourByteTracer struct {
- ids map[string]int // ids aggregates the 4byte ids found
- interrupt atomic.Bool // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
- chainConfig *params.ChainConfig
+ ids map[string]int // ids aggregates the 4byte ids found
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
activePrecompiles []common.Address // Updated on tx start based on given rules
}
// newFourByteTracer returns a native go tracer which collects
// 4 byte-identifiers of a tx, and implements vm.EVMLogger.
-func newFourByteTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
t := &fourByteTracer{
ids: make(map[string]int),
- chainConfig: chainConfig,
@@ -91,7 +90,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
func (t *fourByteTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) {
// Update list of precompiles based on current block
- rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index c2247d1ce491..6a6cc1663c66 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
- "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go
@@ -126,7 +125,7 @@ type callTracerConfig struct {
// newCallTracer returns a native go tracer which tracks
// call frames of a tx, and implements vm.EVMLogger.
-func newCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
t, err := newCallTracerObject(ctx, cfg)
if err != nil {
return nil, err
diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go
index e56d0111391e..135e0897c0b4 100644
--- a/eth/tracers/native/call_flat.go
+++ b/eth/tracers/native/call_flat.go
@@ -128,7 +128,7 @@ type flatCallTracerConfig struct {
}
// newFlatCallTracer returns a new flatCallTracer.
-func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
var config flatCallTracerConfig
if err := json.Unmarshal(cfg, &config); err != nil {
return nil, err
@@ -136,12 +136,12 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *p
// Create inner call tracer with default configuration, don't forward
// the OnlyTopCall or WithLog to inner for now
- t, err := newCallTracerObject(ctx, json.RawMessage("{}"))
+ t, err := newCallTracerObject(ctx, nil)
if err != nil {
return nil, err
}
- ft := &flatCallTracer{tracer: t, ctx: ctx, config: config, chainConfig: chainConfig}
+ ft := &flatCallTracer{tracer: t, ctx: ctx, config: config}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
OnTxStart: ft.OnTxStart,
@@ -207,7 +207,7 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction
}
t.tracer.OnTxStart(env, tx, from)
// Update list of precompiles based on current block
- rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go
index a81af6d6bc19..d5481b868bcc 100644
--- a/eth/tracers/native/call_flat_test.go
+++ b/eth/tracers/native/call_flat_test.go
@@ -31,7 +31,7 @@ import (
)
func TestCallFlatStop(t *testing.T) {
- tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
+ tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil)
require.NoError(t, err)
// this error should be returned by GetResult
@@ -47,7 +47,9 @@ func TestCallFlatStop(t *testing.T) {
Data: nil,
})
- tracer.OnTxStart(&tracing.VMContext{}, tx, common.Address{})
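+ // The chain config is now carried by the VM context instead of being passed to the tracer constructor.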
+ tracer.OnTxStart(&tracing.VMContext{
+ ChainConfig: params.MainnetChainConfig,
+ }, tx, common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0))
diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go
index 77ab254568e6..435e3a7aa8a4 100644
--- a/eth/tracers/native/mux.go
+++ b/eth/tracers/native/mux.go
@@ -39,7 +39,7 @@ type muxTracer struct {
}
// newMuxTracer returns a new mux tracer.
-func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
var config map[string]json.RawMessage
if err := json.Unmarshal(cfg, &config); err != nil {
return nil, err
diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go
index ac174cc25e7f..bd38ee773cac 100644
--- a/eth/tracers/native/noop.go
+++ b/eth/tracers/native/noop.go
@@ -36,7 +36,7 @@ func init() {
type noopTracer struct{}
// newNoopTracer returns a new noop tracer.
-func newNoopTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newNoopTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
t := &noopTracer{}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index 9706eb43f60d..a83588f8a3b0 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -77,7 +77,7 @@ type prestateTracerConfig struct {
DisableStorage bool `json:"disableStorage"` // If true, this tracer will not return the contract storage
}
-func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
var config prestateTracerConfig
if err := json.Unmarshal(cfg, &config); err != nil {
return nil, err
@@ -202,6 +202,27 @@ func (t *prestateTracer) Stop(err error) {
t.interrupt.Store(true)
}
+func (t *prestateTracer) OnTxEnd(receipt *types.Receipt, err error) {
+ if err != nil {
+ return
+ }
+ if t.config.DiffMode {
+ t.processDiffState()
+ }
+}
+
func (t *prestateTracer) processDiffState() {
for addr, state := range t.pre {
// The deleted account's state is pruned from `post` but kept in `pre`
@@ -273,13 +294,6 @@ func (t *prestateTracer) lookupAccount(addr common.Address) {
if !acc.exists() {
acc.empty = true
}
- // The code must be fetched first for the emptiness check.
- if t.config.DisableCode {
- acc.Code = nil
- }
- if !t.config.DisableStorage {
- acc.Storage = make(map[common.Hash]common.Hash)
- }
t.pre[addr] = acc
}
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index f10626c01fb7..6357152f0757 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -123,6 +123,7 @@ type rpcBlock struct {
Transactions []rpcTransaction `json:"transactions"`
UncleHashes []common.Hash `json:"uncles"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
+ Requests []*types.Request `json:"requests,omitempty"`
}
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
@@ -191,12 +192,12 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface
}
txs[i] = tx.tx
}
-
return types.NewBlockWithHeader(head).WithBody(
types.Body{
Transactions: txs,
Uncles: uncles,
Withdrawals: body.Withdrawals,
+ Requests: body.Requests,
}), nil
}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 4ad8a552d268..cf223b9c0aeb 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -104,8 +104,8 @@ func newTestBackend(config *node.Config) (*node.Node, []*types.Block, error) {
return nil, nil, fmt.Errorf("can't create new node: %v", err)
}
// Create Ethereum Service
- ecfg := ðconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
- ethservice, err := eth.New(n, ecfg)
+ config := ðconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
+ ethservice, err := eth.New(n, config)
if err != nil {
return nil, nil, fmt.Errorf("can't create new ethereum service: %v", err)
}
diff --git a/ethclient/example_test.go b/ethclient/example_test.go
deleted file mode 100644
index 5d0038f0c7ba..000000000000
--- a/ethclient/example_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package ethclient_test
-
-import (
- "github.com/ethereum/go-ethereum/node"
-)
-
-var exampleNode *node.Node
-
-// launch example server
-func init() {
- config := &node.Config{
- HTTPHost: "127.0.0.1",
- }
- n, _, err := newTestBackend(config)
- if err != nil {
- panic("can't launch node: " + err.Error())
- }
- exampleNode = n
-}
diff --git a/ethdb/database.go b/ethdb/database.go
index 323f8f5d6fd9..84d0d687a5f7 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -182,6 +182,11 @@ type ResettableAncientStore interface {
// Database contains all the methods required by the high level database to not
// only access the key-value data store but also the ancient chain store.
type Database interface {
- KeyValueStore
- AncientStore
+ Reader
+ Writer
+ Batcher
+ Iteratee
+ Stater
+ Compacter
+ io.Closer
}
diff --git a/go.mod b/go.mod
index a1fd13d1300e..04226742b3d2 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
+ github.com/btcsuite/btcd/btcec/v2 v2.3.4
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.79.0
github.com/cockroachdb/pebble v1.1.2
@@ -20,7 +21,6 @@ require (
github.com/crate-crypto/go-kzg-4844 v1.0.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.6.0
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844 v1.0.0
@@ -31,7 +31,7 @@ require (
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/gofrs/flock v0.8.1
- github.com/golang-jwt/jwt/v4 v4.5.1
+ github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0
github.com/google/uuid v1.3.0
@@ -47,6 +47,7 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52
+ github.com/kilic/bls12-381 v0.1.0
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
@@ -74,7 +75,6 @@ require (
google.golang.org/protobuf v1.34.2
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gopkg.in/yaml.v3 v3.0.1
- modernc.org/mathutil v1.6.0
)
require (
@@ -116,7 +116,6 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
@@ -135,7 +134,6 @@ require (
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
- github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
diff --git a/go.sum b/go.sum
index 2252662ca0da..21a5e5bcd81f 100644
--- a/go.sum
+++ b/go.sum
@@ -92,6 +92,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
@@ -213,8 +217,8 @@ github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
-github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -464,8 +468,7 @@ github.com/protolambda/zrnt v0.32.2/go.mod h1:A0fezkp9Tt3GBLATSPIbuY4ywYESyAuc/F
github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY=
github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
-github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
-github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -500,6 +503,8 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
+github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -864,8 +869,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
-modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index a508b0ca5b28..b502bec6edc1 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"maps"
- gomath "math"
"math/big"
"strings"
"time"
@@ -295,6 +294,344 @@ func (api *EthereumAccountAPI) Accounts() []common.Address {
return api.am.Accounts()
}
+// PersonalAccountAPI provides an API to access accounts managed by this node.
+// It offers methods to create, (un)lock and list accounts. Some methods accept
+// passwords and are therefore considered private by default.
+type PersonalAccountAPI struct {
+ am *accounts.Manager
+ nonceLock *AddrLocker
+ b Backend
+}
+
+// NewPersonalAccountAPI creates a new PersonalAccountAPI.
+func NewPersonalAccountAPI(b Backend, nonceLock *AddrLocker) *PersonalAccountAPI {
+ return &PersonalAccountAPI{
+ am: b.AccountManager(),
+ nonceLock: nonceLock,
+ b: b,
+ }
+}
+
+// ListAccounts will return a list of addresses for accounts this node manages.
+func (api *PersonalAccountAPI) ListAccounts() []common.Address {
+ return api.am.Accounts()
+}
+
+// rawWallet is a JSON representation of an accounts.Wallet interface, with its
+// data contents extracted into plain fields.
+type rawWallet struct {
+ URL string `json:"url"`
+ Status string `json:"status"`
+ Failure string `json:"failure,omitempty"`
+ Accounts []accounts.Account `json:"accounts,omitempty"`
+}
+
+// ListWallets will return a list of wallets this node manages.
+func (api *PersonalAccountAPI) ListWallets() []rawWallet {
+ wallets := make([]rawWallet, 0) // return [] instead of nil if empty
+ for _, wallet := range api.am.Wallets() {
+ status, failure := wallet.Status()
+
+ raw := rawWallet{
+ URL: wallet.URL().String(),
+ Status: status,
+ Accounts: wallet.Accounts(),
+ }
+ if failure != nil {
+ raw.Failure = failure.Error()
+ }
+ wallets = append(wallets, raw)
+ }
+ return wallets
+}
+
+// OpenWallet initiates a hardware wallet opening procedure, establishing a USB
+// connection and attempting to authenticate via the provided passphrase. Note,
+// the method may return an extra challenge requiring a second open (e.g. the
+// Trezor PIN matrix challenge).
+func (api *PersonalAccountAPI) OpenWallet(url string, passphrase *string) error {
+ wallet, err := api.am.Wallet(url)
+ if err != nil {
+ return err
+ }
+ pass := ""
+ if passphrase != nil {
+ pass = *passphrase
+ }
+ return wallet.Open(pass)
+}
+
+// DeriveAccount requests an HD wallet to derive a new account, optionally pinning
+// it for later reuse.
+func (api *PersonalAccountAPI) DeriveAccount(url string, path string, pin *bool) (accounts.Account, error) {
+ wallet, err := api.am.Wallet(url)
+ if err != nil {
+ return accounts.Account{}, err
+ }
+ derivPath, err := accounts.ParseDerivationPath(path)
+ if err != nil {
+ return accounts.Account{}, err
+ }
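+ // A missing pin flag defaults to false, i.e. the derived account is not pinned.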
+ if pin == nil {
+ pin = new(bool)
+ }
+ return wallet.Derive(derivPath, *pin)
+}
+
+// NewAccount creates a new account and returns its address.
+func (api *PersonalAccountAPI) NewAccount(password string) (common.AddressEIP55, error) {
+ ks, err := fetchKeystore(api.am)
+ if err != nil {
+ return common.AddressEIP55{}, err
+ }
+ acc, err := ks.NewAccount(password)
+ if err == nil {
+ addrEIP55 := common.AddressEIP55(acc.Address)
+ log.Info("Your new key was generated", "address", addrEIP55.String())
+ log.Warn("Please backup your key file!", "path", acc.URL.Path)
+ log.Warn("Please remember your password!")
+ return addrEIP55, nil
+ }
+ return common.AddressEIP55{}, err
+}
+
+// fetchKeystore retrieves the encrypted keystore from the account manager.
+func fetchKeystore(am *accounts.Manager) (*keystore.KeyStore, error) {
+ if ks := am.Backends(keystore.KeyStoreType); len(ks) > 0 {
+ return ks[0].(*keystore.KeyStore), nil
+ }
+ return nil, errors.New("local keystore not used")
+}
+
+// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
+// encrypting it with the passphrase.
+func (api *PersonalAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
+ key, err := crypto.HexToECDSA(privkey)
+ if err != nil {
+ return common.Address{}, err
+ }
+ ks, err := fetchKeystore(api.am)
+ if err != nil {
+ return common.Address{}, err
+ }
+ acc, err := ks.ImportECDSA(key, password)
+ return acc.Address, err
+}
+
+// UnlockAccount will unlock the account associated with the given address with
+// the given password for duration seconds. If duration is nil it will use a
+// default of 300 seconds. It returns an indication if the account was unlocked.
+func (api *PersonalAccountAPI) UnlockAccount(ctx context.Context, addr common.Address, password string, duration *uint64) (bool, error) {
+ // When the API is exposed over external RPC (HTTP, WS, etc.), account
+ // unlocking is disabled unless the user has explicitly allowed insecure
+ // unlocking.
+ if api.b.ExtRPCEnabled() && !api.b.AccountManager().Config().InsecureUnlockAllowed {
+ return false, errors.New("account unlock with HTTP access is forbidden")
+ }
+
+ const max = uint64(time.Duration(math.MaxInt64) / time.Second)
+ var d time.Duration
+ if duration == nil {
+ d = 300 * time.Second
+ } else if *duration > max {
+ return false, errors.New("unlock duration too large")
+ } else {
+ d = time.Duration(*duration) * time.Second
+ }
+ ks, err := fetchKeystore(api.am)
+ if err != nil {
+ return false, err
+ }
+ err = ks.TimedUnlock(accounts.Account{Address: addr}, password, d)
+ if err != nil {
+ log.Warn("Failed account unlock attempt", "address", addr, "err", err)
+ }
+ return err == nil, err
+}
+
+// LockAccount will lock the account associated with the given address when it's unlocked.
+func (api *PersonalAccountAPI) LockAccount(addr common.Address) bool {
+ if ks, err := fetchKeystore(api.am); err == nil {
+ return ks.Lock(addr) == nil
+ }
+ return false
+}
+
+// signTransaction sets defaults and signs the given transaction
+// NOTE: the caller needs to ensure that the nonceLock is held, if applicable,
+// and release it after the transaction has been submitted to the tx pool
+func (api *PersonalAccountAPI) signTransaction(ctx context.Context, args *TransactionArgs, passwd string) (*types.Transaction, error) {
+ // Look up the wallet containing the requested signer
+ account := accounts.Account{Address: args.from()}
+ wallet, err := api.am.Find(account)
+ if err != nil {
+ return nil, err
+ }
+ // Set some sanity defaults and terminate on failure
+ if err := args.setDefaults(ctx, api.b, false); err != nil {
+ return nil, err
+ }
+ // Assemble the transaction and sign with the wallet
+ tx := args.ToTransaction(types.LegacyTxType)
+
+ return wallet.SignTxWithPassphrase(account, passwd, tx, api.b.ChainConfig().ChainID)
+}
+
+// SendTransaction will create a transaction from the given arguments and
+// try to sign it with the key associated with args.From. If the given
+// passwd isn't able to decrypt the key, it fails.
+func (api *PersonalAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
+ if args.Nonce == nil {
+ // Hold the mutex around signing to prevent concurrent assignment of
+ // the same nonce to multiple accounts.
+ api.nonceLock.LockAddr(args.from())
+ defer api.nonceLock.UnlockAddr(args.from())
+ }
+ if args.IsEIP4844() {
+ return common.Hash{}, errBlobTxNotSupported
+ }
+ signed, err := api.signTransaction(ctx, &args, passwd)
+ if err != nil {
+ log.Warn("Failed transaction send attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err)
+ return common.Hash{}, err
+ }
+ return SubmitTransaction(ctx, api.b, signed)
+}
+
+// SignTransaction will create a transaction from the given arguments and
+// try to sign it with the key associated with args.From. If the given passwd isn't
+// able to decrypt the key, it fails. The transaction is returned in RLP form, not broadcast
+// to other nodes.
+func (api *PersonalAccountAPI) SignTransaction(ctx context.Context, args TransactionArgs, passwd string) (*SignTransactionResult, error) {
+ // No need to obtain the noncelock mutex, since we won't be sending this
+ // tx into the transaction pool, but right back to the user
+ if args.From == nil {
+ return nil, errors.New("sender not specified")
+ }
+ if args.Gas == nil {
+ return nil, errors.New("gas not specified")
+ }
+ if args.GasPrice == nil && (args.MaxFeePerGas == nil || args.MaxPriorityFeePerGas == nil) {
+ return nil, errors.New("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas")
+ }
+ if args.IsEIP4844() {
+ return nil, errBlobTxNotSupported
+ }
+ if args.Nonce == nil {
+ return nil, errors.New("nonce not specified")
+ }
+ // Before actually signing the transaction, ensure the transaction fee is reasonable.
+ tx := args.ToTransaction(types.LegacyTxType)
+ if err := checkTxFee(tx.GasPrice(), tx.Gas(), api.b.RPCTxFeeCap()); err != nil {
+ return nil, err
+ }
+ signed, err := api.signTransaction(ctx, &args, passwd)
+ if err != nil {
+ log.Warn("Failed transaction sign attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err)
+ return nil, err
+ }
+ data, err := signed.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ return &SignTransactionResult{data, signed}, nil
+}
+
+// Sign calculates an Ethereum ECDSA signature for:
+// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message)
+//
+// Note, the produced signature conforms to the secp256k1 curve R, S and V values,
+// where the V value will be 27 or 28 for legacy reasons.
+//
+// The key used to calculate the signature is decrypted with the given password.
+//
+// https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-sign
+func (api *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
+ // Look up the wallet containing the requested signer
+ account := accounts.Account{Address: addr}
+
+ wallet, err := api.b.AccountManager().Find(account)
+ if err != nil {
+ return nil, err
+ }
+ // Assemble and sign the data with the wallet
+ signature, err := wallet.SignTextWithPassphrase(account, passwd, data)
+ if err != nil {
+ log.Warn("Failed data sign attempt", "address", addr, "err", err)
+ return nil, err
+ }
+ signature[crypto.RecoveryIDOffset] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
+ return signature, nil
+}
+
+// EcRecover returns the address for the account that was used to create the signature.
+// Note, this function is compatible with eth_sign and personal_sign. As such it recovers
+// the address of:
+// hash = keccak256("\x19Ethereum Signed Message:\n"${message length}${message})
+// addr = ecrecover(hash, signature)
+//
+// Note, the signature must conform to the secp256k1 curve R, S and V values, where
+// the V value must be 27 or 28 for legacy reasons.
+//
+// https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-ecrecover
+func (api *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
+ if len(sig) != crypto.SignatureLength {
+ return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
+ }
+ if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 {
+ return common.Address{}, errors.New("invalid Ethereum signature (V is not 27 or 28)")
+ }
+ sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1
+
+ rpk, err := crypto.SigToPub(accounts.TextHash(data), sig)
+ if err != nil {
+ return common.Address{}, err
+ }
+ return crypto.PubkeyToAddress(*rpk), nil
+}
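// A hedged, standalone sketch (not part of this patch): Sign and EcRecover
// round-trip through the EIP-191 text hash, with V shifted to 27/28 on the way
// out and back to 0/1 before recovery.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/ethereum/go-ethereum/accounts"
//		"github.com/ethereum/go-ethereum/crypto"
//	)
//
//	func main() {
//		key, _ := crypto.GenerateKey()
//		msg := []byte("hello")
//		sig, _ := crypto.Sign(accounts.TextHash(msg), key) // V is 0/1 here
//		sig[crypto.RecoveryIDOffset] += 27                 // form returned by personal_sign
//
//		sig[crypto.RecoveryIDOffset] -= 27 // form expected by SigToPub, as EcRecover does above
//		pub, _ := crypto.SigToPub(accounts.TextHash(msg), sig)
//		fmt.Println(crypto.PubkeyToAddress(*pub) == crypto.PubkeyToAddress(key.PublicKey)) // true
//	}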
+
+// InitializeWallet initializes a new wallet at the provided URL, by generating and returning a new private key.
+func (api *PersonalAccountAPI) InitializeWallet(ctx context.Context, url string) (string, error) {
+ wallet, err := api.am.Wallet(url)
+ if err != nil {
+ return "", err
+ }
+
+ entropy, err := bip39.NewEntropy(256)
+ if err != nil {
+ return "", err
+ }
+
+ mnemonic, err := bip39.NewMnemonic(entropy)
+ if err != nil {
+ return "", err
+ }
+
+ seed := bip39.NewSeed(mnemonic, "")
+
+ switch wallet := wallet.(type) {
+ case *scwallet.Wallet:
+ return mnemonic, wallet.Initialize(seed)
+ default:
+ return "", errors.New("specified wallet does not support initialization")
+ }
+}
+
+// Unpair deletes a pairing between wallet and geth.
+func (api *PersonalAccountAPI) Unpair(ctx context.Context, url string, pin string) error {
+ wallet, err := api.am.Wallet(url)
+ if err != nil {
+ return err
+ }
+
+ switch wallet := wallet.(type) {
+ case *scwallet.Wallet:
+ return wallet.Unpair([]byte(pin))
+ default:
+ return errors.New("specified wallet does not support pairing")
+ }
+}
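// A hedged usage sketch (endpoint and password are placeholders, not from this
// patch): with the personal namespace enabled, the methods above are reachable
// over JSON-RPC, for example through the go-ethereum rpc client.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/ethereum/go-ethereum/rpc"
//	)
//
//	func main() {
//		client, err := rpc.Dial("http://localhost:8545")
//		if err != nil {
//			panic(err)
//		}
//		var addr string
//		// personal_newAccount maps to PersonalAccountAPI.NewAccount above.
//		if err := client.Call(&addr, "personal_newAccount", "my-password"); err != nil {
//			panic(err)
//		}
//		fmt.Println("new account:", addr)
//	}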
+
// BlockChainAPI provides an API to access Ethereum blockchain data.
type BlockChainAPI struct {
b Backend
@@ -843,7 +1180,7 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
defer cancel()
gp := new(core.GasPool)
if globalGasCap == 0 {
- gp.AddGas(gomath.MaxUint64)
+ gp.AddGas(math.MaxUint64)
} else {
gp.AddGas(globalGasCap)
}
@@ -868,16 +1205,11 @@ func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *s
if precompiles != nil {
evm.SetPrecompiles(precompiles)
}
- res, err := applyMessageWithEVM(ctx, evm, msg, timeout, gp)
- // If an internal state error occurred, let that have precedence. Otherwise,
- // a "trie root missing" type of error will masquerade as e.g. "insufficient gas"
- if err := state.Error(); err != nil {
- return nil, err
- }
- return res, err
+
+ return applyMessageWithEVM(ctx, evm, msg, state, timeout, gp)
}
-func applyMessageWithEVM(ctx context.Context, evm *vm.EVM, msg *core.Message, timeout time.Duration, gp *core.GasPool) (*core.ExecutionResult, error) {
+func applyMessageWithEVM(ctx context.Context, evm *vm.EVM, msg *core.Message, state *state.StateDB, timeout time.Duration, gp *core.GasPool) (*core.ExecutionResult, error) {
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
@@ -953,7 +1285,7 @@ func (api *BlockChainAPI) SimulateV1(ctx context.Context, opts simOpts, blockNrO
}
gasCap := api.b.RPCGasCap()
if gasCap == 0 {
- gasCap = gomath.MaxUint64
+ gasCap = math.MaxUint64
}
sim := &simulator{
b: api.b,
@@ -1098,6 +1430,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
+ if block.Header().RequestsHash != nil {
+ fields["requests"] = block.Requests()
+ }
return fields
}
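// Hedged illustration (block and config are assumed to be in scope; not part of
// this patch): once a header carries a RequestsHash, the marshalled RPC block
// also exposes a "requests" key.
//
//	fields := RPCMarshalBlock(block, true, true, config)
//	if reqs, ok := fields["requests"]; ok {
//		fmt.Printf("block carries consensus-layer requests: %v\n", reqs)
//	}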
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index f570c5dc4ce4..53b264a01aa8 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -2192,7 +2192,6 @@ func TestSimulateV1(t *testing.T) {
t.Fatalf("failed to unmarshal result: %v", err)
}
if !reflect.DeepEqual(have, tc.want) {
- t.Log(string(resBytes))
t.Errorf("test %s, result mismatch, have\n%v\n, want\n%v\n", tc.name, have, tc.want)
}
})
diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go
index 81b4633d42cf..4371a4246480 100644
--- a/internal/ethapi/simulate.go
+++ b/internal/ethapi/simulate.go
@@ -187,10 +187,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
}
evm = vm.NewEVM(blockContext, vm.TxContext{GasPrice: new(big.Int)}, sim.state, sim.chainConfig, *vmConfig)
)
- var tracingStateDB = vm.StateDB(sim.state)
- if hooks := tracer.Hooks(); hooks != nil {
- tracingStateDB = state.NewHookedState(sim.state, hooks)
- }
+ sim.state.SetLogger(tracer.Hooks())
// It is possible to override precompiles with EVM bytecode, or
// move them to another address.
if precompiles != nil {
@@ -208,8 +205,8 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
tracer.reset(tx.Hash(), uint(i))
// EoA check is always skipped, even in validation mode.
msg := call.ToMessage(header.BaseFee, !sim.validate, true)
- evm.Reset(core.NewEVMTxContext(msg), tracingStateDB)
- result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp)
+ evm.Reset(core.NewEVMTxContext(msg), sim.state)
+ result, err := applyMessageWithEVM(ctx, evm, msg, sim.state, timeout, sim.gp)
if err != nil {
txErr := txValidationError(err)
return nil, nil, txErr
@@ -217,7 +214,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
// Update the state with pending changes.
var root []byte
if sim.chainConfig.IsByzantium(blockContext.BlockNumber) {
- tracingStateDB.Finalise(true)
+ sim.state.Finalise(true)
} else {
root = sim.state.IntermediateRoot(sim.chainConfig.IsEIP158(blockContext.BlockNumber)).Bytes()
}
diff --git a/internal/flags/flags_test.go b/internal/flags/flags_test.go
index 82e23fb4d2ff..a0cc9c98963a 100644
--- a/internal/flags/flags_test.go
+++ b/internal/flags/flags_test.go
@@ -23,6 +23,8 @@ import (
)
func TestPathExpansion(t *testing.T) {
+ t.Parallel()
+
user, _ := user.Current()
var tests map[string]string
diff --git a/log/format.go b/log/format.go
index e7dd8a4099b6..8f6d3ce0c9a9 100644
--- a/log/format.go
+++ b/log/format.go
@@ -79,7 +79,7 @@ func (h *TerminalHandler) format(buf []byte, r slog.Record, usecolor bool) []byt
}
func (h *TerminalHandler) formatAttributes(buf *bytes.Buffer, r slog.Record, color string) {
- writeAttr := func(attr slog.Attr, last bool) {
+ writeAttr := func(attr slog.Attr, first, last bool) {
buf.WriteByte(' ')
if color != "" {
diff --git a/miner/payload_building.go b/miner/payload_building.go
index a2497704e433..6a7d9c09757c 100644
--- a/miner/payload_building.go
+++ b/miner/payload_building.go
@@ -69,24 +69,27 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID {
// the revenue. Therefore, the empty-block here is always available and full-block
// will be set/updated afterwards.
type Payload struct {
- id engine.PayloadID
- empty *types.Block
- full *types.Block
- sidecars []*types.BlobTxSidecar
- fullFees *big.Int
- stop chan struct{}
- lock sync.Mutex
- cond *sync.Cond
+ id engine.PayloadID
+ empty *types.Block
+ emptyWitness *stateless.Witness
+ full *types.Block
+ fullWitness *stateless.Witness
+ sidecars []*types.BlobTxSidecar
+ fullFees *big.Int
+ stop chan struct{}
+ lock sync.Mutex
+ cond *sync.Cond
// CHANGE(taiko): done channel to communicate we shouldn't write to the `stop` channel.
done chan struct{}
}
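// Illustrative pattern only (names are assumptions, not this file's code): a
// buffered channel of capacity 1 can be signalled exactly once without
// blocking, which is how `done` tells other goroutines to stop writing to
// `stop`.
//
//	done := make(chan struct{}, 1)
//	signalDone := func() {
//		select {
//		case done <- struct{}{}: // first signal wins
//		default: // already signalled; never block
//		}
//	}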
// newPayload initializes the payload object.
-func newPayload(empty *types.Block, emptyRequests [][]byte, witness *stateless.Witness, id engine.PayloadID) *Payload {
+func newPayload(empty *types.Block, witness *stateless.Witness, id engine.PayloadID) *Payload {
payload := &Payload{
- id: id,
- empty: empty,
- stop: make(chan struct{}),
+ id: id,
+ empty: empty,
+ emptyWitness: witness,
+ stop: make(chan struct{}),
// CHANGE(taiko): buffered channel to communicate done to taiko payload builder
done: make(chan struct{}, 1),
}
@@ -112,7 +115,6 @@ func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) {
payload.full = r.block
payload.fullFees = r.fees
payload.sidecars = r.sidecars
- payload.requests = r.requests
payload.fullWitness = r.witness
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(r.fees), big.NewFloat(params.Ether))
@@ -148,14 +150,14 @@ func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope {
close(payload.stop)
}
if payload.full != nil {
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.requests)
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
}
return envelope
}
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -169,7 +171,7 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope {
payload.lock.Lock()
defer payload.lock.Unlock()
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -205,7 +207,7 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope {
}
close(payload.stop)
}
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.requests)
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
@@ -233,7 +235,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
return nil, empty.err
}
// Construct a payload object for return.
- payload := newPayload(empty.block, empty.requests, empty.witness, args.Id())
+ payload := newPayload(empty.block, empty.witness, args.Id())
// Spin up a routine for updating the payload in the background. This strategy
// can maximize the revenue by including the transactions with the highest fees.
@@ -263,7 +265,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
select {
case <-timer.C:
// CHANGE(taiko): do not update payload.
- if w.chainConfig.Taiko {
+ if miner.chainConfig.Taiko {
continue
}
start := time.Now()
diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go
index e5eb0297a155..aad87627e674 100644
--- a/miner/payload_building_test.go
+++ b/miner/payload_building_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -113,7 +114,9 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
case *clique.Clique:
gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength)
copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes())
- e.Authorize(testBankAddress)
+ e.Authorize(testBankAddress, func(account accounts.Account, s string, data []byte) ([]byte, error) {
+ return crypto.Sign(crypto.Keccak256(data), testBankKey)
+ })
case *ethash.Ethash:
default:
t.Fatalf("unexpected consensus engine type: %T", engine)
diff --git a/miner/taiko_miner.go b/miner/taiko_miner.go
index 9b9a246c94fd..4bc09792a2da 100644
--- a/miner/taiko_miner.go
+++ b/miner/taiko_miner.go
@@ -24,7 +24,7 @@ func (miner *Miner) SealBlockWith(
baseFeePerGas *big.Int,
withdrawals types.Withdrawals,
) (*types.Block, error) {
- return miner.worker.sealBlockWith(parent, timestamp, blkMeta, baseFeePerGas, withdrawals)
+ return miner.sealBlockWith(parent, timestamp, blkMeta, baseFeePerGas, withdrawals)
}
// BuildTransactionsLists builds multiple transactions lists which satisfy all the given limits.
@@ -36,7 +36,7 @@ func (miner *Miner) BuildTransactionsLists(
locals []string,
maxTransactionsLists uint64,
) ([]*PreBuiltTxList, error) {
- return miner.BuildTransactionsListsWithMinTip(
+ return miner.buildTransactionsLists(
beneficiary,
baseFee,
blockMaxGasLimit,
@@ -58,7 +58,7 @@ func (miner *Miner) BuildTransactionsListsWithMinTip(
maxTransactionsLists uint64,
minTip uint64,
) ([]*PreBuiltTxList, error) {
- return miner.worker.BuildTransactionsLists(
+ return miner.buildTransactionsLists(
beneficiary,
baseFee,
blockMaxGasLimit,
diff --git a/miner/taiko_payload_building_test.go b/miner/taiko_payload_building_test.go
index 6b7118c3a860..d61df949f03c 100644
--- a/miner/taiko_payload_building_test.go
+++ b/miner/taiko_payload_building_test.go
@@ -18,7 +18,7 @@ func newTestBlock() *types.Block {
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
txs := []*types.Transaction{tx1}
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, trie.NewStackTrie(nil))
+ block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil))
return block
}
@@ -28,7 +28,6 @@ func TestSetFullBlock_AvoidPanic(t *testing.T) {
recipient = common.HexToAddress("0xdeadbeef")
)
w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0)
- defer w.close()
timestamp := uint64(time.Now().Unix())
args := &BuildPayloadArgs{
@@ -37,7 +36,7 @@ func TestSetFullBlock_AvoidPanic(t *testing.T) {
Random: common.Hash{},
FeeRecipient: recipient,
}
- payload, err := w.buildPayload(args)
+ payload, err := w.buildPayload(args, false)
if err != nil {
t.Fatalf("Failed to build payload %v", err)
}
@@ -59,7 +58,6 @@ func TestAfterSetFullBlock_Panic_DoneChannelNotSent(t *testing.T) {
recipient = common.HexToAddress("0xdeadbeef")
)
w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0)
- defer w.close()
timestamp := uint64(time.Now().Unix())
args := &BuildPayloadArgs{
@@ -68,7 +66,7 @@ func TestAfterSetFullBlock_Panic_DoneChannelNotSent(t *testing.T) {
Random: common.Hash{},
FeeRecipient: recipient,
}
- payload, err := w.buildPayload(args)
+ payload, err := w.buildPayload(args, false)
if err != nil {
t.Fatalf("Failed to build payload %v", err)
}
@@ -82,13 +80,12 @@ func TestAfterSetFullBlock_Panic_DoneChannelNotSent(t *testing.T) {
})
}
-func TestAfterSetFullBlock_AvoidPanic_DoneChannelSent(t *testing.T) {
+func TestAfterSetFullBlockAvoidPanicDoneChannelSent(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
recipient = common.HexToAddress("0xdeadbeef")
)
w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0)
- defer w.close()
timestamp := uint64(time.Now().Unix())
args := &BuildPayloadArgs{
@@ -97,7 +94,7 @@ func TestAfterSetFullBlock_AvoidPanic_DoneChannelSent(t *testing.T) {
Random: common.Hash{},
FeeRecipient: recipient,
}
- payload, err := w.buildPayload(args)
+ payload, err := w.buildPayload(args, false)
if err != nil {
t.Fatalf("Failed to build payload %v", err)
}
@@ -116,7 +113,6 @@ func TestSetFullBlock(t *testing.T) {
recipient = common.HexToAddress("0xdeadbeef")
)
w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0)
- defer w.close()
timestamp := uint64(time.Now().Unix())
args := &BuildPayloadArgs{
@@ -125,7 +121,7 @@ func TestSetFullBlock(t *testing.T) {
Random: common.Hash{},
FeeRecipient: recipient,
}
- payload, err := w.buildPayload(args)
+ payload, err := w.buildPayload(args, false)
if err != nil {
t.Fatalf("Failed to build payload %v", err)
}
diff --git a/miner/taiko_worker.go b/miner/taiko_worker.go
index d59d29cd9130..1224cca8f662 100644
--- a/miner/taiko_worker.go
+++ b/miner/taiko_worker.go
@@ -26,7 +26,7 @@ import (
// 2. The total gas used should not exceed the given blockMaxGasLimit
// 3. The total bytes used should not exceed the given maxBytesPerTxList
// 4. The total number of transactions lists should not exceed the given maxTransactionsLists
-func (w *worker) BuildTransactionsLists(
+func (w *Miner) buildTransactionsLists(
beneficiary common.Address,
baseFee *big.Int,
blockMaxGasLimit uint64,
@@ -45,7 +45,7 @@ func (w *worker) BuildTransactionsLists(
}
// Check if tx pool is empty at first.
- if len(w.eth.TxPool().Pending(txpool.PendingFilter{MinTip: uint256.NewInt(minTip), BaseFee: uint256.MustFromBig(baseFee), OnlyPlainTxs: true})) == 0 {
+ if len(w.txpool.Pending(txpool.PendingFilter{MinTip: uint256.NewInt(minTip), BaseFee: uint256.MustFromBig(baseFee), OnlyPlainTxs: true})) == 0 {
return txsLists, nil
}
@@ -59,11 +59,10 @@ func (w *worker) BuildTransactionsLists(
baseFeePerGas: baseFee,
}
- env, err := w.prepareWork(params)
+ env, err := w.prepareWork(params, false)
if err != nil {
return nil, err
}
- defer env.discard()
var (
signer = types.MakeSigner(w.chainConfig, new(big.Int).Add(currentHead.Number, common.Big1), currentHead.Time)
@@ -115,7 +114,7 @@ func (w *worker) BuildTransactionsLists(
}
// sealBlockWith mines and seals a block with the given block metadata.
-func (w *worker) sealBlockWith(
+func (w *Miner) sealBlockWith(
parent common.Hash,
timestamp uint64,
blkMeta *engine.BlockMetadata,
@@ -145,13 +144,12 @@ func (w *worker) sealBlockWith(
}
// Set extraData
- w.extra = blkMeta.ExtraData
+ w.SetExtra(blkMeta.ExtraData)
- env, err := w.prepareWork(params)
+ env, err := w.prepareWork(params, false)
if err != nil {
return nil, err
}
- defer env.discard()
env.header.GasLimit = blkMeta.GasLimit
@@ -180,14 +178,20 @@ func (w *worker) sealBlockWith(
env.state.Prepare(rules, sender, blkMeta.Beneficiary, tx.To(), vm.ActivePrecompiles(rules), tx.AccessList())
env.state.SetTxContext(tx.Hash(), env.tcount)
- if _, err := w.commitTransaction(env, tx); err != nil {
+ if err := w.commitTransaction(env, tx); err != nil {
log.Debug("Skip an invalid proposed transaction", "hash", tx.Hash(), "reason", err)
continue
}
env.tcount++
}
- block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, nil, env.receipts, withdrawals)
+ block, err := w.engine.FinalizeAndAssemble(
+ w.chain,
+ env.header,
+ env.state,
+ &types.Body{Transactions: env.txs, Withdrawals: withdrawals},
+ env.receipts,
+ )
if err != nil {
return nil, err
}
@@ -202,11 +206,11 @@ func (w *worker) sealBlockWith(
}
// getPendingTxs fetches the pending transactions from tx pool.
-func (w *worker) getPendingTxs(localAccounts []string, baseFee *big.Int) (
+func (w *Miner) getPendingTxs(localAccounts []string, baseFee *big.Int) (
map[common.Address][]*txpool.LazyTransaction,
map[common.Address][]*txpool.LazyTransaction,
) {
- pending := w.eth.TxPool().Pending(txpool.PendingFilter{OnlyPlainTxs: true, BaseFee: uint256.MustFromBig(baseFee)})
+ pending := w.txpool.Pending(txpool.PendingFilter{OnlyPlainTxs: true, BaseFee: uint256.MustFromBig(baseFee)})
localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending
for _, local := range localAccounts {
@@ -221,7 +225,7 @@ func (w *worker) getPendingTxs(localAccounts []string, baseFee *big.Int) (
}
// commitL2Transactions tries to commit the transactions into the given state.
-func (w *worker) commitL2Transactions(
+func (w *Miner) commitL2Transactions(
env *environment,
txsLocal *transactionsByPriceAndNonce,
txsRemote *transactionsByPriceAndNonce,
@@ -282,7 +286,7 @@ loop:
snap := env.state.RevisionId()
gasPool := env.gasPool.Gas()
- _, err := w.commitTransaction(env, tx)
+ err := w.commitTransaction(env, tx)
switch {
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
diff --git a/miner/taiko_worker_test.go b/miner/taiko_worker_test.go
index 95bab25315f3..e978b7bc75cd 100644
--- a/miner/taiko_worker_test.go
+++ b/miner/taiko_worker_test.go
@@ -1,16 +1,39 @@
package miner
import (
+ "math/big"
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/assert"
)
-func testGenerateWorker(t *testing.T, txCount int) *worker {
+const (
+ // testCode is the testing contract binary code which initialises some
+ // variables in the constructor
+ testCode = "0x60806040527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0060005534801561003457600080fd5b5060fc806100436000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80630c4dae8814603757806398a213cf146053575b600080fd5b603d607e565b6040518082815260200191505060405180910390f35b607c60048036036020811015606757600080fd5b81019080803590602001909291905050506084565b005b60005481565b806000819055507fe9e44f9f7da8c559de847a3232b57364adc0354f15a2cd8dc636d54396f9587a6000546040518082815260200191505060405180910390a15056fea265627a7a723058208ae31d9424f2d0bc2a3da1a5dd659db2d71ec322a17db8f87e19e209e3a1ff4a64736f6c634300050a0032"
+
+ // testGas is the gas required for contract deployment.
+ testGas = 144109
+)
+
+func newRandomTx(txPool *txpool.TxPool, creation bool) *types.Transaction {
+ var tx *types.Transaction
+ gasPrice := big.NewInt(10 * params.InitialBaseFee)
+ if creation {
+ tx, _ = types.SignTx(types.NewContractCreation(txPool.Nonce(testBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey)
+ } else {
+ tx, _ = types.SignTx(types.NewTransaction(txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey)
+ }
+ return tx
+}
+
+func testGenerateWorker(t *testing.T, txCount int) *Miner {
t.Parallel()
var (
db = rawdb.NewMemoryDatabase()
@@ -21,11 +44,10 @@ func testGenerateWorker(t *testing.T, txCount int) *worker {
engine := clique.New(config.Clique, db)
w, b := newTestWorker(t, &config, engine, db, 0)
- //defer w.close()
for i := 0; i < txCount; i++ {
- b.txPool.Add([]*types.Transaction{b.newRandomTx(true)}, true, false)
- b.txPool.Add([]*types.Transaction{b.newRandomTx(false)}, true, false)
+ b.txPool.Add([]*types.Transaction{newRandomTx(b.txPool, true)}, true, false)
+ b.txPool.Add([]*types.Transaction{newRandomTx(b.txPool, false)}, true, false)
}
return w
@@ -33,18 +55,17 @@ func testGenerateWorker(t *testing.T, txCount int) *worker {
func TestBuildTransactionsLists(t *testing.T) {
w := testGenerateWorker(t, 1000)
- defer w.close()
maxBytesPerTxList := (params.BlobTxBytesPerFieldElement - 1) * params.BlobTxFieldElementsPerBlob
- txLst, err := w.BuildTransactionsLists(
+ txList, err := w.BuildTransactionsLists(
testBankAddress,
nil,
240_000_000,
uint64(maxBytesPerTxList),
nil,
1,
- 0)
+ )
assert.NoError(t, err)
- assert.LessOrEqual(t, 1, len(txLst))
- assert.LessOrEqual(t, txLst[0].BytesLength, uint64(maxBytesPerTxList))
+ assert.LessOrEqual(t, 1, len(txList))
+ assert.LessOrEqual(t, txList[0].BytesLength, uint64(maxBytesPerTxList))
}
diff --git a/miner/worker.go b/miner/worker.go
index eca5ffcec03a..434f698370dc 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -76,7 +76,6 @@ type newPayloadResult struct {
sidecars []*types.BlobTxSidecar // collected blobs of blob transactions
stateDB *state.StateDB // StateDB after executing the transactions
receipts []*types.Receipt // Receipts collected during construction
- requests [][]byte // Consensus layer requests collected during block construction
witness *stateless.Witness // Witness is an optional stateless proof
}
@@ -90,6 +89,8 @@ type generateParams struct {
withdrawals types.Withdrawals // List of withdrawals to include in block (shanghai field)
beaconRoot *common.Hash // The beacon root (cancun field).
noTxs bool // Flag whether an empty block without any transaction is expected
+ // CHANGE(taiko): The base fee per gas for the next block, used by the legacy Taiko blocks.
+ baseFeePerGas *big.Int
}
// generateWork generates a sealing block based on the given parameters.
@@ -116,31 +117,14 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
for _, r := range work.receipts {
allLogs = append(allLogs, r.Logs...)
}
-
- // Collect consensus-layer requests if Prague is enabled.
- var requests [][]byte
+ // Read requests if Prague is enabled.
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
- // EIP-6110 deposits
- depositRequests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
+ requests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
if err != nil {
return &newPayloadResult{err: err}
}
- requests = append(requests, depositRequests)
- // create EVM for system calls
- blockContext := core.NewEVMBlockContext(work.header, miner.chain, &work.header.Coinbase)
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, work.state, miner.chainConfig, vm.Config{})
- // EIP-7002 withdrawals
- withdrawalRequests := core.ProcessWithdrawalQueue(vmenv, work.state)
- requests = append(requests, withdrawalRequests)
- // EIP-7251 consolidations
- consolidationRequests := core.ProcessConsolidationQueue(vmenv, work.state)
- requests = append(requests, consolidationRequests)
- }
- if requests != nil {
- reqHash := types.CalcRequestsHash(requests)
- work.header.RequestsHash = &reqHash
+ body.Requests = requests
}
-
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
if err != nil {
return &newPayloadResult{err: err}
@@ -151,7 +135,6 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
sidecars: work.sidecars,
stateDB: work.state,
receipts: work.receipts,
- requests: requests,
witness: work.witness,
}
}
@@ -176,10 +159,17 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
// to parent+1 if the mutation is allowed.
timestamp := genParams.timestamp
if parent.Time >= timestamp {
- if genParams.forceTime {
- return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
+ // CHANGE(taiko): block.timestamp == parent.timestamp is allowed in Taiko protocol.
+ if !miner.chainConfig.Taiko {
+ if genParams.forceTime {
+ return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
+ }
+ timestamp = parent.Time + 1
+ } else {
+ if parent.Time > timestamp {
+ return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
+ }
}
- timestamp = parent.Time + 1
}
// Construct the sealing block header.
header := &types.Header{
@@ -199,10 +189,14 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
}
// Set baseFee and GasLimit if we are on an EIP-1559 chain
if miner.chainConfig.IsLondon(header.Number) {
- header.BaseFee = eip1559.CalcBaseFee(miner.chainConfig, parent)
- if !miner.chainConfig.IsLondon(parent.Number) {
- parentGasLimit := parent.GasLimit * miner.chainConfig.ElasticityMultiplier()
- header.GasLimit = core.CalcGasLimit(parentGasLimit, miner.config.GasCeil)
+ if miner.chainConfig.Taiko && genParams.baseFeePerGas != nil {
+ header.BaseFee = genParams.baseFeePerGas
+ } else {
+ header.BaseFee = eip1559.CalcBaseFee(miner.chainConfig, parent)
+ if !miner.chainConfig.IsLondon(parent.Number) {
+ parentGasLimit := parent.GasLimit * miner.chainConfig.ElasticityMultiplier()
+ header.GasLimit = core.CalcGasLimit(parentGasLimit, miner.config.GasCeil)
+ }
}
}
// Run the consensus preparation with the default or customized consensus engine.
@@ -422,114 +416,6 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
return nil
}
-// generateParams wraps various of settings for generating sealing task.
-type generateParams struct {
- timestamp uint64 // The timestamp for sealing task
- forceTime bool // Flag whether the given timestamp is immutable or not
- parentHash common.Hash // Parent block hash, empty means the latest chain head
- coinbase common.Address // The fee recipient address for including transaction
- random common.Hash // The randomness generated by beacon chain, empty before the merge
- withdrawals types.Withdrawals // List of withdrawals to include in block.
- beaconRoot *common.Hash // The beacon root (cancun field).
- noTxs bool // Flag whether an empty block without any transaction is expected
- // CHANGE(taiko): The base fee per gas for the next block, used by the legacy Taiko blocks.
- baseFeePerGas *big.Int
-}
-
-// prepareWork constructs the sealing task according to the given parameters,
-// either based on the last chain head or specified parent. In this function
-// the pending transactions are not filled yet, only the empty task returned.
-func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- // Find the parent block for sealing task
- parent := w.chain.CurrentBlock()
- if genParams.parentHash != (common.Hash{}) {
- block := w.chain.GetBlockByHash(genParams.parentHash)
- if block == nil {
- return nil, fmt.Errorf("missing parent")
- }
- parent = block.Header()
- }
- // Sanity check the timestamp correctness, recap the timestamp
- // to parent+1 if the mutation is allowed.
- timestamp := genParams.timestamp
- if parent.Time >= timestamp {
- // CHANGE(taiko): block.timestamp == parent.timestamp is allowed in Taiko protocol.
- if !w.chainConfig.Taiko {
- if genParams.forceTime {
- return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
- }
- timestamp = parent.Time + 1
- } else {
- if parent.Time > timestamp {
- return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
- }
- }
- }
- // Construct the sealing block header.
- header := &types.Header{
- ParentHash: parent.Hash(),
- Number: new(big.Int).Add(parent.Number, common.Big1),
- GasLimit: core.CalcGasLimit(parent.GasLimit, w.config.GasCeil),
- Time: timestamp,
- Coinbase: genParams.coinbase,
- }
- // Set the extra field.
- if len(w.extra) != 0 {
- header.Extra = w.extra
- }
- // Set the randomness field from the beacon chain if it's available.
- if genParams.random != (common.Hash{}) {
- header.MixDigest = genParams.random
- }
- // Set baseFee and GasLimit if we are on an EIP-1559 chain
- if w.chainConfig.IsLondon(header.Number) {
- if w.chainConfig.Taiko && genParams.baseFeePerGas != nil {
- header.BaseFee = genParams.baseFeePerGas
- } else {
- header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent)
- if !w.chainConfig.IsLondon(parent.Number) {
- parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier()
- header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
- }
- }
- }
- // Apply EIP-4844, EIP-4788.
- if w.chainConfig.IsCancun(header.Number, header.Time) {
- var excessBlobGas uint64
- if w.chainConfig.IsCancun(parent.Number, parent.Time) {
- excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed)
- } else {
- // For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0
- excessBlobGas = eip4844.CalcExcessBlobGas(0, 0)
- }
- header.BlobGasUsed = new(uint64)
- header.ExcessBlobGas = &excessBlobGas
- header.ParentBeaconRoot = genParams.beaconRoot
- }
- // Run the consensus preparation with the default or customized consensus engine.
- if err := w.engine.Prepare(w.chain, header); err != nil {
- log.Error("Failed to prepare header for sealing", "err", err)
- return nil, err
- }
- // Could potentially happen if starting to mine in an odd state.
- // Note genParams.coinbase can be different with header.Coinbase
- // since clique algorithm can modify the coinbase field in header.
- env, err := w.makeEnv(parent, header, genParams.coinbase)
- if err != nil {
- log.Error("Failed to create sealing context", "err", err)
- return nil, err
- }
- if header.ParentBeaconRoot != nil {
- context := core.NewEVMBlockContext(header, w.chain, nil)
- vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{})
- core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state)
- }
- return env, nil
-}
-
// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
diff --git a/params/config.go b/params/config.go
index 07e7bcd5cbb3..1291c8f48cdd 100644
--- a/params/config.go
+++ b/params/config.go
@@ -39,27 +39,28 @@ var (
// MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(1_150_000),
- DAOForkBlock: big.NewInt(1_920_000),
- DAOForkSupport: true,
- EIP150Block: big.NewInt(2_463_000),
- EIP155Block: big.NewInt(2_675_000),
- EIP158Block: big.NewInt(2_675_000),
- ByzantiumBlock: big.NewInt(4_370_000),
- ConstantinopleBlock: big.NewInt(7_280_000),
- PetersburgBlock: big.NewInt(7_280_000),
- IstanbulBlock: big.NewInt(9_069_000),
- MuirGlacierBlock: big.NewInt(9_200_000),
- BerlinBlock: big.NewInt(12_244_000),
- LondonBlock: big.NewInt(12_965_000),
- ArrowGlacierBlock: big.NewInt(13_773_000),
- GrayGlacierBlock: big.NewInt(15_050_000),
- TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000
- ShanghaiTime: newUint64(1681338455),
- CancunTime: newUint64(1710338135),
- DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"),
- Ethash: new(EthashConfig),
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(1_150_000),
+ DAOForkBlock: big.NewInt(1_920_000),
+ DAOForkSupport: true,
+ EIP150Block: big.NewInt(2_463_000),
+ EIP155Block: big.NewInt(2_675_000),
+ EIP158Block: big.NewInt(2_675_000),
+ ByzantiumBlock: big.NewInt(4_370_000),
+ ConstantinopleBlock: big.NewInt(7_280_000),
+ PetersburgBlock: big.NewInt(7_280_000),
+ IstanbulBlock: big.NewInt(9_069_000),
+ MuirGlacierBlock: big.NewInt(9_200_000),
+ BerlinBlock: big.NewInt(12_244_000),
+ LondonBlock: big.NewInt(12_965_000),
+ ArrowGlacierBlock: big.NewInt(13_773_000),
+ GrayGlacierBlock: big.NewInt(15_050_000),
+ TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000
+ TerminalTotalDifficultyPassed: true,
+ ShanghaiTime: newUint64(1681338455),
+ CancunTime: newUint64(1710338135),
+ DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"),
+ Ethash: new(EthashConfig),
}
// HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network.
HoleskyChainConfig = &ChainConfig{
@@ -87,27 +88,28 @@ var (
}
// SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network.
SepoliaChainConfig = &ChainConfig{
- ChainID: big.NewInt(11155111),
- HomesteadBlock: big.NewInt(0),
- DAOForkBlock: nil,
- DAOForkSupport: true,
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- BerlinBlock: big.NewInt(0),
- LondonBlock: big.NewInt(0),
- ArrowGlacierBlock: nil,
- GrayGlacierBlock: nil,
- TerminalTotalDifficulty: big.NewInt(17_000_000_000_000_000),
- MergeNetsplitBlock: big.NewInt(1735371),
- ShanghaiTime: newUint64(1677557088),
- CancunTime: newUint64(1706655072),
- Ethash: new(EthashConfig),
+ ChainID: big.NewInt(11155111),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ DAOForkSupport: true,
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: nil,
+ GrayGlacierBlock: nil,
+ TerminalTotalDifficulty: big.NewInt(17_000_000_000_000_000),
+ TerminalTotalDifficultyPassed: true,
+ MergeNetsplitBlock: big.NewInt(1735371),
+ ShanghaiTime: newUint64(1677557088),
+ CancunTime: newUint64(1706655072),
+ Ethash: new(EthashConfig),
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Ethash consensus.
@@ -139,24 +141,24 @@ var (
}
AllDevChainProtocolChanges = &ChainConfig{
- ChainID: big.NewInt(1337),
- HomesteadBlock: big.NewInt(0),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- BerlinBlock: big.NewInt(0),
- LondonBlock: big.NewInt(0),
- ArrowGlacierBlock: big.NewInt(0),
- GrayGlacierBlock: big.NewInt(0),
- ShanghaiTime: newUint64(0),
- CancunTime: newUint64(0),
- TerminalTotalDifficulty: big.NewInt(0),
- PragueTime: newUint64(0),
+ ChainID: big.NewInt(1337),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ GrayGlacierBlock: big.NewInt(0),
+ ShanghaiTime: newUint64(0),
+ CancunTime: newUint64(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ TerminalTotalDifficultyPassed: true,
}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
@@ -335,6 +337,13 @@ type ChainConfig struct {
// the network that triggers the consensus upgrade.
TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"`
+ // TerminalTotalDifficultyPassed is a flag specifying that the network already
+ // passed the terminal total difficulty. Its purpose is to disable legacy sync
+ // even without having seen the TTD locally (safer long term).
+ //
+ // TODO(karalabe): Drop this field eventually (always assuming PoS mode)
+ TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"`
+
DepositContractAddress common.Address `json:"depositContractAddress,omitempty"`
// Various consensus engines
@@ -548,6 +557,11 @@ func (c *ChainConfig) IsOntake(num *big.Int) bool {
return isBlockForked(c.OntakeBlock, num)
}
+// IsEIP4762 returns whether eip 4762 has been activated at given block.
+func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool {
+ return c.IsVerkle(num, time)
+}
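// A minimal sketch under stated assumptions (the block number and timestamp
// below are arbitrary): IsEIP4762 is an alias for the Verkle activation check,
// and TerminalTotalDifficultyPassed is a plain flag on the config.
//
//	package main
//
//	import (
//		"fmt"
//		"math/big"
//
//		"github.com/ethereum/go-ethereum/params"
//	)
//
//	func main() {
//		cfg := params.MainnetChainConfig
//		num, ts := big.NewInt(20_000_000), uint64(1_730_000_000)
//		fmt.Println(cfg.TerminalTotalDifficultyPassed)               // true
//		fmt.Println(cfg.IsEIP4762(num, ts) == cfg.IsVerkle(num, ts)) // true by definition
//	}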
+
// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError {
diff --git a/params/config_test.go b/params/config_test.go
index f658c336dc3a..8d79f92506c1 100644
--- a/params/config_test.go
+++ b/params/config_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/stretchr/testify/require"
)
diff --git a/params/protocol_params.go b/params/protocol_params.go
index d65a4747a090..d56c7373c620 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -175,7 +175,8 @@ const (
BlobTxTargetBlobGasPerBlock = 3 * BlobTxBlobGasPerBlob // Target consumable blob gas for data blobs per block (for 1559-like pricing)
MaxBlobGasPerBlock = 6 * BlobTxBlobGasPerBlob // Maximum consumable blob gas for data blobs per block
- BlobTxHashVersion = 0x01 // CHANGE(taiko): Version byte of the commitment hash
+ BlobTxHashVersion = 0x01 // CHANGE(taiko): Version byte of the commitment hash
+ HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935.
)
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
@@ -187,26 +188,17 @@ var (
GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block.
MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
-)
-
-// System contracts.
-var (
- // SystemAddress is where the system-transaction is sent from as per EIP-4788
- SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
- // EIP-4788 - Beacon block root in the EVM
+ // BeaconRootsAddress is the address where historical beacon roots are stored as per EIP-4788
BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
- BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
- // EIP-2935 - Serve historical block hashes from state
- HistoryStorageAddress = common.HexToAddress("0x0aae40965e6800cd9b1f4b05ff21581047e3f91e")
- HistoryStorageCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500")
-
- // EIP-7002 - Execution layer triggerable withdrawals
- WithdrawalQueueAddress = common.HexToAddress("0x09Fc772D0857550724b07B850a4323f39112aAaA")
- WithdrawalQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460c7573615156028575f545f5260205ff35b36603814156101f05760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f057600182026001905f5b5f821115608057810190830284830290049160010191906065565b9093900434106101f057600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160db575060105b5f5b81811461017f5780604c02838201600302600401805490600101805490600101549160601b83528260140152807fffffffffffffffffffffffffffffffff0000000000000000000000000000000016826034015260401c906044018160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160dd565b9101809214610191579060025561019c565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101c957505f5b6001546002828201116101de5750505f6101e4565b01600290035b5f555f600155604c025ff35b5f5ffd")
+ // BeaconRootsCode is the code where historical beacon roots are stored as per EIP-4788
+ BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
- // EIP-7251 - Increase the MAX_EFFECTIVE_BALANCE
- ConsolidationQueueAddress = common.HexToAddress("0x01aBEa29659e5e97C95107F20bb753cD3e09bBBb")
- ConsolidationQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460cf573615156028575f545f5260205ff35b366060141561019a5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f821115608057810190830284830290049160010191906065565b90939004341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060011160e3575060015b5f5b8181146101295780607402838201600402600401805490600101805490600101805490600101549260601b84529083601401528260340152906054015260010160e5565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd")
+ // SystemAddress is where the system-transaction is sent from as per EIP-4788
+ SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
+ // HistoryStorageAddress is where the historical block hashes are stored.
+ HistoryStorageAddress = common.HexToAddress("0x0aae40965e6800cd9b1f4b05ff21581047e3f91e")
+ // HistoryStorageCode is the code with getters for historical block hashes.
+ HistoryStorageCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500")
)
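// A hedged sketch (hypothetical helper, not part of this patch): EIP-2935 keeps
// the most recent HistoryServeWindow block hashes in a ring buffer, so the
// storage slot for a block number is the number modulo the window (assuming the
// params import).
//
//	// historySlot returns the EIP-2935 ring-buffer slot for blockNumber.
//	func historySlot(blockNumber uint64) uint64 {
//		return blockNumber % params.HistoryServeWindow
//	}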
diff --git a/params/taiko_config.go b/params/taiko_config.go
index b94a9c786ccc..6908471d81b0 100644
--- a/params/taiko_config.go
+++ b/params/taiko_config.go
@@ -35,7 +35,6 @@ var networkIDToChainConfig = map[*big.Int]*ChainConfig{
HeklaNetworkID: TaikoChainConfig,
MainnetChainConfig.ChainID: MainnetChainConfig,
SepoliaChainConfig.ChainID: SepoliaChainConfig,
- GoerliChainConfig.ChainID: GoerliChainConfig,
TestChainConfig.ChainID: TestChainConfig,
NonActivatedConfig.ChainID: NonActivatedConfig,
}
diff --git a/params/taiko_config_test.go b/params/taiko_config_test.go
index 713cb1e4ee43..f82864732500 100644
--- a/params/taiko_config_test.go
+++ b/params/taiko_config_test.go
@@ -71,11 +71,6 @@ func TestNetworkIDToChainConfigOrDefault(t *testing.T) {
SepoliaChainConfig.ChainID,
SepoliaChainConfig,
},
- {
- "goerli",
- GoerliChainConfig.ChainID,
- GoerliChainConfig,
- },
{
"doesntExist",
big.NewInt(89390218390),
diff --git a/params/version.go b/params/version.go
index a2c258ff58cb..b5c0dff7c0aa 100644
--- a/params/version.go
+++ b/params/version.go
@@ -22,8 +22,8 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
- VersionMinor = 13 // Minor version component of the current release
- VersionPatch = 15 // Patch version component of the current release
+ VersionMinor = 14 // Minor version component of the current release
+ VersionPatch = 11 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 6c1a4f8f6c00..eac92ff54c8c 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -568,8 +568,6 @@ func (b *unsubscribeBlocker) readBatch() ([]*jsonrpcMessage, bool, error) {
// not respond.
// It reproduces the issue https://github.com/ethereum/go-ethereum/issues/30156
func TestUnsubscribeTimeout(t *testing.T) {
- t.Parallel()
-
srv := NewServer()
srv.RegisterName("nftest", new(notificationTestService))
diff --git a/signer/core/apitypes/signed_data_internal_test.go b/signer/core/apitypes/signed_data_internal_test.go
index 1a14b35ef242..70d6a5858d65 100644
--- a/signer/core/apitypes/signed_data_internal_test.go
+++ b/signer/core/apitypes/signed_data_internal_test.go
@@ -250,42 +250,45 @@ func TestConvertAddressDataToSlice(t *testing.T) {
func TestTypedDataArrayValidate(t *testing.T) {
t.Parallel()
- type testDataInput struct {
- Name string `json:"name"`
- Domain TypedDataDomain `json:"domain"`
- PrimaryType string `json:"primaryType"`
- Types Types `json:"types"`
- Message TypedDataMessage `json:"data"`
- Digest string `json:"digest"`
+ typedData := TypedData{
+ Types: Types{
+ "BulkOrder": []Type{
+ // Should be able to accept fixed size arrays
+ {Name: "tree", Type: "OrderComponents[2][2]"},
+ },
+ "OrderComponents": []Type{
+ {Name: "offerer", Type: "address"},
+ {Name: "amount", Type: "uint8"},
+ },
+ "EIP712Domain": []Type{
+ {Name: "name", Type: "string"},
+ {Name: "version", Type: "string"},
+ {Name: "chainId", Type: "uint8"},
+ {Name: "verifyingContract", Type: "address"},
+ },
+ },
+ PrimaryType: "BulkOrder",
+ Domain: TypedDataDomain{
+ VerifyingContract: "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
+ },
+ Message: TypedDataMessage{},
}
- fc, err := os.ReadFile("./testdata/typed-data.json")
- require.NoError(t, err, "error reading test data file")
-
- var tests []testDataInput
- err = json.Unmarshal(fc, &tests)
- require.NoError(t, err, "error unmarshalling test data file contents")
-
- for _, tc := range tests {
- t.Run(tc.Name, func(t *testing.T) {
- t.Parallel()
- td := TypedData{
- Types: tc.Types,
- PrimaryType: tc.PrimaryType,
- Domain: tc.Domain,
- Message: tc.Message,
- }
+ if err := typedData.validate(); err != nil {
+ t.Errorf("expected typed data to pass validation, got: %v", err)
+ }
- domainSeparator, tErr := td.HashStruct("EIP712Domain", td.Domain.Map())
- assert.NoError(t, tErr, "failed to hash domain separator: %v", tErr)
+ // Should be able to accept dynamic arrays
+ typedData.Types["BulkOrder"][0].Type = "OrderComponents[]"
- messageHash, tErr := td.HashStruct(td.PrimaryType, td.Message)
- assert.NoError(t, tErr, "failed to hash message: %v", tErr)
+ if err := typedData.validate(); err != nil {
+ t.Errorf("expected typed data to pass validation, got: %v", err)
+ }
- digest := crypto.Keccak256Hash([]byte(fmt.Sprintf("%s%s%s", "\x19\x01", string(domainSeparator), string(messageHash))))
- assert.Equal(t, tc.Digest, digest.String(), "digest doesn't not match")
+ // Should be able to accept standard types
+ typedData.Types["BulkOrder"][0].Type = "OrderComponents"
- assert.NoError(t, td.validate(), "validation failed", tErr)
- })
+ if err := typedData.validate(); err != nil {
+ t.Errorf("expected typed data to pass validation, got: %v", err)
}
}
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 2ae182279a84..bb30d47d05a2 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -335,7 +335,11 @@ func (t *Type) isArray() bool {
// typeName returns the canonical name of the type. If the type is 'Person[]' or 'Person[2]', then
// this method returns 'Person'
func (t *Type) typeName() string {
- return strings.Split(t.Type, "[")[0]
+ if strings.Contains(t.Type, "[") {
+ re := regexp.MustCompile(`\[\d*\]`)
+ return re.ReplaceAllString(t.Type, "")
+ }
+ return t.Type
}
type Types map[string][]Type
@@ -386,7 +390,7 @@ func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage
// Dependencies returns an array of custom types ordered by their hierarchical reference tree
func (typedData *TypedData) Dependencies(primaryType string, found []string) []string {
- primaryType = strings.Split(primaryType, "[")[0]
+ primaryType = strings.TrimSuffix(primaryType, "[]")
if slices.Contains(found, primaryType) {
return found
@@ -887,8 +891,7 @@ func init() {
// Checks if the primitive value is valid
func isPrimitiveTypeValid(primitiveType string) bool {
- input := strings.Split(primitiveType, "[")[0]
- _, ok := validPrimitiveTypes[input]
+ _, ok := validPrimitiveTypes[primitiveType]
return ok
}
diff --git a/signer/core/apitypes/types_test.go b/signer/core/apitypes/types_test.go
index 22bbeba19ec4..796e02ba73df 100644
--- a/signer/core/apitypes/types_test.go
+++ b/signer/core/apitypes/types_test.go
@@ -142,94 +142,3 @@ func TestBlobTxs(t *testing.T) {
}
t.Logf("tx %v", string(data))
}
-
-func TestType_IsArray(t *testing.T) {
- t.Parallel()
- // Expected positives
- for i, tc := range []Type{
- {
- Name: "type1",
- Type: "int24[]",
- },
- {
- Name: "type2",
- Type: "int24[2]",
- },
- {
- Name: "type3",
- Type: "int24[2][2][2]",
- },
- } {
- if !tc.isArray() {
- t.Errorf("test %d: expected '%v' to be an array", i, tc)
- }
- }
- // Expected negatives
- for i, tc := range []Type{
- {
- Name: "type1",
- Type: "int24",
- },
- {
- Name: "type2",
- Type: "uint88",
- },
- {
- Name: "type3",
- Type: "bytes32",
- },
- } {
- if tc.isArray() {
- t.Errorf("test %d: expected '%v' to not be an array", i, tc)
- }
- }
-}
-
-func TestType_TypeName(t *testing.T) {
- t.Parallel()
-
- for i, tc := range []struct {
- Input Type
- Expected string
- }{
- {
- Input: Type{
- Name: "type1",
- Type: "int24[]",
- },
- Expected: "int24",
- },
- {
- Input: Type{
- Name: "type2",
- Type: "int26[2][2][2]",
- },
- Expected: "int26",
- },
- {
- Input: Type{
- Name: "type3",
- Type: "int24",
- },
- Expected: "int24",
- },
- {
- Input: Type{
- Name: "type4",
- Type: "uint88",
- },
- Expected: "uint88",
- },
- {
- Input: Type{
- Name: "type5",
- Type: "bytes32[2]",
- },
- Expected: "bytes32",
- },
- } {
- if tc.Input.typeName() != tc.Expected {
- t.Errorf("test %d: expected typeName value of '%v' but got '%v'", i, tc.Expected, tc.Input)
- }
- }
-}
diff --git a/tests/fuzzers/bls12381/bls12381_fuzz.go b/tests/fuzzers/bls12381/bls12381_fuzz.go
index a3e0e9f72b0a..997946f7d2fa 100644
--- a/tests/fuzzers/bls12381/bls12381_fuzz.go
+++ b/tests/fuzzers/bls12381/bls12381_fuzz.go
@@ -31,33 +31,42 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/ethereum/go-ethereum/common"
+ bls12381 "github.com/kilic/bls12-381"
blst "github.com/supranational/blst/bindings/go"
)
func fuzzG1SubgroupChecks(data []byte) int {
input := bytes.NewReader(data)
- cpG1, blG1, err := getG1Points(input)
+ kpG1, cpG1, blG1, err := getG1Points(input)
if err != nil {
return 0
}
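+ // Run the same subgroup check with the kilic implementation, so all three libraries can be compared below.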
+ inSubGroupKilic := bls12381.NewG1().InCorrectSubgroup(kpG1)
inSubGroupGnark := cpG1.IsInSubGroup()
inSubGroupBLST := blG1.InG1()
- if inSubGroupGnark != inSubGroupBLST {
- panic(fmt.Sprintf("differing subgroup check, gnark %v, blst %v", inSubGroupGnark, inSubGroupBLST))
+ if inSubGroupKilic != inSubGroupGnark {
+ panic(fmt.Sprintf("differing subgroup check, kilic %v, gnark %v", inSubGroupKilic, inSubGroupGnark))
+ }
+ if inSubGroupKilic != inSubGroupBLST {
+ panic(fmt.Sprintf("differing subgroup check, kilic %v, blst %v", inSubGroupKilic, inSubGroupBLST))
}
return 1
}
func fuzzG2SubgroupChecks(data []byte) int {
input := bytes.NewReader(data)
- gpG2, blG2, err := getG2Points(input)
+ kpG2, cpG2, blG2, err := getG2Points(input)
if err != nil {
return 0
}
- inSubGroupGnark := gpG2.IsInSubGroup()
+ inSubGroupKilic := bls12381.NewG2().InCorrectSubgroup(kpG2)
+ inSubGroupGnark := cpG2.IsInSubGroup()
inSubGroupBLST := blG2.InG2()
- if inSubGroupGnark != inSubGroupBLST {
- panic(fmt.Sprintf("differing subgroup check, gnark %v, blst %v", inSubGroupGnark, inSubGroupBLST))
+ if inSubGroupKilic != inSubGroupGnark {
+ panic(fmt.Sprintf("differing subgroup check, kilic %v, gnark %v", inSubGroupKilic, inSubGroupGnark))
+ }
+ if inSubGroupKilic != inSubGroupBLST {
+ panic(fmt.Sprintf("differing subgroup check, kilic %v, blst %v", inSubGroupKilic, inSubGroupBLST))
}
return 1
}
@@ -77,6 +86,11 @@ func fuzzCrossPairing(data []byte) int {
return 0
}
+ // compute pairing using geth
+ engine := bls12381.NewEngine()
+ engine.AddPair(kpG1, kpG2)
+ kResult := engine.Result()
+
// compute pairing using gnark
cResult, err := gnark.Pair([]gnark.G1Affine{*cpG1}, []gnark.G2Affine{*cpG2})
if err != nil {
@@ -177,6 +191,7 @@ func fuzzCrossG2Add(data []byte) int {
func fuzzCrossG1MultiExp(data []byte) int {
var (
input = bytes.NewReader(data)
+ gethScalars []*bls12381.Fr
gnarkScalars []fr.Element
gnarkPoints []gnark.G1Affine
blstScalars []*blst.Scalar
@@ -194,6 +209,10 @@ func fuzzCrossG1MultiExp(data []byte) int {
if err != nil {
break
}
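+ // Collect the scalar for the kilic (geth) implementation as well, alongside the gnark and blst inputs.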
+ gethScalars = append(gethScalars, bls12381.NewFr().FromBytes(s.Bytes()))
+ var gnarkScalar = &fr.Element{}
+ gnarkScalar = gnarkScalar.SetBigInt(s)
+ gnarkScalars = append(gnarkScalars, *gnarkScalar)
- gnarkScalar := new(fr.Element).SetBigInt(s)
- gnarkScalars = append(gnarkScalars, *gnarkScalar)
diff --git a/tests/fuzzers/bls12381/bls12381_test.go b/tests/fuzzers/bls12381/bls12381_test.go
index d4e5e20e04f7..3ad6f5f14d4a 100644
--- a/tests/fuzzers/bls12381/bls12381_test.go
+++ b/tests/fuzzers/bls12381/bls12381_test.go
@@ -116,15 +116,3 @@ func FuzzG2SubgroupChecks(f *testing.F) {
fuzzG2SubgroupChecks(data)
})
}
-
-func FuzzG2Mul(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- fuzz(blsG2Mul, data)
- })
-}
-
-func FuzzG1Mul(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- fuzz(blsG1Mul, data)
- })
-}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index af2cb63d9461..53472f0e4561 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -228,10 +228,10 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
// RunNoVerify runs a specific subtest and returns the statedb and post-state root.
// Remember to call state.Close after verifying the test result!
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (st StateTestState, root common.Hash, gasUsed uint64, err error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (st StateTestState, root common.Hash, err error) {
config, eips, err := GetChainConfig(subtest.Fork)
if err != nil {
- return st, common.Hash{}, 0, UnsupportedForkError{subtest.Fork}
+ return st, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
@@ -250,7 +250,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post, baseFee)
if err != nil {
- return st, common.Hash{}, 0, err
+ return st, common.Hash{}, err
}
{ // Blob transactions may be present after the Cancun fork.
@@ -260,7 +260,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
// Here, we just do this shortcut smaller fix, since state tests do not
// utilize those codepaths
if len(msg.BlobHashes)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
- return st, common.Hash{}, 0, errors.New("blob gas exceeds maximum")
+ return st, common.Hash{}, errors.New("blob gas exceeds maximum")
}
}
@@ -269,10 +269,10 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
var ttx types.Transaction
err := ttx.UnmarshalBinary(post.TxBytes)
if err != nil {
- return st, common.Hash{}, 0, err
+ return st, common.Hash{}, err
}
if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
- return st, common.Hash{}, 0, err
+ return st, common.Hash{}, err
}
}
@@ -308,7 +308,6 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil {
evm.Config.Tracer.OnTxEnd(nil, err)
}
- return st, common.Hash{}, 0, err
}
// Add 0-value mining reward. This only makes a difference in the cases
// where
@@ -323,7 +322,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
receipt := &types.Receipt{GasUsed: vmRet.UsedGas}
tracer.OnTxEnd(receipt, nil)
}
- return st, root, vmRet.UsedGas, nil
+ return st, root, err
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index f53b10758f31..f7b0ec5942fc 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -61,7 +61,7 @@ func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db da
// StateTrie is not safe for concurrent use.
type StateTrie struct {
trie Trie
- db database.NodeDatabase
+ db database.Database
preimages preimageStore
hashKeyBuf [common.HashLength]byte
secKeyCache map[string][]byte
diff --git a/trie/trie.go b/trie/trie.go
index e3f3f39248d9..da7872dbe04a 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -648,9 +648,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
for _, path := range t.tracer.deletedNodes() {
nodes.AddNode([]byte(path), trienode.NewDeleted())
}
- // If the number of changes is below 100, we let one thread handle it
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root, t.uncommitted > 100)
- t.uncommitted = 0
+ t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
return rootHash, nodes
}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 047a7a4bd85d..7debe6ecbc4c 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -153,15 +153,6 @@ func (set *NodeSet) Size() (int, int) {
return set.updates, set.deletes
}
-// HashSet returns a set of trie nodes keyed by node hash.
-func (set *NodeSet) HashSet() map[common.Hash][]byte {
- ret := make(map[common.Hash][]byte, len(set.Nodes))
- for _, n := range set.Nodes {
- ret[n.Hash] = n.Blob
- }
- return ret
-}
-
// Summary returns a string-representation of the NodeSet.
func (set *NodeSet) Summary() string {
var out = new(strings.Builder)
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go
index 4294f5e4be11..e165ba3479e7 100644
--- a/trie/utils/verkle.go
+++ b/trie/utils/verkle.go
@@ -200,7 +200,7 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
return GetTreeKey(address, treeIndex, subIndex)
}
-func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
+func StorageIndex(bytes []byte) (*uint256.Int, byte) {
// If the storage slot is in the header, we need to add the header offset.
var key uint256.Int
- key.SetBytes(storageKey)
+ key.SetBytes(bytes)
diff --git a/trie/verkle.go b/trie/verkle.go
index 2e4d62cd1041..e09811e0dd92 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -241,6 +241,44 @@ func (t *VerkleTrie) RollBackAccount(addr common.Address) error {
return nil
}
+// RollBackAccount removes the account info + code from the tree, unlike DeleteAccount
+// that will overwrite it with 0s. The first 64 storage slots are also removed.
+func (t *VerkleTrie) RollBackAccount(addr common.Address) error {
+ var (
+ evaluatedAddr = t.cache.Get(addr.Bytes())
+ basicDataKey = utils.BasicDataKeyWithEvaluatedAddress(evaluatedAddr)
+ )
+ basicDataBytes, err := t.root.Get(basicDataKey, t.nodeResolver)
+ if err != nil {
+ return fmt.Errorf("rollback: error finding code size: %w", err)
+ }
+ if len(basicDataBytes) == 0 {
+ return errors.New("rollback: basic data is not existent")
+ }
+ // The code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
+ // before the code size to support bigger integers in the future.
+ // BigEndian.Uint32(...) expects 4 bytes, so we need to shift the offset 1 byte to the left.
+ codeSize := binary.BigEndian.Uint32(basicDataBytes[utils.BasicDataCodeSizeOffset-1:])
+
+ // Delete the account header + first 64 slots + first 128 code chunks
+ _, err = t.root.(*verkle.InternalNode).DeleteAtStem(basicDataKey[:31], t.nodeResolver)
+ if err != nil {
+ return fmt.Errorf("error rolling back account header: %w", err)
+ }
+
+ // Delete all further code
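+ // Code chunks are 31 bytes each; the first 128 chunks live in the account header group and every further group holds 256 chunks.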
+ for i, chunknr := uint64(31*128), uint64(128); i < uint64(codeSize); i, chunknr = i+31*256, chunknr+256 {
+ // evaluate group key at the start of a new group
+ offset := uint256.NewInt(chunknr)
+ key := utils.CodeChunkKeyWithEvaluatedAddress(evaluatedAddr, offset)
+
+ if _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver); err != nil {
+ return fmt.Errorf("error deleting code chunk stem (addr=%x, offset=%d) error: %w", addr[:], offset, err)
+ }
+ }
+ return nil
+}
+
// DeleteStorage implements state.Trie, deleting the specified storage slot from
// the trie. If the storage slot was not existent in the trie, no error will be
// returned. If the trie is corrupted, an error will be returned.
@@ -312,19 +350,21 @@ func (t *VerkleTrie) IsVerkle() bool {
// Proof builds and returns the verkle multiproof for keys, built against
// the pre tree. The post tree is passed in order to add the post values
// to that proof.
-func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) {
+func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte, resolver verkle.NodeResolverFn) (*verkle.VerkleProof, verkle.StateDiff, error) {
var postroot verkle.VerkleNode
if posttrie != nil {
postroot = posttrie.root
}
- proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.FlatdbNodeResolver)
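+ // The supplied resolver is used to load any tree nodes that are not already in memory while building the proof.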
+ proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, resolver)
if err != nil {
return nil, nil, err
}
+
p, kvps, err := verkle.SerializeProof(proof)
if err != nil {
return nil, nil, err
}
+
return p, kvps, nil
}
diff --git a/triedb/database.go b/triedb/database.go
index d3eca57b54dd..618a154bc723 100644
--- a/triedb/database.go
+++ b/triedb/database.go
@@ -56,10 +56,6 @@ var VerkleDefaults = &Config{
// backend defines the methods needed to access/update trie nodes in different
// state scheme.
type backend interface {
- // NodeReader returns a reader for accessing trie nodes within the specified state.
- // An error will be returned if the specified state is not available.
- NodeReader(root common.Hash) (database.NodeReader, error)
-
// Initialized returns an indicator if the state data is already initialized
// according to the state scheme.
Initialized(genesisRoot common.Hash) bool
@@ -77,6 +73,10 @@ type backend interface {
// Close closes the trie database backend and releases all held resources.
Close() error
+
+ // Reader returns a reader for accessing all trie nodes with provided state
+ // root. An error will be returned if the requested state is not available.
+ Reader(root common.Hash) (database.Reader, error)
}
// Database is the wrapper of the underlying backend which is shared by different
@@ -116,10 +116,10 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
return db
}
-// NodeReader returns a reader for accessing trie nodes within the specified state.
-// An error will be returned if the specified state is not available.
-func (db *Database) NodeReader(blockRoot common.Hash) (database.NodeReader, error) {
- return db.backend.NodeReader(blockRoot)
+// Reader returns a reader for accessing all trie nodes with provided state root.
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(blockRoot common.Hash) (database.Reader, error) {
+ return db.backend.Reader(blockRoot)
}
// Update performs a state transition by committing dirty nodes contained in the
diff --git a/triedb/database/database.go b/triedb/database/database.go
index cd7ec1d9314e..86cc8c234c68 100644
--- a/triedb/database/database.go
+++ b/triedb/database/database.go
@@ -16,10 +16,7 @@
package database
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
-)
+import "github.com/ethereum/go-ethereum/common"
-// NodeReader wraps the Node method of a backing trie reader.
-type NodeReader interface {
+// Reader wraps the Node method of a backing trie reader.
+type Reader interface {
@@ -32,9 +29,9 @@ type NodeReader interface {
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
-// NodeDatabase wraps the methods of a backing trie store.
-type NodeDatabase interface {
- // NodeReader returns a node reader associated with the specific state.
+// Database wraps the methods of a backing trie store.
+type Database interface {
+ // Reader returns a node reader associated with the specific state.
// An error will be returned if the specified state is not available.
- NodeReader(stateRoot common.Hash) (NodeReader, error)
+ Reader(stateRoot common.Hash) (Reader, error)
}
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go
index 5de7805c3144..38495c0afa33 100644
--- a/triedb/hashdb/database.go
+++ b/triedb/hashdb/database.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
"github.com/ethereum/go-ethereum/triedb/database"
)
@@ -615,9 +616,9 @@ func (db *Database) Close() error {
return nil
}
-// NodeReader returns a reader for accessing trie nodes within the specified state.
-// An error will be returned if the specified state is not available.
-func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
+// Reader retrieves a node reader belonging to the given state root.
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
if _, err := db.node(root); err != nil {
return nil, fmt.Errorf("state %#x is not available, %v", root, err)
}
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 48d46c7b0899..4fd62aff0810 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -56,6 +56,11 @@ var (
maxDiffLayers = 128
)
// layer is the interface implemented by all state layers which includes some
// public methods and some additional methods for internal usage.
type layer interface {
@@ -63,9 +68,7 @@ type layer interface {
// if the read operation exits abnormally. Specifically, if the layer is
// already stale.
//
- // Note:
- // - the returned node is not a copy, please don't modify it.
- // - no error will be returned if the requested node is not found in database.
+ // Note, no error will be returned if the requested node is not found in database.
node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error)
// rootHash returns the root hash for which this layer was made.
@@ -145,15 +148,15 @@ type Database struct {
// readOnly is the flag whether the mutation is allowed to be applied.
// It will be set automatically when the database is journaled during
// the shutdown to reject all following unexpected mutations.
- readOnly bool // Flag if database is opened in read only mode
- waitSync bool // Flag if database is deactivated due to initial state sync
- isVerkle bool // Flag if database is used for verkle tree
-
- config *Config // Configuration for database
- diskdb ethdb.Database // Persistent storage for matured trie nodes
- tree *layerTree // The group for all known layers
- freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests
- lock sync.RWMutex // Lock to prevent mutations from happening at the same time
+ readOnly bool // Flag if database is opened in read only mode
+ waitSync bool // Flag if database is deactivated due to initial state sync
+ isVerkle bool // Flag if database is used for verkle tree
+ bufferSize int // Memory allowance (in bytes) for caching dirty nodes
+ config *Config // Configuration for database
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ tree *layerTree // The group for all known layers
+ freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
}
// New attempts to load an already existing layer from a persistent key-value
@@ -174,10 +177,11 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
diskdb = rawdb.NewTable(diskdb, string(rawdb.VerklePrefix))
}
db := &Database{
- readOnly: config.ReadOnly,
- isVerkle: isVerkle,
- config: config,
- diskdb: diskdb,
+ readOnly: config.ReadOnly,
+ isVerkle: isVerkle,
+ bufferSize: config.DirtyCacheSize,
+ config: config,
+ diskdb: diskdb,
}
// Construct the layer tree by resolving the in-disk singleton state
// and in-memory layer journal.
@@ -186,7 +190,7 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
// Repair the state history, which might not be aligned with the state
// in the key-value store due to an unclean shutdown.
if err := db.repairHistory(); err != nil {
- log.Crit("Failed to repair state history", "err", err)
+ log.Crit("Failed to repair pathdb", "err", err)
}
// Disable database in case node is still in the initial state sync stage.
if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly {
@@ -194,11 +198,6 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
log.Crit("Failed to disable database", "err", err) // impossible to happen
}
}
- fields := config.fields()
- if db.isVerkle {
- fields = append(fields, "verkle", true)
- }
- log.Info("Initialized path database", fields...)
return db
}
@@ -495,6 +494,19 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
return inited
}
+// SetBufferSize sets the node buffer size to the provided value(in bytes).
+func (db *Database) SetBufferSize(size int) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ if size > maxBufferSize {
+ log.Info("Capped node buffer size", "provided", common.StorageSize(size), "adjusted", common.StorageSize(maxBufferSize))
+ size = maxBufferSize
+ }
+ db.bufferSize = size
+ return db.tree.bottom().setBufferSize(db.bufferSize)
+}
+
// modifyAllowed returns the indicator if mutation is allowed. This function
// assumes the db.lock is already held.
func (db *Database) modifyAllowed() error {
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index 61e0b0928e36..95360ee67a1d 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -107,9 +107,9 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
db = New(disk, &Config{
- StateHistory: historyLimit,
- CleanCacheSize: 16 * 1024,
- WriteBufferSize: 16 * 1024,
+ StateHistory: historyLimit,
+ CleanCacheSize: 16 * 1024,
+ DirtyCacheSize: 16 * 1024,
}, false)
obj = &tester{
db: db,
@@ -309,7 +309,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
delete(t.storages, addrHash)
}
}
- return root, ctx.nodes, NewStateSetWithOrigin(ctx.accountOrigin, ctx.storageOrigin)
+ return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
}
// lastHash returns the latest root hash, or empty if nothing is cached.
diff --git a/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go
index ecc318614f5c..d923e597baf7 100644
--- a/triedb/pathdb/difflayer.go
+++ b/triedb/pathdb/difflayer.go
@@ -87,10 +87,13 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
// If the trie node is known locally, return it
- n, ok := dl.nodes.node(owner, path)
+ subset, ok := dl.nodes[owner]
if ok {
- dirtyNodeHitMeter.Mark(1)
- dirtyNodeHitDepthHist.Update(int64(depth))
- dirtyNodeReadMeter.Mark(int64(len(n.Blob)))
- return n.Blob, n.Hash, &nodeLoc{loc: locDiffLayer, depth: depth}, nil
+ n, ok := subset[string(path)]
+ if ok {
+ dirtyHitMeter.Mark(1)
+ dirtyNodeHitDepthHist.Update(int64(depth))
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, n.Hash, &nodeLoc{loc: locDiffLayer, depth: depth}, nil
+ }
}
// Trie node unknown to this layer, resolve from parent
return dl.parent.node(owner, path, depth+1)
diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
index 61e8b4e0641d..45ef25d1752b 100644
--- a/triedb/pathdb/difflayer_test.go
+++ b/triedb/pathdb/difflayer_test.go
@@ -30,7 +30,7 @@ import (
func emptyLayer() *diskLayer {
return &diskLayer{
db: New(rawdb.NewMemoryDatabase(), nil, false),
- buffer: newBuffer(defaultBufferSize, nil, 0),
+ buffer: newNodeBuffer(DefaultBufferSize, nil, 0),
}
}
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index edbe63096853..b5e860cfcc85 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -25,6 +25,8 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
// diskLayer is a low level persistent layer built on top of a key-value store.
@@ -106,22 +108,22 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
// layer as stale.
n, found := dl.buffer.node(owner, path)
if found {
- dirtyNodeHitMeter.Mark(1)
- dirtyNodeReadMeter.Mark(int64(len(n.Blob)))
+ dirtyHitMeter.Mark(1)
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
dirtyNodeHitDepthHist.Update(int64(depth))
return n.Blob, n.Hash, &nodeLoc{loc: locDirtyCache, depth: depth}, nil
}
- dirtyNodeMissMeter.Mark(1)
+ dirtyMissMeter.Mark(1)
// Try to retrieve the trie node from the clean memory cache
h := newHasher()
defer h.release()
- key := nodeCacheKey(owner, path)
- if dl.nodes != nil {
- if blob := dl.nodes.Get(nil, key); len(blob) > 0 {
- cleanNodeHitMeter.Mark(1)
- cleanNodeReadMeter.Mark(int64(len(blob)))
+ key := cacheKey(owner, path)
+ if dl.cleans != nil {
+ if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
+ cleanHitMeter.Mark(1)
+ cleanReadMeter.Mark(int64(len(blob)))
return blob, h.hash(blob), &nodeLoc{loc: locCleanCache, depth: depth}, nil
}
- cleanNodeMissMeter.Mark(1)
+ cleanMissMeter.Mark(1)
@@ -133,9 +135,9 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
} else {
blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
}
- if dl.nodes != nil && len(blob) > 0 {
- dl.nodes.Set(key, blob)
- cleanNodeWriteMeter.Mark(int64(len(blob)))
+ if dl.cleans != nil && len(blob) > 0 {
+ dl.cleans.Set(key, blob)
+ cleanWriteMeter.Mark(int64(len(blob)))
}
return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
}
diff --git a/triedb/pathdb/execute.go b/triedb/pathdb/execute.go
index e24d0710f3da..9074e4debf26 100644
--- a/triedb/pathdb/execute.go
+++ b/triedb/pathdb/execute.go
@@ -43,7 +43,7 @@ type context struct {
// apply processes the given state diffs, updates the corresponding post-state
// and returns the trie nodes that have been modified.
-func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
+func apply(db database.Database, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
tr, err := trie.New(trie.TrieID(postRoot), db)
if err != nil {
return nil, err
@@ -80,7 +80,7 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash,
// updateAccount the account was present in prev-state, and may or may not
// existent in post-state. Apply the reverse diff and verify if the storage
// root matches the one in prev-state account.
-func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address) error {
+func updateAccount(ctx *context, db database.Database, addr common.Address) error {
// The account was present in prev-state, decode it from the
// 'slim-rlp' format bytes.
h := newHasher()
@@ -141,7 +141,7 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
// deleteAccount the account was not present in prev-state, and is expected
// to be existent in post-state. Apply the reverse diff and verify if the
// account and storage is wiped out correctly.
-func deleteAccount(ctx *context, db database.NodeDatabase, addr common.Address) error {
+func deleteAccount(ctx *context, db database.Database, addr common.Address) error {
// The account must be existent in post-state, load the account.
h := newHasher()
defer h.release()
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index e1cd98115374..d7fefd241bc1 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triestate"
"golang.org/x/exp/maps"
)
@@ -244,12 +245,12 @@ type history struct {
// newHistory constructs the state history object with provided state change set.
-func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *history {
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
var (
- accountList = maps.Keys(accounts)
+ accountList = maps.Keys(states.Accounts)
storageList = make(map[common.Address][]common.Hash)
)
slices.SortFunc(accountList, common.Address.Cmp)
- for addr, slots := range storages {
+ for addr, slots := range states.Storages {
slist := maps.Keys(slots)
slices.SortFunc(slist, common.Hash.Cmp)
storageList[addr] = slist
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index d430706dee8a..2f42310eab2f 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
// randomStateSet generates a random state change set.
@@ -46,12 +47,11 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
account := generateAccount(types.EmptyRootHash)
accounts[addr] = types.SlimAccountRLP(account)
}
- return accounts, storages
+ return triestate.New(accounts, storages)
}
func makeHistory() *history {
- accounts, storages := randomStateSet(3)
- return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages)
+ return newHistory(testrand.Hash(), types.EmptyRootHash, 0, randomStateSet(3))
}
func makeHistories(n int) []*history {
@@ -61,8 +61,7 @@ func makeHistories(n int) []*history {
)
for i := 0; i < n; i++ {
root := testrand.Hash()
- accounts, storages := randomStateSet(3)
- h := newHistory(root, parent, uint64(i), accounts, storages)
+ h := newHistory(root, parent, uint64(i), randomStateSet(3))
parent = root
result = append(result, h)
}
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index 70fa1fb9f8f1..2f4b1290d54b 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -47,6 +47,32 @@ var (
// - Version 1: storage.Incomplete field is removed
const journalVersion uint64 = 1
+// journalNode represents a trie node persisted in the journal.
+type journalNode struct {
+ Path []byte // Path of the node in the trie
+ Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
+}
+
+// journalNodes represents a list of trie nodes belonging to a single account
+// or the main account trie.
+type journalNodes struct {
+ Owner common.Hash
+ Nodes []journalNode
+}
+
+// journalAccounts represents a list of accounts belonging to the layer.
+type journalAccounts struct {
+ Addresses []common.Address
+ Accounts [][]byte
+}
+
+// journalStorage represents a list of storage slots belonging to an account.
+type journalStorage struct {
+ Account common.Address
+ Hashes []common.Hash
+ Slots [][]byte
+}
+
// loadJournal tries to parse the layer journal from the disk.
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
journal := rawdb.ReadTrieJournal(db.diskdb)
@@ -164,7 +190,34 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
if err := stateSet.decode(r); err != nil {
return nil, err
}
- return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, &nodes, &stateSet), r)
+ // Read state changes from journal
+ var (
+ jaccounts journalAccounts
+ jstorages []journalStorage
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ )
+ if err := r.Decode(&jaccounts); err != nil {
+ return nil, fmt.Errorf("load diff accounts: %v", err)
+ }
+ for i, addr := range jaccounts.Addresses {
+ accounts[addr] = jaccounts.Accounts[i]
+ }
+ if err := r.Decode(&jstorages); err != nil {
+ return nil, fmt.Errorf("load diff storages: %v", err)
+ }
+ for _, entry := range jstorages {
+ set := make(map[common.Hash][]byte)
+ for i, h := range entry.Hashes {
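+ // A zero-length slot value in the journal marks a deleted entry; keep it as an explicit nil.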
+ if len(entry.Slots[i]) > 0 {
+ set[h] = entry.Slots[i]
+ } else {
+ set[h] = nil
+ }
+ }
+ storages[entry.Account] = set
+ }
+ return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages)), r)
}
// journal implements the layer interface, marshaling the un-flushed trie nodes
@@ -218,7 +271,19 @@ func (dl *diffLayer) journal(w io.Writer) error {
if err := dl.states.encode(w); err != nil {
return err
}
- log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block)
+ storage := make([]journalStorage, 0, len(dl.states.Storages))
+ for addr, slots := range dl.states.Storages {
+ entry := journalStorage{Account: addr}
+ for slotHash, slot := range slots {
+ entry.Hashes = append(entry.Hashes, slotHash)
+ entry.Slots = append(entry.Slots, slot)
+ }
+ storage = append(storage, entry)
+ }
+ if err := rlp.Encode(w, storage); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes))
return nil
}
diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go
new file mode 100644
index 000000000000..d3492602c8b7
--- /dev/null
+++ b/triedb/pathdb/nodebuffer.go
@@ -0,0 +1,283 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+ "maps"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// nodebuffer is a collection of modified trie nodes to aggregate the disk
+// write. The content of the nodebuffer must be checked before diving into
+// disk (since it basically is not-yet-written data).
+type nodebuffer struct {
+ layers uint64 // The number of diff layers aggregated inside
+ size uint64 // The size of aggregated writes
+ limit uint64 // The maximum memory allowance in bytes
+ nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
+}
+
+// newNodeBuffer initializes the node buffer with the provided nodes.
+func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer {
+ if nodes == nil {
+ nodes = make(map[common.Hash]map[string]*trienode.Node)
+ }
+ var size uint64
+ for _, subset := range nodes {
+ for path, n := range subset {
+ size += uint64(len(n.Blob) + len(path))
+ }
+ }
+ return &nodebuffer{
+ layers: layers,
+ nodes: nodes,
+ size: size,
+ limit: uint64(limit),
+ }
+}
+
+// node retrieves the trie node with given node info.
+func (b *nodebuffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
+ subset, ok := b.nodes[owner]
+ if !ok {
+ return nil, false
+ }
+ n, ok := subset[string(path)]
+ if !ok {
+ return nil, false
+ }
+ return n, true
+}
+
+// commit merges the dirty nodes into the nodebuffer. This operation won't take
+// the ownership of the nodes map which belongs to the bottom-most diff layer.
+// It will just hold the node references from the given map which are safe to
+// copy.
+func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
+ var (
+ delta int64
+ overwrite int64
+ overwriteSize int64
+ )
+ for owner, subset := range nodes {
+ current, exist := b.nodes[owner]
+ if !exist {
+ // Allocate a new map for the subset instead of claiming it directly
+ // from the passed map to avoid potential concurrent map read/write.
+ // The nodes belong to original diff layer are still accessible even
+ // after merging, thus the ownership of nodes map should still belong
+ // to original layer and any mutation on it should be prevented.
+ for path, n := range subset {
+ delta += int64(len(n.Blob) + len(path))
+ }
+ b.nodes[owner] = maps.Clone(subset)
+ continue
+ }
+ for path, n := range subset {
+ if orig, exist := current[path]; !exist {
+ delta += int64(len(n.Blob) + len(path))
+ } else {
+ delta += int64(len(n.Blob) - len(orig.Blob))
+ overwrite++
+ overwriteSize += int64(len(orig.Blob) + len(path))
+ }
+ current[path] = n
+ }
+ b.nodes[owner] = current
+ }
+ b.updateSize(delta)
+ b.layers++
+ gcNodesMeter.Mark(overwrite)
+ gcBytesMeter.Mark(overwriteSize)
+ return b
+}
+
+// revert is the reverse operation of commit. It also merges the provided nodes
+// into the nodebuffer, the difference is that the provided node set should
+// revert the changes made by the last state transition.
+func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+ // Short circuit if no embedded state transition to revert.
+ if b.layers == 0 {
+ return errStateUnrecoverable
+ }
+ b.layers--
+
+ // Reset the entire buffer if only a single transition left.
+ if b.layers == 0 {
+ b.reset()
+ return nil
+ }
+ var delta int64
+ for owner, subset := range nodes {
+ current, ok := b.nodes[owner]
+ if !ok {
+ panic(fmt.Sprintf("non-existent subset (%x)", owner))
+ }
+ for path, n := range subset {
+ orig, ok := current[path]
+ if !ok {
+ // There is a special case in MPT that one child is removed from
+ // a fullNode which only has two children, and then a new child
+ // with different position is immediately inserted into the fullNode.
+ // In this case, the clean child of the fullNode will also be
+ // marked as dirty because of node collapse and expansion.
+ //
+ // In case of database rollback, don't panic if this "clean"
+ // node occurs which is not present in buffer.
+ var blob []byte
+ if owner == (common.Hash{}) {
+ blob = rawdb.ReadAccountTrieNode(db, []byte(path))
+ } else {
+ blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+ }
+ // Ignore the clean node in the case described above.
+ if bytes.Equal(blob, n.Blob) {
+ continue
+ }
+ panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+ }
+ current[path] = n
+ delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+ }
+ }
+ b.updateSize(delta)
+ return nil
+}
+
+// updateSize updates the total cache size by the given delta.
+func (b *nodebuffer) updateSize(delta int64) {
+ size := int64(b.size) + delta
+ if size >= 0 {
+ b.size = uint64(size)
+ return
+ }
+ s := b.size
+ b.size = 0
+ log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta))
+}
+
+// reset cleans up the disk cache.
+func (b *nodebuffer) reset() {
+ b.layers = 0
+ b.size = 0
+ b.nodes = make(map[common.Hash]map[string]*trienode.Node)
+}
+
+// empty returns an indicator if nodebuffer contains any state transition inside.
+func (b *nodebuffer) empty() bool {
+ return b.layers == 0
+}
+
+// setSize sets the buffer size to the provided number, and invokes a flush
+// operation if the current memory usage exceeds the new limit.
+func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
+ b.limit = uint64(size)
+ return b.flush(db, clean, id, false)
+}
+
+// allocBatch returns a database batch with pre-allocated buffer.
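+// The estimate covers the database key prefixes plus the aggregated node blobs, with roughly 10% slack.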
+func (b *nodebuffer) allocBatch(db ethdb.KeyValueStore) ethdb.Batch {
+ var metasize int
+ for owner, nodes := range b.nodes {
+ if owner == (common.Hash{}) {
+ metasize += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
+ } else {
+ metasize += len(nodes) * (len(rawdb.TrieNodeStoragePrefix) + common.HashLength) // database key prefix + owner
+ }
+ }
+ return db.NewBatchWithSize((metasize + int(b.size)) * 11 / 10) // extra 10% for potential pebble internal stuff
+}
+
+// flush persists the in-memory dirty trie node into the disk if the configured
+// memory threshold is reached. Note, all data must be written atomically.
+func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
+ if b.size <= b.limit && !force {
+ return nil
+ }
+ // Ensure the target state id is aligned with the internal counter.
+ head := rawdb.ReadPersistentStateID(db)
+ if head+b.layers != id {
+ return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
+ }
+ var (
+ start = time.Now()
+ batch = b.allocBatch(db)
+ )
+ nodes := writeNodes(batch, b.nodes, clean)
+ rawdb.WritePersistentStateID(batch, id)
+
+ // Flush all mutations in a single batch
+ size := batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ commitBytesMeter.Mark(int64(size))
+ commitNodesMeter.Mark(int64(nodes))
+ commitTimeTimer.UpdateSince(start)
+ log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
+ b.reset()
+ return nil
+}
+
+// writeNodes writes the trie nodes into the provided database batch.
+// Note this function will also inject all the newly written nodes
+// into clean cache.
+func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+ for owner, subset := range nodes {
+ for path, n := range subset {
+ if n.IsDeleted() {
+ if owner == (common.Hash{}) {
+ rawdb.DeleteAccountTrieNode(batch, []byte(path))
+ } else {
+ rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
+ }
+ if clean != nil {
+ clean.Del(cacheKey(owner, []byte(path)))
+ }
+ } else {
+ if owner == (common.Hash{}) {
+ rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+ } else {
+ rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+ }
+ if clean != nil {
+ clean.Set(cacheKey(owner, []byte(path)), n.Blob)
+ }
+ }
+ }
+ total += len(subset)
+ }
+ return total
+}
+
+// cacheKey constructs the unique key of clean cache.
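+// Account trie nodes are keyed by path alone; storage trie nodes are prefixed with the owner hash.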
+func cacheKey(owner common.Hash, path []byte) []byte {
+ if owner == (common.Hash{}) {
+ return path
+ }
+ return append(owner.Bytes(), path...)
+}
diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go
index 2ca4a0205b9b..6a58493ba694 100644
--- a/triedb/pathdb/reader.go
+++ b/triedb/pathdb/reader.go
@@ -45,14 +45,14 @@ func (loc *nodeLoc) string() string {
return fmt.Sprintf("loc: %s, depth: %d", loc.loc, loc.depth)
}
-// reader implements the database.NodeReader interface, providing the functionalities to
+// reader implements the database.Reader interface, providing the functionalities to
// retrieve trie nodes by wrapping the internal state layer.
type reader struct {
layer layer
noHashCheck bool
}
-// Node implements database.NodeReader interface, retrieving the node with specified
+// Node implements database.Reader interface, retrieving the node with specified
// node info. Don't modify the returned byte slice since it's not deep-copied
// and still be referenced by database.
func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
@@ -84,8 +84,8 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
return blob, nil
}
-// NodeReader retrieves a layer belonging to the given state root.
-func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
+// Reader retrieves a layer belonging to the given state root.
+func (db *Database) Reader(root common.Hash) (database.Reader, error) {
layer := db.tree.get(root)
if layer == nil {
return nil, fmt.Errorf("state %#x is not available", root)
diff --git a/version/version.go b/version/version.go
deleted file mode 100644
index f010adf0353e..000000000000
--- a/version/version.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package version
-
-const (
- Major = 1 // Major version component of the current release
- Minor = 14 // Minor version component of the current release
- Patch = 12 // Patch version component of the current release
- Meta = "stable" // Version metadata to append to the version string
-)