
Commit

[iota-indexer] Remove mysql-feature gated code from indexer, ungate postgres-feature code (#4954)

* Remove mysql-feature gated code from indexer, ungate postgres-feature code

* Remove empty default features from Cargo.tomls

* Simplify code

* Remove TiDB readme section
tomxey authored and lzpap committed Jan 30, 2025
1 parent 69ee18b commit 5a20ba1
Showing 44 changed files with 396 additions and 1,676 deletions.
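
The recurring pattern across the diffs below is that code previously gated behind the `postgres-feature` Cargo feature becomes unconditional, and generic `diesel` backend parameters are dropped in favour of `PgConnection`. A minimal, hypothetical sketch of that change (the `connect` helper is illustrative only and is not a function from this repository):

```rust
// Minimal, hypothetical sketch of the pattern applied throughout this commit;
// `connect` is an illustrative helper, not a function from this repository.
use diesel::{Connection, PgConnection};

// Before: compiled only when the "postgres-feature" flag was enabled, and
// call sites had to name the backend, e.g.
// `start_test_indexer::<diesel::PgConnection>(...)`.
//
//   #[cfg(feature = "postgres-feature")]
//   fn connect(db_url: &str) -> PgConnection { ... }

// After: Postgres is the only supported backend, so the cfg attribute and the
// generic backend parameter are gone and the code compiles unconditionally.
fn connect(db_url: &str) -> PgConnection {
    PgConnection::establish(db_url).expect("failed to connect to Postgres")
}
```

Call sites such as `start_test_indexer`, `IndexerReader`, and `new_connection_pool` in the hunks below drop their `::<diesel::PgConnection>` type arguments for the same reason.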
60 changes: 1 addition & 59 deletions Cargo.lock


4 changes: 2 additions & 2 deletions crates/iota-cluster-test/src/cluster.rs
@@ -242,7 +242,7 @@ impl Cluster for LocalNewCluster {
(options.pg_address.clone(), indexer_address)
{
// Start in writer mode
start_test_indexer::<diesel::PgConnection>(
start_test_indexer(
Some(pg_address.clone()),
fullnode_url.clone(),
ReaderWriterConfig::writer_mode(None),
@@ -252,7 +252,7 @@ impl Cluster for LocalNewCluster {
.await;

// Start in reader mode
start_test_indexer::<diesel::PgConnection>(
start_test_indexer(
Some(pg_address),
fullnode_url.clone(),
ReaderWriterConfig::reader_mode(indexer_address.to_string()),
5 changes: 1 addition & 4 deletions crates/iota-graphql-rpc/Cargo.toml
@@ -59,7 +59,7 @@ bin-version.workspace = true
iota-graphql-config.workspace = true
iota-graphql-rpc-client.workspace = true
iota-graphql-rpc-headers.workspace = true
iota-indexer = { workspace = true, default-features = true }
iota-indexer.workspace = true
iota-json-rpc.workspace = true
iota-json-rpc-types.workspace = true
iota-metrics.workspace = true
@@ -95,10 +95,7 @@ iota-test-transaction-builder.workspace = true
simulacrum.workspace = true

[features]
default = ["pg_backend", "postgres-feature"]
postgres-feature = ["diesel/postgres", "diesel/postgres_backend"]
pg_integration = []
pg_backend = []

[package.metadata.cargo-udeps.ignore]
development = ["serial_test", "simulacrum"]
9 changes: 4 additions & 5 deletions crates/iota-graphql-rpc/src/context_data/db_data_provider.rs
@@ -4,7 +4,6 @@

use std::{collections::BTreeMap, time::Duration};

use diesel::PgConnection;
use iota_indexer::{
apis::GovernanceReadApi, db::ConnectionPoolConfig, indexer_reader::IndexerReader,
};
@@ -20,11 +19,11 @@ use crate::{
};

pub(crate) struct PgManager {
pub inner: IndexerReader<PgConnection>,
pub inner: IndexerReader,
}

impl PgManager {
pub(crate) fn new(inner: IndexerReader<PgConnection>) -> Self {
pub(crate) fn new(inner: IndexerReader) -> Self {
Self { inner }
}

@@ -34,11 +33,11 @@ impl PgManager {
db_url: impl Into<String>,
pool_size: u32,
timeout_ms: u64,
) -> Result<IndexerReader<PgConnection>, Error> {
) -> Result<IndexerReader, Error> {
let mut config = ConnectionPoolConfig::default();
config.set_pool_size(pool_size);
config.set_statement_timeout(Duration::from_millis(timeout_ms));
IndexerReader::<PgConnection>::new_with_config(db_url, config)
IndexerReader::new_with_config(db_url, config)
.map_err(|e| Error::Internal(format!("Failed to create reader: {e}")))
}
}
10 changes: 3 additions & 7 deletions crates/iota-graphql-rpc/src/data/pg.rs
@@ -21,7 +21,7 @@ use crate::{config::Limits, data::QueryExecutor, error::Error, metrics::Metrics}

#[derive(Clone)]
pub(crate) struct PgExecutor {
pub inner: IndexerReader<diesel::PgConnection>,
pub inner: IndexerReader,
pub limits: Limits,
pub metrics: Metrics,
}
@@ -34,11 +34,7 @@ pub(crate) struct PgConnection<'c> {
pub(crate) struct ByteaLiteral<'a>(pub &'a [u8]);

impl PgExecutor {
pub(crate) fn new(
inner: IndexerReader<diesel::PgConnection>,
limits: Limits,
metrics: Metrics,
) -> Self {
pub(crate) fn new(inner: IndexerReader, limits: Limits, metrics: Metrics) -> Self {
Self {
inner,
limits,
@@ -220,7 +216,7 @@ mod tests {
#[test]
fn test_query_cost() {
let connection_config = ConnectionConfig::default();
let pool = new_connection_pool::<diesel::PgConnection>(
let pool = new_connection_pool(
&connection_config.db_url,
Some(connection_config.db_pool_size),
)
6 changes: 3 additions & 3 deletions crates/iota-graphql-rpc/src/test_infra/cluster.rs
@@ -33,7 +33,7 @@ pub const DEFAULT_INTERNAL_DATA_SOURCE_PORT: u16 = 3000;

pub struct ExecutorCluster {
pub executor_server_handle: JoinHandle<()>,
pub indexer_store: PgIndexerStore<diesel::PgConnection>,
pub indexer_store: PgIndexerStore,
pub indexer_join_handle: JoinHandle<Result<(), IndexerError>>,
pub graphql_server_join_handle: JoinHandle<()>,
pub graphql_client: SimpleClient,
@@ -44,7 +44,7 @@ pub struct ExecutorCluster {

pub struct Cluster {
pub validator_fullnode_handle: TestCluster,
pub indexer_store: PgIndexerStore<diesel::PgConnection>,
pub indexer_store: PgIndexerStore,
pub indexer_join_handle: JoinHandle<Result<(), IndexerError>>,
pub graphql_server_join_handle: JoinHandle<()>,
pub graphql_client: SimpleClient,
@@ -348,7 +348,7 @@ impl ExecutorCluster {
self.cancellation_token.cancel();
let _ = join!(self.graphql_server_join_handle, self.indexer_join_handle);
let db_url = self.graphql_connection_config.db_url.clone();
force_delete_database::<diesel::PgConnection>(db_url).await;
force_delete_database(db_url).await;
}

pub async fn force_objects_snapshot_catchup(&self, start_cp: u64, end_cp: u64) {
8 changes: 1 addition & 7 deletions crates/iota-graphql-rpc/src/types/event/mod.rs
@@ -247,7 +247,6 @@ impl Event {
tx_sequence_number: stored_tx.tx_sequence_number,
event_sequence_number: idx as i64,
transaction_digest: stored_tx.transaction_digest.clone(),
#[cfg(feature = "postgres-feature")]
senders: vec![Some(native_event.sender.to_vec())],
package: native_event.package_id.to_vec(),
module: native_event.transaction_module.to_string(),
@@ -269,12 +268,7 @@ impl Event {
stored: StoredEvent,
checkpoint_viewed_at: u64,
) -> Result<Self, Error> {
let Some(Some(sender_bytes)) = ({
#[cfg(feature = "postgres-feature")]
{
stored.senders.first()
}
}) else {
let Some(Some(sender_bytes)) = stored.senders.first() else {
return Err(Error::Internal("No senders found for event".to_string()));
};
let sender = NativeIotaAddress::from_bytes(sender_bytes)
7 changes: 1 addition & 6 deletions crates/iota-indexer/Cargo.toml
@@ -16,14 +16,13 @@ bcs.workspace = true
cached.workspace = true
chrono.workspace = true
clap.workspace = true
diesel = { workspace = true, optional = true }
diesel = { workspace = true, features = ["postgres", "postgres_backend"] }
diesel_migrations = "2.0.0"
downcast = "0.11.0"
fastcrypto = { workspace = true, features = ["copy_key"] }
futures.workspace = true
itertools.workspace = true
jsonrpsee.workspace = true
mysqlclient-sys = { version = "0.4", optional = true }
prometheus.workspace = true
rayon.workspace = true
regex.workspace = true
@@ -60,10 +59,6 @@ telemetry-subscribers.workspace = true
[features]
pg_integration = []
shared_test_runtime = []
default = ["postgres-feature"]
postgres-feature = ["diesel/postgres", "diesel/postgres_backend"]
mysql-feature = ["diesel/mysql", "diesel/mysql_backend", "dep:mysqlclient-sys"]
bundled-mysql = ["mysqlclient-sys?/bundled"]

[dev-dependencies]
# external dependencies
53 changes: 0 additions & 53 deletions crates/iota-indexer/README.md
@@ -120,56 +120,3 @@ For a better testing experience is possible to use [nextest](https://nexte.st/)
# run tests requiring only postgres integration
cargo nextest run --features pg_integration --test-threads 1
```

## Steps to run locally with TiDB (experimental)

### Prerequisites

1. Install TiDB

```sh
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
```

2. Install a compatible version of MySQL (At the time of writing, this is MySQL 8.0 -- note that 8.3 is incompatible).

```sh
brew install mysql@8.0
```

3. Install a version of `diesel_cli` that supports MySQL (and probably also Postgres). This version of the CLI needs to be built against the version of MySQL that was installed in the previous step (compatible with the local installation of TiDB, 8.0.37 at time of writing).

```sh
MYSQLCLIENT_LIB_DIR=/opt/homebrew/Cellar/mysql@8.0/8.0.37/lib/ cargo install diesel_cli --no-default-features --features postgres --features mysql --force
```

### Run the indexer

1.Run TiDB

```sh
tiup playground
```

2.Verify tidb is running by connecting to it using the mysql client, create database `test`

```sh
mysql --comments --host 127.0.0.1 --port 4000 -u root
create database test;
```

3.DB setup, under `iota/crates/iota-indexer` run:

```sh
# an example DATABASE_URL is "mysql://root:password@127.0.0.1:4000/test"
diesel setup --database-url="<DATABASE_URL>" --migration-dir='migrations/mysql'
diesel database reset --database-url="<DATABASE_URL>" --migration-dir='migrations/mysql'
```

Note that you need an existing database for this to work. Using the DATABASE_URL example in the comment of the previous code, replace `test` with the name of your database.
4. Run indexer as a writer, which pulls data from fullnode and writes data to DB

```sh
# Change the RPC_CLIENT_URL to http://0.0.0.0:9000 to run indexer against local validator & fullnode
cargo run --bin iota-indexer --features mysql-feature --no-default-features -- --db-url "<DATABASE_URL>" --rpc-client-url "https://api.devnet.iota.cafe:443" --fullnode-sync-worker --reset-db
```
6 changes: 3 additions & 3 deletions crates/iota-indexer/examples/index_genesis_transaction.rs
@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0

use clap::Parser;
use diesel::{ExpressionMethods, PgConnection, QueryDsl, RunQueryDsl};
use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl};
use iota_genesis_builder::{Builder as GenesisBuilder, SnapshotSource, SnapshotUrl};
use iota_indexer::{
db::{self, reset_database},
@@ -107,7 +107,7 @@ pub async fn main() -> Result<(), IndexerError> {
let expected_transactions = bcs::to_bytes(&db_txn.sender_signed_data).unwrap();
let expected_effects = bcs::to_bytes(&db_txn.effects).unwrap();

let pg_store = create_pg_store::<PgConnection>(DEFAULT_DB_URL.to_string().into(), true);
let pg_store = create_pg_store(DEFAULT_DB_URL.to_string().into(), true);
reset_database(&mut pg_store.blocking_cp().get().unwrap()).unwrap();
pg_store.persist_transactions(vec![db_txn]).await.unwrap();

@@ -121,7 +121,7 @@ pub async fn main() -> Result<(), IndexerError> {
.set_genesis_large_object_as_inner_data(&pg_store.blocking_cp())
.unwrap();

let reader = IndexerReader::<PgConnection>::new(DEFAULT_DB_URL.to_owned())?;
let reader = IndexerReader::new(DEFAULT_DB_URL.to_owned())?;
// We just want to verify that the call succeeds.
let _coin_metadata = reader
.get_coin_metadata_in_blocking_task("0x2::iota::IOTA".parse().unwrap())
(Diffs for the remaining changed files are not shown here.)
