Commit 9429aeb

fix clippy and refactor some naming.
1 parent c2f35ae commit 9429aeb

File tree

1 file changed (+19 -21 lines changed)

1 file changed

+19
-21
lines changed

integration_tests/src/database.rs

@@ -19,7 +19,7 @@ use std::{
     fs::File,
     process::{Child, Command},
     sync::Arc,
-    time::{Duration, Instant},
+    time::Duration,
 };
 
 use async_trait::async_trait;
@@ -30,7 +30,6 @@ use ceresdb_client::{
 };
 use reqwest::{ClientBuilder, Url};
 use sqlness::{Database, QueryContext};
-use uuid::Timestamp;
 
 const SERVER_GRPC_ENDPOINT_ENV: &str = "CERESDB_SERVER_GRPC_ENDPOINT";
 const SERVER_HTTP_ENDPOINT_ENV: &str = "CERESDB_SERVER_HTTP_ENDPOINT";
@@ -83,7 +82,7 @@ pub struct CeresDBCluster {
 
     /// Used in meta health check
     db_client: Arc<dyn DbClient>,
-    health_check_sql: String,
+    meta_stable_check_sql: String,
 }
 
 impl CeresDBServer {
@@ -123,15 +122,15 @@ impl Backend for CeresDBServer {
 }
 
 impl CeresDBCluster {
-    async fn check_meta_health(&self) -> bool {
+    async fn check_meta_stable(&self) -> bool {
         let query_ctx = RpcContext {
             database: Some("public".to_string()),
             timeout: None,
         };
 
         let query_req = Request {
             tables: vec![],
-            sql: self.health_check_sql.clone(),
+            sql: self.meta_stable_check_sql.clone(),
         };
 
         let result = self.db_client.sql_query(&query_ctx, &query_req).await;
@@ -175,51 +174,50 @@ impl Backend for CeresDBCluster {
         let server0 = CeresDBServer::spawn(ceresdb_bin.clone(), ceresdb_config_0, stdout0);
         let server1 = CeresDBServer::spawn(ceresdb_bin, ceresdb_config_1, stdout1);
 
-        // Health check context
+        // Meta stable check context
         let endpoint = env::var(SERVER_GRPC_ENDPOINT_ENV).unwrap_or_else(|_| {
             panic!("Cannot read server endpoint from env {SERVER_GRPC_ENDPOINT_ENV:?}")
         });
         let db_client = Builder::new(endpoint, Mode::Proxy).build();
 
-        let health_check_sql = format!(
-            r#"CREATE TABLE `health_check_{}`
+        let meta_stable_check_sql = format!(
+            r#"CREATE TABLE `stable_check_{}`
     (`name` string TAG, `value` double NOT NULL, `t` timestamp NOT NULL, TIMESTAMP KEY(t))"#,
-            "asfdasfadsfad"
+            uuid::Uuid::new_v4()
         );
 
         Self {
             server0,
             server1,
             ceresmeta_process,
             db_client,
-            health_check_sql,
+            meta_stable_check_sql,
         }
     }
 
     async fn wait_for_ready(&self) {
-        println!("wait for cluster service initialized...\n");
-        tokio::time::sleep(Duration::from_secs(
-            20 as u64,
-        ))
-        .await;
-
-        println!("wait for cluster service stable begin...\n");
+        println!("wait for cluster service initialized...");
+        tokio::time::sleep(Duration::from_secs(20_u64)).await;
+
+        println!("wait for cluster service stable begin...");
         let mut wait_cnt = 0;
         let wait_max = 6;
         loop {
             if wait_cnt >= wait_max {
-                println!("wait too long for cluster service stable, maybe somethings went wrong...");
+                println!(
+                    "wait too long for cluster service stable, maybe somethings went wrong..."
+                );
                 return;
             }
 
-            if self.check_meta_health().await {
-                println!("wait cluster service stable finished...\n");
+            if self.check_meta_stable().await {
+                println!("wait for cluster service stable finished...");
                 return;
             }
 
             wait_cnt += 1;
             let has_waited = wait_cnt * CLUSTER_CERESDB_HEALTH_CHECK_INTERVAL_SECONDS;
-            println!("waiting for cluster service stable, has_waited:{has_waited}s\n");
+            println!("waiting for cluster service stable, has_waited:{has_waited}s");
             tokio::time::sleep(Duration::from_secs(
                 CLUSTER_CERESDB_HEALTH_CHECK_INTERVAL_SECONDS as u64,
             ))
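
Note on the table-name change: the old code interpolated a hardcoded string ("asfdasfadsfad") into the CREATE TABLE statement, so repeated checks could collide with a leftover table, and it left the imported uuid crate unused (the removed use uuid::Timestamp is likely what clippy flagged). Generating the suffix with uuid::Uuid::new_v4() makes each check's table name unique per run. A minimal standalone sketch of the pattern, assuming the uuid crate with its v4 feature enabled (not the commit's code verbatim):

use uuid::Uuid;

fn main() {
    // Each call to new_v4() yields a fresh random UUID, so repeated runs
    // create distinctly named tables, e.g. `stable_check_67e55044-...`.
    let sql = format!(
        r#"CREATE TABLE `stable_check_{}`
    (`name` string TAG, `value` double NOT NULL, `t` timestamp NOT NULL, TIMESTAMP KEY(t))"#,
        Uuid::new_v4()
    );
    println!("{sql}");
}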

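The wait_for_ready refactor keeps the same bounded-polling shape: probe the cluster at a fixed interval and give up after wait_max attempts instead of blocking forever. A self-contained sketch of that pattern under tokio, where check_ready is a hypothetical stand-in for check_meta_stable and the constants mirror the wait_max/interval values above (assumes tokio with the macros, rt-multi-thread, and time features):

use std::time::Duration;

// Stand-in for CLUSTER_CERESDB_HEALTH_CHECK_INTERVAL_SECONDS.
const CHECK_INTERVAL_SECONDS: u64 = 10;

// Hypothetical probe; the real code issues the meta-stable CREATE TABLE query.
async fn check_ready() -> bool {
    true
}

#[tokio::main]
async fn main() {
    let wait_max = 6;
    let mut wait_cnt: u64 = 0;
    loop {
        // Bail out after wait_max probes rather than spinning forever.
        if wait_cnt >= wait_max {
            println!("waited too long for readiness; giving up");
            return;
        }
        if check_ready().await {
            println!("service is stable");
            return;
        }
        wait_cnt += 1;
        let has_waited = wait_cnt * CHECK_INTERVAL_SECONDS;
        println!("still waiting, has_waited:{has_waited}s");
        tokio::time::sleep(Duration::from_secs(CHECK_INTERVAL_SECONDS)).await;
    }
}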