Skip to content

Commit 1ebce65

Browse files
authored
feat: impl mysql query with proxy (apache#886)
1 parent 61e4b7c commit 1ebce65

File tree

9 files changed

+46
-353
lines changed

9 files changed

+46
-353
lines changed

integration_tests/mysql/basic.sh

+6
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,9 @@
 mysql -h 127.0.0.1 -P 3307 -e 'show tables'

 mysql -h 127.0.0.1 -P 3307 -e 'select 1, now()'
+
+mysql -h 127.0.0.1 -P 3307 -e 'CREATE TABLE `demo`(`name`string TAG,`id` int TAG,`value` double NOT NULL,`t` timestamp NOT NULL,TIMESTAMP KEY(t)) ENGINE = Analytic with(enable_ttl=false)'
+
+mysql -h 127.0.0.1 -P 3307 -e 'insert into demo (name,value,t)values("ceresdb",1,1683280523000)'
+
+mysql -h 127.0.0.1 -P 3307 -e 'select * from demo'

proxy/src/grpc/write.rs

+3-89
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,7 @@ use std::{cmp::max, collections::HashMap, time::Instant};

 use ceresdbproto::storage::{
     storage_service_client::StorageServiceClient, RouteRequest, WriteRequest, WriteResponse,
-    WriteTableRequest,
 };
-use cluster::config::SchemaConfig;
 use common_types::request_id::RequestId;
 use common_util::error::BoxError;
 use futures::{future::try_join_all, FutureExt};
@@ -20,12 +18,12 @@ use snafu::{OptionExt, ResultExt};
 use tonic::transport::Channel;

 use crate::{
-    create_table, error,
+    error,
     error::{build_ok_header, ErrNoCause, ErrWithCause, InternalNoCause, Result},
-    execute_add_columns_plan, execute_plan, find_new_columns,
+    execute_plan,
     forward::{ForwardResult, ForwarderRef},
     instance::InstanceRef,
-    try_get_table, write_table_request_to_insert_plan, Context, Proxy,
+    Context, Proxy,
 };

 #[derive(Debug)]
#[derive(Debug)]
@@ -301,90 +299,6 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
     }
 }

-// TODO: use write_request_to_insert_plan in proxy, and remove following code.
-pub async fn write_request_to_insert_plan<Q: QueryExecutor + 'static>(
-    instance: InstanceRef<Q>,
-    table_requests: Vec<WriteTableRequest>,
-    schema_config: Option<&SchemaConfig>,
-    write_context: WriteContext,
-) -> Result<Vec<InsertPlan>> {
-    let mut plan_vec = Vec::with_capacity(table_requests.len());
-
-    let WriteContext {
-        request_id,
-        catalog,
-        schema,
-        deadline,
-        auto_create_table,
-    } = write_context;
-    let schema_config = schema_config.cloned().unwrap_or_default();
-    for write_table_req in table_requests {
-        let table_name = &write_table_req.table;
-        let mut table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
-
-        match table.clone() {
-            None => {
-                if auto_create_table {
-                    create_table(
-                        request_id,
-                        &catalog,
-                        &schema,
-                        instance.clone(),
-                        &write_table_req,
-                        &schema_config,
-                        deadline,
-                    )
-                    .await?;
-                    // try to get table again
-                    table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
-                }
-            }
-            Some(t) => {
-                if auto_create_table {
-                    // The reasons for making the decision to add columns before writing are as
-                    // follows:
-                    // * If judged based on the error message returned, it may cause data that has
-                    //   already been successfully written to be written again and affect the
-                    //   accuracy of the data.
-                    // * Currently, the decision to add columns is made at the request level, not at
-                    //   the row level, so the cost is relatively small.
-                    let table_schema = t.schema();
-                    let columns =
-                        find_new_columns(&table_schema, &schema_config, &write_table_req)?;
-                    if !columns.is_empty() {
-                        execute_add_columns_plan(
-                            request_id,
-                            &catalog,
-                            &schema,
-                            instance.clone(),
-                            t,
-                            columns,
-                            deadline,
-                        )
-                        .await?;
-                    }
-                }
-            }
-        }
-
-        match table {
-            Some(table) => {
-                let plan = write_table_request_to_insert_plan(table, write_table_req)?;
-                plan_vec.push(plan);
-            }
-            None => {
-                return ErrNoCause {
-                    code: StatusCode::BAD_REQUEST,
-                    msg: format!("Table not found, schema:{schema}, table:{table_name}"),
-                }
-                .fail();
-            }
-        }
-    }
-
-    Ok(plan_vec)
-}
-
 pub async fn execute_insert_plan<Q: QueryExecutor + 'static>(
     request_id: RequestId,
     catalog: &str,

proxy/src/handlers/mod.rs

-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@

 pub mod admin;
 pub(crate) mod error;
-pub mod query;
 pub mod route;

 mod prelude {

proxy/src/handlers/query.rs

-218
This file was deleted.

proxy/src/influxdb/mod.rs

+8-12
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ use crate::{
     context::RequestContext,
     error::{ErrNoCause, ErrWithCause, Internal, Result},
     execute_plan,
-    grpc::write::{execute_insert_plan, write_request_to_insert_plan, WriteContext},
+    grpc::write::{execute_insert_plan, WriteContext},
     influxdb::types::{
         convert_influxql_output, convert_write_request, InfluxqlRequest, InfluxqlResponse,
         WriteRequest, WriteResponse,
@@ -63,17 +63,13 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
         let write_context =
             WriteContext::new(request_id, deadline, catalog.clone(), schema.clone());

-        let plans = write_request_to_insert_plan(
-            self.instance.clone(),
-            convert_write_request(req)?,
-            schema_config,
-            write_context,
-        )
-        .await
-        .box_err()
-        .with_context(|| Internal {
-            msg: "write request to insert plan",
-        })?;
+        let plans = self
+            .write_request_to_insert_plan(convert_write_request(req)?, schema_config, write_context)
+            .await
+            .box_err()
+            .with_context(|| Internal {
+                msg: "write request to insert plan",
+            })?;

         let mut success = 0;
         for insert_plan in plans {

0 commit comments

Comments (0)