Skip to content

Commit d90a94d

Browse files
authored May 24, 2023
refactor: find new columns to improve write performance (#918)
## Related Issues Closes # ## Detailed Changes Refactor find new columns to improve write performance. * Skip building column schema when columns already exists. ## Test Plan TSBS. ## Benchmark | | auto_create_table = false | auto_create_table = true| performance loss | |:--------:|:-------------------------:|:------------------------:|:-----------------:| | Before | 171273.03 rows/s | 152436.35 rows/s | 11% | | After | 171699.82 rows/s | 164791.73 rows/s | 4.1% |
1 parent 391ea85 commit d90a94d

File tree

4 files changed

+260
-175
lines changed

4 files changed

+260
-175
lines changed
 

‎integration_tests/sdk/go/issue-779.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ import (
77
)
88

99
func checkAutoAddColumnsWithCreateTable(ctx context.Context, client ceresdb.Client) error {
10-
timestampName := "t"
10+
timestampName := "timestamp"
1111

1212
err := dropTable(ctx, client)
1313
if err != nil {

‎integration_tests/sdk/go/util.go

+8-8
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ import (
1010
const table = "godemo"
1111

1212
func createTable(ctx context.Context, client ceresdb.Client, timestampName string) error {
13-
_, err := ddl(ctx, client, fmt.Sprintf("create table %s (%s timestamp not null, name string tag, value int64,TIMESTAMP KEY(t))", table, timestampName))
13+
_, err := ddl(ctx, client, fmt.Sprintf("create table %s (`%s` timestamp not null, name string tag, value int64,TIMESTAMP KEY(%s))", table, timestampName, timestampName))
1414
return err
1515
}
1616

@@ -60,9 +60,13 @@ func ensureRow(expectedVals []ceresdb.Value, actualRow []ceresdb.Column) error {
6060
}
6161

6262
func query(ctx context.Context, client ceresdb.Client, ts int64, timestampName string, addNewColumn bool) error {
63+
sql := fmt.Sprintf("select timestamp, name, value from %s where %s = %d order by name", table, timestampName, ts)
64+
if addNewColumn {
65+
sql = fmt.Sprintf("select timestamp, name, value, new_tag, new_field from %s where %s = %d order by name", table, timestampName, ts)
66+
}
6367
resp, err := client.SQLQuery(ctx, ceresdb.SQLQueryRequest{
6468
Tables: []string{table},
65-
SQL: fmt.Sprintf("select * from %s where %s = %d order by name", table, timestampName, ts),
69+
SQL: sql,
6670
})
6771
if err != nil {
6872
return err
@@ -73,23 +77,19 @@ func query(ctx context.Context, client ceresdb.Client, ts int64, timestampName s
7377
}
7478

7579
row0 := []ceresdb.Value{
76-
ceresdb.NewUint64Value(4024844655630594205),
7780
ceresdb.NewInt64Value(ts),
7881
ceresdb.NewStringValue("tag-0"),
7982
ceresdb.NewInt64Value(0)}
8083

8184
row1 := []ceresdb.Value{
82-
ceresdb.NewUint64Value(14230010170561829440),
8385
ceresdb.NewInt64Value(ts),
8486
ceresdb.NewStringValue("tag-1"),
8587
ceresdb.NewInt64Value(1),
8688
}
8789

8890
if addNewColumn {
89-
row0[0] = ceresdb.NewUint64Value(8341999341185504339)
90-
row1[0] = ceresdb.NewUint64Value(4452331151453582498)
91-
row0 = append(row0, ceresdb.NewInt64Value(0), ceresdb.NewStringValue("new-tag-0"))
92-
row1 = append(row1, ceresdb.NewInt64Value(1), ceresdb.NewStringValue("new-tag-1"))
91+
row0 = append(row0, ceresdb.NewStringValue("new-tag-0"), ceresdb.NewInt64Value(0))
92+
row1 = append(row1, ceresdb.NewStringValue("new-tag-1"), ceresdb.NewInt64Value(1))
9393
}
9494

9595
if err := ensureRow(row0,

‎proxy/src/write.rs

+248-164
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ use std::{
1010

1111
use bytes::Bytes;
1212
use ceresdbproto::storage::{
13-
storage_service_client::StorageServiceClient, value, RouteRequest, WriteRequest,
13+
storage_service_client::StorageServiceClient, value, RouteRequest, Value, WriteRequest,
1414
WriteResponse as WriteResponsePB, WriteSeriesEntry, WriteTableRequest,
1515
};
1616
use cluster::config::SchemaConfig;
@@ -34,7 +34,7 @@ use query_engine::executor::Executor as QueryExecutor;
3434
use query_frontend::{
3535
frontend::{Context as FrontendContext, Frontend},
3636
plan::{AlterTableOperation, AlterTablePlan, InsertPlan, Plan},
37-
planner::build_schema_from_write_table_request,
37+
planner::{build_column_schema, try_get_data_type_from_value},
3838
provider::CatalogMetaProvider,
3939
};
4040
use router::endpoint::Endpoint;
@@ -43,7 +43,7 @@ use table_engine::table::TableRef;
4343
use tonic::transport::Channel;
4444

4545
use crate::{
46-
error::{ErrNoCause, ErrWithCause, InternalNoCause, Result},
46+
error::{ErrNoCause, ErrWithCause, Internal, InternalNoCause, Result},
4747
forward::{ForwardResult, ForwarderRef},
4848
Context, Proxy,
4949
};
@@ -477,14 +477,6 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
477477
code: StatusCode::BAD_REQUEST,
478478
})?;
479479
let schema = req_ctx.database;
480-
let schema_config = self
481-
.schema_config_provider
482-
.schema_config(&schema)
483-
.box_err()
484-
.with_context(|| ErrWithCause {
485-
code: StatusCode::INTERNAL_SERVER_ERROR,
486-
msg: format!("Fail to fetch schema config, schema_name:{schema}"),
487-
})?;
488480

489481
debug!(
490482
"Local write begin, catalog:{catalog}, schema:{schema}, request_id:{request_id}, first_table:{:?}, num_tables:{}",
@@ -503,7 +495,7 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
503495
};
504496

505497
let plan_vec = self
506-
.write_request_to_insert_plan(req.table_requests, schema_config, write_context)
498+
.write_request_to_insert_plan(req.table_requests, write_context)
507499
.await?;
508500

509501
let mut success = 0;
@@ -522,7 +514,6 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
522514
async fn write_request_to_insert_plan(
523515
&self,
524516
table_requests: Vec<WriteTableRequest>,
525-
schema_config: Option<&SchemaConfig>,
526517
write_context: WriteContext,
527518
) -> Result<Vec<InsertPlan>> {
528519
let mut plan_vec = Vec::with_capacity(table_requests.len());
@@ -534,7 +525,6 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
534525
deadline,
535526
auto_create_table,
536527
} = write_context;
537-
let schema_config = schema_config.cloned().unwrap_or_default();
538528
for write_table_req in table_requests {
539529
let table_name = &write_table_req.table;
540530
self.maybe_open_partition_table_if_not_exist(&catalog, &schema, table_name)
@@ -555,7 +545,7 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
555545
// * Currently, the decision to add columns is made at the request level, not at
556546
// the row level, so the cost is relatively small.
557547
let table_schema = table.schema();
558-
let columns = find_new_columns(&table_schema, &schema_config, &write_table_req)?;
548+
let columns = find_new_columns(&table_schema, &write_table_req)?;
559549
if !columns.is_empty() {
560550
self.execute_add_columns_plan(
561551
request_id,
@@ -668,32 +658,93 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
668658

669659
fn find_new_columns(
670660
schema: &Schema,
671-
schema_config: &SchemaConfig,
672-
write_req: &WriteTableRequest,
661+
write_table_req: &WriteTableRequest,
673662
) -> Result<Vec<ColumnSchema>> {
674-
let new_schema = build_schema_from_write_table_request(schema_config, write_req)
663+
let WriteTableRequest {
664+
table,
665+
field_names,
666+
tag_names,
667+
entries: write_entries,
668+
} = write_table_req;
669+
670+
let mut columns: HashMap<_, ColumnSchema> = HashMap::new();
671+
for write_entry in write_entries {
672+
// Parse tags.
673+
for tag in &write_entry.tags {
674+
let name_index = tag.name_index as usize;
675+
ensure!(
676+
name_index < tag_names.len(),
677+
InternalNoCause {
678+
msg: format!(
679+
"Tag {tag:?} is not found in tag_names:{tag_names:?}, table:{table}",
680+
),
681+
}
682+
);
683+
684+
let tag_name = &tag_names[name_index];
685+
686+
build_column(&mut columns, schema, tag_name, &tag.value, true)?;
687+
}
688+
689+
// Parse fields.
690+
for field_group in &write_entry.field_groups {
691+
for field in &field_group.fields {
692+
let field_index = field.name_index as usize;
693+
ensure!(
694+
field_index < field_names.len(),
695+
InternalNoCause {
696+
msg: format!(
697+
"Field {field:?} is not found in field_names:{field_names:?}, table:{table}",
698+
),
699+
}
700+
);
701+
let field_name = &field_names[field.name_index as usize];
702+
build_column(&mut columns, schema, field_name, &field.value, false)?;
703+
}
704+
}
705+
}
706+
707+
Ok(columns.into_iter().map(|v| v.1).collect())
708+
}
709+
710+
fn build_column<'a>(
711+
columns: &mut HashMap<&'a str, ColumnSchema>,
712+
schema: &Schema,
713+
name: &'a str,
714+
value: &Option<Value>,
715+
is_tag: bool,
716+
) -> Result<()> {
717+
// Skip adding columns, the following cases:
718+
// 1. Column already exists.
719+
// 2. The new column has been added.
720+
if schema.index_of(name).is_some() || columns.get(name).is_some() {
721+
return Ok(());
722+
}
723+
724+
let column_value = value
725+
.as_ref()
726+
.with_context(|| InternalNoCause {
727+
msg: format!("Column value is needed, column:{name}"),
728+
})?
729+
.value
730+
.as_ref()
731+
.with_context(|| InternalNoCause {
732+
msg: format!("Column value type is not supported, column:{name}"),
733+
})?;
734+
735+
let data_type = try_get_data_type_from_value(column_value)
675736
.box_err()
676-
.context(ErrWithCause {
677-
code: StatusCode::INTERNAL_SERVER_ERROR,
678-
msg: "Build schema from write table request failed",
737+
.context(Internal {
738+
msg: "Failed to get data type",
679739
})?;
680740

681-
let columns = new_schema.columns();
682-
let old_columns = schema.columns();
683-
684-
// find new columns:
685-
// 1. timestamp column can't be a new column;
686-
// 2. column not in old schema is a new column.
687-
let new_columns = columns
688-
.iter()
689-
.enumerate()
690-
.filter(|(idx, column)| {
691-
*idx != new_schema.timestamp_index()
692-
&& !old_columns.iter().any(|c| c.name == column.name)
693-
})
694-
.map(|(_, column)| column.clone())
695-
.collect();
696-
Ok(new_columns)
741+
let column_schema = build_column_schema(name, data_type, is_tag)
742+
.box_err()
743+
.context(Internal {
744+
msg: "Failed to build column schema",
745+
})?;
746+
columns.insert(name, column_schema);
747+
Ok(())
697748
}
698749

699750
fn write_table_request_to_insert_plan(
@@ -898,7 +949,7 @@ fn convert_proto_value_to_datum(
898949
mod test {
899950
use ceresdbproto::storage::{value, Field, FieldGroup, Tag, Value, WriteSeriesEntry};
900951
use common_types::{
901-
column_schema::{self, ColumnSchema},
952+
column_schema::{self},
902953
datum::{Datum, DatumKind},
903954
row::Row,
904955
schema::Builder,
@@ -908,45 +959,136 @@ mod test {
908959

909960
use super::*;
910961

911-
const TAG_K: &str = "tagk";
912-
const TAG_V: &str = "tagv";
913-
const TAG_K1: &str = "tagk1";
914-
const TAG_V1: &str = "tagv1";
915-
const FIELD_NAME: &str = "field";
916-
const FIELD_NAME1: &str = "field1";
917-
const FIELD_VALUE_STRING: &str = "stringValue";
962+
const NAME_COL1: &str = "col1";
963+
const NAME_NEW_COL1: &str = "new_col1";
964+
const NAME_COL2: &str = "col2";
965+
const NAME_COL3: &str = "col3";
966+
const NAME_COL4: &str = "col4";
967+
const NAME_COL5: &str = "col5";
918968

919-
// tag_names field_names write_entry
920-
fn generate_write_entry() -> (Schema, Vec<String>, Vec<String>, WriteSeriesEntry) {
921-
let tag_names = vec![TAG_K.to_string(), TAG_K1.to_string()];
922-
let field_names = vec![FIELD_NAME.to_string(), FIELD_NAME1.to_string()];
969+
#[test]
970+
fn test_write_entry_to_row_group() {
971+
let (schema, tag_names, field_names, write_entry) = generate_write_entry();
972+
let rows =
973+
write_entry_to_rows("test_table", &schema, &tag_names, &field_names, write_entry)
974+
.unwrap();
975+
let row0 = vec![
976+
Datum::Timestamp(Timestamp::new(1000)),
977+
Datum::String(NAME_COL1.into()),
978+
Datum::String(NAME_COL2.into()),
979+
Datum::Int64(100),
980+
Datum::Null,
981+
];
982+
let row1 = vec![
983+
Datum::Timestamp(Timestamp::new(2000)),
984+
Datum::String(NAME_COL1.into()),
985+
Datum::String(NAME_COL2.into()),
986+
Datum::Null,
987+
Datum::Int64(10),
988+
];
989+
let row2 = vec![
990+
Datum::Timestamp(Timestamp::new(3000)),
991+
Datum::String(NAME_COL1.into()),
992+
Datum::String(NAME_COL2.into()),
993+
Datum::Null,
994+
Datum::Int64(10),
995+
];
923996

924-
let tag = Tag {
925-
name_index: 0,
926-
value: Some(Value {
927-
value: Some(value::Value::StringValue(TAG_V.to_string())),
928-
}),
929-
};
930-
let tag1 = Tag {
931-
name_index: 1,
997+
let expect_rows = vec![
998+
Row::from_datums(row0),
999+
Row::from_datums(row1),
1000+
Row::from_datums(row2),
1001+
];
1002+
assert_eq!(rows, expect_rows);
1003+
}
1004+
1005+
#[test]
1006+
fn test_find_new_columns() {
1007+
let write_table_request = generate_write_table_request();
1008+
let schema = build_schema();
1009+
let new_columns = find_new_columns(&schema, &write_table_request)
1010+
.unwrap()
1011+
.into_iter()
1012+
.map(|v| (v.name.clone(), v))
1013+
.collect::<HashMap<_, _>>();
1014+
1015+
assert_eq!(new_columns.len(), 2);
1016+
assert!(new_columns.get(NAME_NEW_COL1).is_some());
1017+
assert!(new_columns.get(NAME_NEW_COL1).unwrap().is_tag);
1018+
assert!(new_columns.get(NAME_COL5).is_some());
1019+
assert!(!new_columns.get(NAME_COL5).unwrap().is_tag);
1020+
}
1021+
1022+
fn build_schema() -> Schema {
1023+
Builder::new()
1024+
.auto_increment_column_id(true)
1025+
.add_key_column(
1026+
column_schema::Builder::new(
1027+
TIMESTAMP_COLUMN_NAME.to_string(),
1028+
DatumKind::Timestamp,
1029+
)
1030+
.build()
1031+
.unwrap(),
1032+
)
1033+
.unwrap()
1034+
.add_key_column(
1035+
column_schema::Builder::new(NAME_COL1.to_string(), DatumKind::String)
1036+
.is_tag(true)
1037+
.build()
1038+
.unwrap(),
1039+
)
1040+
.unwrap()
1041+
.add_key_column(
1042+
column_schema::Builder::new(NAME_COL2.to_string(), DatumKind::String)
1043+
.is_tag(true)
1044+
.build()
1045+
.unwrap(),
1046+
)
1047+
.unwrap()
1048+
.add_normal_column(
1049+
column_schema::Builder::new(NAME_COL3.to_string(), DatumKind::Int64)
1050+
.build()
1051+
.unwrap(),
1052+
)
1053+
.unwrap()
1054+
.add_normal_column(
1055+
column_schema::Builder::new(NAME_COL4.to_string(), DatumKind::Int64)
1056+
.build()
1057+
.unwrap(),
1058+
)
1059+
.unwrap()
1060+
.build()
1061+
.unwrap()
1062+
}
1063+
1064+
fn make_tag(name_index: u32, val: &str) -> Tag {
1065+
Tag {
1066+
name_index,
9321067
value: Some(Value {
933-
value: Some(value::Value::StringValue(TAG_V1.to_string())),
1068+
value: Some(value::Value::StringValue(val.to_string())),
9341069
}),
935-
};
1070+
}
1071+
}
1072+
1073+
fn make_field(name_index: u32, val: value::Value) -> Field {
1074+
Field {
1075+
name_index,
1076+
value: Some(Value { value: Some(val) }),
1077+
}
1078+
}
1079+
1080+
// tag_names field_names write_entry
1081+
fn generate_write_entry() -> (Schema, Vec<String>, Vec<String>, WriteSeriesEntry) {
1082+
let tag_names = vec![NAME_COL1.to_string(), NAME_COL2.to_string()];
1083+
let field_names = vec![NAME_COL3.to_string(), NAME_COL4.to_string()];
1084+
1085+
let tag = make_tag(0, NAME_COL1);
1086+
let tag1 = make_tag(1, NAME_COL2);
9361087
let tags = vec![tag, tag1];
9371088

938-
let field = Field {
939-
name_index: 0,
940-
value: Some(Value {
941-
value: Some(value::Value::Float64Value(100.0)),
942-
}),
943-
};
944-
let field1 = Field {
945-
name_index: 1,
946-
value: Some(Value {
947-
value: Some(value::Value::StringValue(FIELD_VALUE_STRING.to_string())),
948-
}),
949-
};
1089+
let field = make_field(0, value::Value::Int64Value(100));
1090+
let field1 = make_field(1, value::Value::Int64Value(10));
1091+
9501092
let field_group = FieldGroup {
9511093
timestamp: 1000,
9521094
fields: vec![field],
@@ -965,102 +1107,44 @@ mod test {
9651107
field_groups: vec![field_group, field_group1, field_group2],
9661108
};
9671109

968-
let schema_builder = Builder::new();
969-
let schema = schema_builder
970-
.auto_increment_column_id(true)
971-
.add_key_column(ColumnSchema {
972-
id: column_schema::COLUMN_ID_UNINIT,
973-
name: TIMESTAMP_COLUMN_NAME.to_string(),
974-
data_type: DatumKind::Timestamp,
975-
is_nullable: false,
976-
is_tag: false,
977-
comment: String::new(),
978-
escaped_name: TIMESTAMP_COLUMN_NAME.escape_debug().to_string(),
979-
default_value: None,
980-
})
981-
.unwrap()
982-
.add_key_column(ColumnSchema {
983-
id: column_schema::COLUMN_ID_UNINIT,
984-
name: TAG_K.to_string(),
985-
data_type: DatumKind::String,
986-
is_nullable: false,
987-
is_tag: true,
988-
comment: String::new(),
989-
escaped_name: TAG_K.escape_debug().to_string(),
990-
default_value: None,
991-
})
992-
.unwrap()
993-
.add_normal_column(ColumnSchema {
994-
id: column_schema::COLUMN_ID_UNINIT,
995-
name: TAG_K1.to_string(),
996-
data_type: DatumKind::String,
997-
is_nullable: false,
998-
is_tag: true,
999-
comment: String::new(),
1000-
escaped_name: TAG_K1.escape_debug().to_string(),
1001-
default_value: None,
1002-
})
1003-
.unwrap()
1004-
.add_normal_column(ColumnSchema {
1005-
id: column_schema::COLUMN_ID_UNINIT,
1006-
name: FIELD_NAME.to_string(),
1007-
data_type: DatumKind::Double,
1008-
is_nullable: true,
1009-
is_tag: false,
1010-
comment: String::new(),
1011-
escaped_name: FIELD_NAME.escape_debug().to_string(),
1012-
default_value: None,
1013-
})
1014-
.unwrap()
1015-
.add_normal_column(ColumnSchema {
1016-
id: column_schema::COLUMN_ID_UNINIT,
1017-
name: FIELD_NAME1.to_string(),
1018-
data_type: DatumKind::String,
1019-
is_nullable: true,
1020-
is_tag: false,
1021-
comment: String::new(),
1022-
escaped_name: FIELD_NAME1.escape_debug().to_string(),
1023-
default_value: None,
1024-
})
1025-
.unwrap()
1026-
.build()
1027-
.unwrap();
1110+
let schema = build_schema();
10281111
(schema, tag_names, field_names, write_entry)
10291112
}
10301113

1031-
#[test]
1032-
fn test_write_entry_to_row_group() {
1033-
let (schema, tag_names, field_names, write_entry) = generate_write_entry();
1034-
let rows =
1035-
write_entry_to_rows("test_table", &schema, &tag_names, &field_names, write_entry)
1036-
.unwrap();
1037-
let row0 = vec![
1038-
Datum::Timestamp(Timestamp::new(1000)),
1039-
Datum::String(TAG_V.into()),
1040-
Datum::String(TAG_V1.into()),
1041-
Datum::Double(100.0),
1042-
Datum::Null,
1043-
];
1044-
let row1 = vec![
1045-
Datum::Timestamp(Timestamp::new(2000)),
1046-
Datum::String(TAG_V.into()),
1047-
Datum::String(TAG_V1.into()),
1048-
Datum::Null,
1049-
Datum::String(FIELD_VALUE_STRING.into()),
1050-
];
1051-
let row2 = vec![
1052-
Datum::Timestamp(Timestamp::new(3000)),
1053-
Datum::String(TAG_V.into()),
1054-
Datum::String(TAG_V1.into()),
1055-
Datum::Null,
1056-
Datum::String(FIELD_VALUE_STRING.into()),
1057-
];
1114+
fn generate_write_table_request() -> WriteTableRequest {
1115+
let tag1 = make_tag(0, NAME_NEW_COL1);
1116+
let tag2 = make_tag(1, NAME_COL1);
1117+
let tags = vec![tag1, tag2];
10581118

1059-
let expect_rows = vec![
1060-
Row::from_datums(row0),
1061-
Row::from_datums(row1),
1062-
Row::from_datums(row2),
1063-
];
1064-
assert_eq!(rows, expect_rows);
1119+
let field1 = make_field(0, value::Value::Int64Value(100));
1120+
let field2 = make_field(1, value::Value::Int64Value(10));
1121+
1122+
let field_group1 = FieldGroup {
1123+
timestamp: 1000,
1124+
fields: vec![field1.clone(), field2.clone()],
1125+
};
1126+
let field_group2 = FieldGroup {
1127+
timestamp: 2000,
1128+
fields: vec![field1],
1129+
};
1130+
let field_group3 = FieldGroup {
1131+
timestamp: 3000,
1132+
fields: vec![field2],
1133+
};
1134+
1135+
let write_entry = WriteSeriesEntry {
1136+
tags,
1137+
field_groups: vec![field_group1, field_group2, field_group3],
1138+
};
1139+
1140+
let tag_names = vec![NAME_NEW_COL1.to_string(), NAME_COL1.to_string()];
1141+
let field_names = vec![NAME_COL3.to_string(), NAME_COL5.to_string()];
1142+
1143+
WriteTableRequest {
1144+
table: "test".to_string(),
1145+
tag_names,
1146+
field_names,
1147+
entries: vec![write_entry],
1148+
}
10651149
}
10661150
}

‎query_frontend/src/planner.rs

+3-2
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ impl<'a, P: MetaProvider> Planner<'a, P> {
363363
}
364364
}
365365

366-
fn build_column_schema(
366+
pub fn build_column_schema(
367367
column_name: &str,
368368
data_type: DatumKind,
369369
is_tag: bool,
@@ -537,7 +537,7 @@ fn ensure_data_type_compatible(
537537
Ok(())
538538
}
539539

540-
fn try_get_data_type_from_value(value: &PbValue) -> Result<DatumKind> {
540+
pub fn try_get_data_type_from_value(value: &PbValue) -> Result<DatumKind> {
541541
match value {
542542
PbValue::Float64Value(_) => Ok(DatumKind::Double),
543543
PbValue::StringValue(_) => Ok(DatumKind::String),
@@ -555,6 +555,7 @@ fn try_get_data_type_from_value(value: &PbValue) -> Result<DatumKind> {
555555
PbValue::VarbinaryValue(_) => Ok(DatumKind::Varbinary),
556556
}
557557
}
558+
558559
/// A planner wraps the datafusion's logical planner, and delegate sql like
559560
/// select/explain to datafusion's planner.
560561
pub(crate) struct PlannerDelegate<'a, P: MetaProvider> {

0 commit comments

Comments
 (0)
Please sign in to comment.