Skip to content

Commit be9f50e

Browse files
authored Mar 22, 2023
feat: auto add column (#749)
* feat: introduce proxy module (#715) * impl route service with proxy * impl write service with proxy * remove forward module in proxy * refactor code * add tests in write * feat: impl query with proxy (#717) * refactor: refactor proxy module (#726) * refactor: refactor proxy module * cargo fmt * refactor by CR * Feat proxy prom query (#727) * feat: impl prom query with proxy * refactor code * feat: impl stream write with proxy (#737) * feat: impl stream query with proxy (#742) * feat: impl stream query with proxy * refactor by CR * feat: introduce proxy module * refactor code * add header in storage service * feat: impl storage service with proxy * make CI happy * refactor code * refactor code * refactor by CR * feat: automatically create non-existent columns during insertion * test: add autoAddColumns test in go sdk * refactor code * refactor by CR * refactor by CR
1 parent cba54b3 commit be9f50e

File tree

8 files changed

+259
-150
lines changed

8 files changed

+259
-150
lines changed
 

‎integration_tests/sdk/go/main.go

+76-26
Original file line numberDiff line numberDiff line change
@@ -19,14 +19,21 @@ func init() {
1919
}
2020
}
2121

22-
func write(ctx context.Context, client ceresdb.Client, ts int64) error {
22+
func write(ctx context.Context, client ceresdb.Client, ts int64, addNewColumn bool) error {
2323
points := make([]ceresdb.Point, 0, 2)
2424
for i := 0; i < 2; i++ {
25-
point, err := ceresdb.NewPointBuilder(table).
25+
builder := ceresdb.NewPointBuilder(table).
2626
SetTimestamp(ts).
2727
AddTag("name", ceresdb.NewStringValue(fmt.Sprintf("tag-%d", i))).
28-
AddField("value", ceresdb.NewInt64Value(int64(i))).
29-
Build()
28+
AddField("value", ceresdb.NewInt64Value(int64(i)))
29+
30+
if addNewColumn {
31+
builder = builder.AddTag("new_tag", ceresdb.NewStringValue(fmt.Sprintf("new-tag-%d", i))).
32+
AddField("new_field", ceresdb.NewInt64Value(int64(i)))
33+
}
34+
35+
point, err := builder.Build()
36+
3037
if err != nil {
3138
return err
3239
}
@@ -57,10 +64,10 @@ func ensureRow(expectedVals []ceresdb.Value, actualRow []ceresdb.Column) error {
5764

5865
}
5966

60-
func query(ctx context.Context, client ceresdb.Client, ts int64) error {
67+
func query(ctx context.Context, client ceresdb.Client, ts int64, addNewColumn bool) error {
6168
resp, err := client.SQLQuery(ctx, ceresdb.SQLQueryRequest{
6269
Tables: []string{table},
63-
SQL: fmt.Sprintf("select * from %s where timestamp = %d", table, ts),
70+
SQL: fmt.Sprintf("select * from %s where timestamp = %d order by name", table, ts),
6471
})
6572
if err != nil {
6673
return err
@@ -70,21 +77,32 @@ func query(ctx context.Context, client ceresdb.Client, ts int64) error {
7077
return fmt.Errorf("expect 2 rows, current: %+v", len(resp.Rows))
7178
}
7279

73-
if err := ensureRow([]ceresdb.Value{
80+
row0 := []ceresdb.Value{
7481
ceresdb.NewUint64Value(4024844655630594205),
7582
ceresdb.NewInt64Value(ts),
7683
ceresdb.NewStringValue("tag-0"),
77-
ceresdb.NewInt64Value(0),
78-
}, resp.Rows[0].Columns()); err != nil {
79-
return err
80-
}
84+
ceresdb.NewInt64Value(0)}
8185

82-
return ensureRow([]ceresdb.Value{
86+
row1 := []ceresdb.Value{
8387
ceresdb.NewUint64Value(14230010170561829440),
8488
ceresdb.NewInt64Value(ts),
8589
ceresdb.NewStringValue("tag-1"),
8690
ceresdb.NewInt64Value(1),
87-
}, resp.Rows[1].Columns())
91+
}
92+
93+
if addNewColumn {
94+
row0[0] = ceresdb.NewUint64Value(8341999341185504339)
95+
row1[0] = ceresdb.NewUint64Value(4452331151453582498)
96+
row0 = append(row0, ceresdb.NewInt64Value(0), ceresdb.NewStringValue("new-tag-0"))
97+
row1 = append(row1, ceresdb.NewInt64Value(1), ceresdb.NewStringValue("new-tag-1"))
98+
}
99+
100+
if err := ensureRow(row0,
101+
resp.Rows[0].Columns()); err != nil {
102+
return err
103+
}
104+
105+
return ensureRow(row1, resp.Rows[1].Columns())
88106
}
89107

90108
func ddl(ctx context.Context, client ceresdb.Client, sql string) (uint32, error) {
@@ -99,6 +117,48 @@ func ddl(ctx context.Context, client ceresdb.Client, sql string) (uint32, error)
99117
return resp.AffectedRows, nil
100118
}
101119

120+
func checkAutoCreateTable(ctx context.Context, client ceresdb.Client) error {
121+
if _, err := ddl(ctx, client, "drop table if exists "+table); err != nil {
122+
return err
123+
}
124+
125+
ts := currentMS()
126+
if err := write(ctx, client, ts, false); err != nil {
127+
return err
128+
}
129+
130+
if err := query(ctx, client, ts, false); err != nil {
131+
return err
132+
}
133+
134+
return nil
135+
}
136+
137+
func checkAutoAddColumns(ctx context.Context, client ceresdb.Client) error {
138+
ts := currentMS()
139+
if err := write(ctx, client, ts, true); err != nil {
140+
return err
141+
}
142+
143+
if err := query(ctx, client, ts, true); err != nil {
144+
return err
145+
}
146+
147+
return nil
148+
}
149+
150+
func dropTable(ctx context.Context, client ceresdb.Client) error {
151+
affected, err := ddl(ctx, client, "drop table "+table)
152+
if err != nil {
153+
return err
154+
}
155+
156+
if affected != 0 {
157+
panic(fmt.Sprintf("drop table expected 0, actual is %d", affected))
158+
}
159+
return nil
160+
}
161+
102162
func main() {
103163
fmt.Printf("Begin test, endpoint %s...\n", endpoint)
104164

@@ -110,28 +170,18 @@ func main() {
110170
}
111171

112172
ctx := context.TODO()
113-
if _, err := ddl(ctx, client, "drop table if exists "+table); err != nil {
114-
panic(err)
115-
}
116-
117-
ts := currentMS()
118-
if err := write(ctx, client, ts); err != nil {
173+
if err = checkAutoCreateTable(ctx, client); err != nil {
119174
panic(err)
120175
}
121176

122-
if err := query(ctx, client, ts); err != nil {
177+
if err = checkAutoAddColumns(ctx, client); err != nil {
123178
panic(err)
124179
}
125180

126-
affected, err := ddl(ctx, client, "drop table "+table)
127-
if err != nil {
181+
if err = dropTable(ctx, client); err != nil {
128182
panic(err)
129183
}
130184

131-
if affected != 0 {
132-
panic(fmt.Sprintf("drop table expected 0, actual is %d", affected))
133-
}
134-
135185
fmt.Println("Test done")
136186
}
137187

‎server/src/handlers/influxdb.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ use crate::{
2626
context::RequestContext,
2727
handlers,
2828
instance::InstanceRef,
29-
proxy::grpc::write::{execute_plan, write_request_to_insert_plan, WriteContext},
29+
proxy::grpc::write::{execute_insert_plan, write_request_to_insert_plan, WriteContext},
3030
schema_config_provider::SchemaConfigProviderRef,
3131
};
3232

@@ -120,7 +120,7 @@ impl<Q: QueryExecutor + 'static> InfluxDb<Q> {
120120

121121
let mut success = 0;
122122
for insert_plan in plans {
123-
success += execute_plan(
123+
success += execute_insert_plan(
124124
request_id,
125125
catalog,
126126
schema,

‎server/src/handlers/prom.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ use crate::{
3030
context::RequestContext,
3131
handlers,
3232
instance::InstanceRef,
33-
proxy::grpc::write::{execute_plan, write_request_to_insert_plan, WriteContext},
33+
proxy::grpc::write::{execute_insert_plan, write_request_to_insert_plan, WriteContext},
3434
schema_config_provider::SchemaConfigProviderRef,
3535
};
3636

@@ -257,7 +257,7 @@ impl<Q: QueryExecutor + 'static> RemoteStorage for CeresDBStorage<Q> {
257257

258258
let mut success = 0;
259259
for insert_plan in plans {
260-
success += execute_plan(
260+
success += execute_insert_plan(
261261
request_id,
262262
catalog,
263263
schema,

‎server/src/proxy/grpc/prom_query.rs

+8-5
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ use common_types::{
2020
use common_util::error::BoxError;
2121
use http::StatusCode;
2222
use interpreters::{context::Context as InterpreterContext, factory::Factory, interpreter::Output};
23-
use log::info;
23+
use log::{error, info};
2424
use query_engine::executor::{Executor as QueryExecutor, RecordBatchVec};
2525
use snafu::{ensure, OptionExt, ResultExt};
2626
use sql::{
@@ -42,10 +42,13 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
4242
req: PrometheusQueryRequest,
4343
) -> PrometheusQueryResponse {
4444
match self.handle_prom_query_internal(ctx, req).await {
45-
Err(e) => PrometheusQueryResponse {
46-
header: Some(error::build_err_header(e)),
47-
..Default::default()
48-
},
45+
Err(e) => {
46+
error!("Failed to handle prom query, err:{e}");
47+
PrometheusQueryResponse {
48+
header: Some(error::build_err_header(e)),
49+
..Default::default()
50+
}
51+
}
4952
Ok(v) => v,
5053
}
5154
}

‎server/src/proxy/grpc/route.rs

+2
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
use ceresdbproto::storage::{RouteRequest, RouteResponse};
44
use common_util::error::BoxError;
55
use http::StatusCode;
6+
use log::error;
67
use query_engine::executor::Executor as QueryExecutor;
78
use snafu::ResultExt;
89

@@ -23,6 +24,7 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
2324
let mut resp = RouteResponse::default();
2425
match routes {
2526
Err(e) => {
27+
error!("Failed to handle route, err:{e}");
2628
resp.header = Some(error::build_err_header(e));
2729
}
2830
Ok(v) => {

‎server/src/proxy/grpc/sql_query.rs

+8-4
Original file line numberDiff line numberDiff line change
@@ -40,10 +40,13 @@ const STREAM_QUERY_CHANNEL_LEN: usize = 20;
4040
impl<Q: QueryExecutor + 'static> Proxy<Q> {
4141
pub async fn handle_sql_query(&self, ctx: Context, req: SqlQueryRequest) -> SqlQueryResponse {
4242
match self.handle_sql_query_internal(ctx, req).await {
43-
Err(e) => SqlQueryResponse {
44-
header: Some(error::build_err_header(e)),
45-
..Default::default()
46-
},
43+
Err(e) => {
44+
error!("Failed to handle sql query, err:{e}");
45+
SqlQueryResponse {
46+
header: Some(error::build_err_header(e)),
47+
..Default::default()
48+
}
49+
}
4750
Ok(v) => v,
4851
}
4952
}
@@ -55,6 +58,7 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
5558
) -> BoxStream<'static, SqlQueryResponse> {
5659
match self.clone().handle_stream_query_internal(ctx, req).await {
5760
Err(e) => stream::once(async {
61+
error!("Failed to handle stream sql query, err:{e}");
5862
SqlQueryResponse {
5963
header: Some(error::build_err_header(e)),
6064
..Default::default()

‎server/src/proxy/grpc/write.rs

+160-110
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ use ceresdbproto::storage::{
1111
};
1212
use cluster::config::SchemaConfig;
1313
use common_types::{
14+
column_schema::ColumnSchema,
1415
datum::{Datum, DatumKind},
1516
request_id::RequestId,
1617
row::{Row, RowGroupBuilder},
@@ -20,12 +21,13 @@ use common_types::{
2021
use common_util::error::BoxError;
2122
use http::StatusCode;
2223
use interpreters::{context::Context as InterpreterContext, factory::Factory, interpreter::Output};
23-
use log::debug;
24+
use log::{debug, error, info};
2425
use query_engine::executor::Executor as QueryExecutor;
2526
use snafu::{ensure, OptionExt, ResultExt};
2627
use sql::{
2728
frontend::{Context as FrontendContext, Frontend},
28-
plan::{InsertPlan, Plan},
29+
plan::{AlterTableOperation, AlterTablePlan, InsertPlan, Plan},
30+
planner::build_schema_from_write_table_request,
2931
provider::CatalogMetaProvider,
3032
};
3133
use table_engine::table::TableRef;
@@ -69,10 +71,13 @@ impl WriteContext {
6971
impl<Q: QueryExecutor + 'static> Proxy<Q> {
7072
pub async fn handle_write(&self, ctx: Context, req: WriteRequest) -> WriteResponse {
7173
match self.handle_write_internal(ctx, req).await {
72-
Err(e) => WriteResponse {
73-
header: Some(error::build_err_header(e)),
74-
..Default::default()
75-
},
74+
Err(e) => {
75+
error!("Failed to handle write, err:{e}");
76+
WriteResponse {
77+
header: Some(error::build_err_header(e)),
78+
..Default::default()
79+
}
80+
}
7681
Ok(v) => v,
7782
}
7883
}
@@ -126,7 +131,7 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
126131

127132
let mut success = 0;
128133
for insert_plan in plan_vec {
129-
success += execute_plan(
134+
success += execute_insert_plan(
130135
request_id,
131136
catalog,
132137
&schema,
@@ -167,26 +172,54 @@ pub async fn write_request_to_insert_plan<Q: QueryExecutor + 'static>(
167172
deadline,
168173
auto_create_table,
169174
} = write_context;
170-
175+
let schema_config = schema_config.cloned().unwrap_or_default();
171176
for write_table_req in table_requests {
172177
let table_name = &write_table_req.table;
173178
let mut table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
174179

175-
if table.is_none() && auto_create_table {
176-
// TODO: remove this clone?
177-
let schema_config = schema_config.cloned().unwrap_or_default();
178-
create_table(
179-
request_id,
180-
&catalog,
181-
&schema,
182-
instance.clone(),
183-
&write_table_req,
184-
&schema_config,
185-
deadline,
186-
)
187-
.await?;
188-
// try to get table again
189-
table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
180+
match table.clone() {
181+
None => {
182+
if auto_create_table {
183+
create_table(
184+
request_id,
185+
&catalog,
186+
&schema,
187+
instance.clone(),
188+
&write_table_req,
189+
&schema_config,
190+
deadline,
191+
)
192+
.await?;
193+
// try to get table again
194+
table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
195+
}
196+
}
197+
Some(t) => {
198+
if auto_create_table {
199+
// The reasons for making the decision to add columns before writing are as
200+
// follows:
201+
// * If judged based on the error message returned, it may cause data that has
202+
// already been successfully written to be written again and affect the
203+
// accuracy of the data.
204+
// * Currently, the decision to add columns is made at the request level, not at
205+
// the row level, so the cost is relatively small.
206+
let table_schema = t.schema();
207+
let columns =
208+
find_new_columns(&table_schema, &schema_config, &write_table_req)?;
209+
if !columns.is_empty() {
210+
execute_add_columns_plan(
211+
request_id,
212+
&catalog,
213+
&schema,
214+
instance.clone(),
215+
t,
216+
columns,
217+
deadline,
218+
)
219+
.await?;
220+
}
221+
}
222+
}
190223
}
191224

192225
match table {
@@ -207,7 +240,7 @@ pub async fn write_request_to_insert_plan<Q: QueryExecutor + 'static>(
207240
Ok(plan_vec)
208241
}
209242

210-
pub async fn execute_plan<Q: QueryExecutor + 'static>(
243+
pub async fn execute_insert_plan<Q: QueryExecutor + 'static>(
211244
request_id: RequestId,
212245
catalog: &str,
213246
schema: &str,
@@ -221,50 +254,15 @@ pub async fn execute_plan<Q: QueryExecutor + 'static>(
221254
insert_plan.rows.num_rows()
222255
);
223256
let plan = Plan::Insert(insert_plan);
224-
225-
instance
226-
.limiter
227-
.try_limit(&plan)
228-
.box_err()
229-
.context(ErrWithCause {
230-
code: StatusCode::INTERNAL_SERVER_ERROR,
231-
msg: "Insert is blocked",
232-
})?;
233-
234-
let interpreter_ctx = InterpreterContext::builder(request_id, deadline)
235-
// Use current ctx's catalog and schema as default catalog and schema
236-
.default_catalog_and_schema(catalog.to_string(), schema.to_string())
237-
.build();
238-
let interpreter_factory = Factory::new(
239-
instance.query_executor.clone(),
240-
instance.catalog_manager.clone(),
241-
instance.table_engine.clone(),
242-
instance.table_manipulator.clone(),
243-
);
244-
let interpreter = interpreter_factory
245-
.create(interpreter_ctx, plan)
246-
.box_err()
247-
.context(ErrWithCause {
248-
code: StatusCode::INTERNAL_SERVER_ERROR,
249-
msg: "Failed to create interpreter",
250-
})?;
251-
252-
interpreter
253-
.execute()
254-
.await
255-
.box_err()
256-
.context(ErrWithCause {
257-
code: StatusCode::INTERNAL_SERVER_ERROR,
258-
msg: "Failed to execute interpreter",
259-
})
260-
.and_then(|output| match output {
261-
Output::AffectedRows(n) => Ok(n),
262-
Output::Records(_) => ErrNoCause {
263-
code: StatusCode::BAD_REQUEST,
264-
msg: "Invalid output type, expect AffectedRows, found Records",
265-
}
266-
.fail(),
267-
})
257+
let output = execute_plan(request_id, catalog, schema, instance, plan, deadline).await;
258+
output.and_then(|output| match output {
259+
Output::AffectedRows(n) => Ok(n),
260+
Output::Records(_) => ErrNoCause {
261+
code: StatusCode::BAD_REQUEST,
262+
msg: "Invalid output type, expect AffectedRows, found Records",
263+
}
264+
.fail(),
265+
})
268266
}
269267

270268
fn try_get_table<Q: QueryExecutor + 'static>(
@@ -333,49 +331,15 @@ async fn create_table<Q: QueryExecutor + 'static>(
333331

334332
debug!("Grpc handle create table begin, plan:{:?}", plan);
335333

336-
instance
337-
.limiter
338-
.try_limit(&plan)
339-
.box_err()
340-
.context(ErrWithCause {
341-
code: StatusCode::INTERNAL_SERVER_ERROR,
342-
msg: "Create table is blocked",
343-
})?;
344-
345-
let interpreter_ctx = InterpreterContext::builder(request_id, deadline)
346-
// Use current ctx's catalog and schema as default catalog and schema
347-
.default_catalog_and_schema(catalog.to_string(), schema.to_string())
348-
.build();
349-
let interpreter_factory = Factory::new(
350-
instance.query_executor.clone(),
351-
instance.catalog_manager.clone(),
352-
instance.table_engine.clone(),
353-
instance.table_manipulator.clone(),
354-
);
355-
let interpreter = interpreter_factory
356-
.create(interpreter_ctx, plan)
357-
.box_err()
358-
.context(ErrWithCause {
359-
code: StatusCode::INTERNAL_SERVER_ERROR,
360-
msg: "Failed to create interpreter",
361-
})?;
362-
363-
interpreter
364-
.execute()
365-
.await
366-
.box_err()
367-
.context(ErrWithCause {
334+
let output = execute_plan(request_id, catalog, schema, instance, plan, deadline).await;
335+
output.and_then(|output| match output {
336+
Output::AffectedRows(_) => Ok(()),
337+
Output::Records(_) => ErrNoCause {
368338
code: StatusCode::INTERNAL_SERVER_ERROR,
369-
msg: "Failed to execute interpreter",
370-
})
371-
.and_then(|output| match output {
372-
Output::AffectedRows(_) => Ok(()),
373-
Output::Records(_) => ErrNoCause {
374-
code: StatusCode::INTERNAL_SERVER_ERROR,
375-
msg: "Invalid output type, expect AffectedRows, found Records",
376-
}
377-
.fail(),
378-
})
339+
msg: "Invalid output type, expect AffectedRows, found Records",
340+
}
341+
.fail(),
342+
})
379343
}
380344

381345
fn write_table_request_to_insert_plan(
@@ -576,6 +540,92 @@ fn convert_proto_value_to_datum(
576540
}
577541
}
578542

543+
fn find_new_columns(
544+
schema: &Schema,
545+
schema_config: &SchemaConfig,
546+
write_req: &WriteTableRequest,
547+
) -> Result<Vec<ColumnSchema>> {
548+
let new_schema = build_schema_from_write_table_request(schema_config, write_req)
549+
.box_err()
550+
.context(ErrWithCause {
551+
code: StatusCode::INTERNAL_SERVER_ERROR,
552+
msg: "Build schema from write table request failed",
553+
})?;
554+
555+
let columns = new_schema.columns();
556+
let old_columns = schema.columns();
557+
558+
let new_columns = columns
559+
.iter()
560+
.filter(|column| !old_columns.iter().any(|c| c.name == column.name))
561+
.cloned()
562+
.collect();
563+
Ok(new_columns)
564+
}
565+
566+
async fn execute_add_columns_plan<Q: QueryExecutor + 'static>(
567+
request_id: RequestId,
568+
catalog: &str,
569+
schema: &str,
570+
instance: InstanceRef<Q>,
571+
table: TableRef,
572+
columns: Vec<ColumnSchema>,
573+
deadline: Option<Instant>,
574+
) -> Result<()> {
575+
let table_name = table.name().to_string();
576+
info!("Add columns start, request_id:{request_id}, table:{table_name}, columns:{columns:?}");
577+
578+
let plan = Plan::AlterTable(AlterTablePlan {
579+
table,
580+
operations: AlterTableOperation::AddColumn(columns),
581+
});
582+
let _ = execute_plan(request_id, catalog, schema, instance, plan, deadline).await?;
583+
584+
info!("Add columns success, request_id:{request_id}, table:{table_name}");
585+
Ok(())
586+
}
587+
588+
async fn execute_plan<Q: QueryExecutor + 'static>(
589+
request_id: RequestId,
590+
catalog: &str,
591+
schema: &str,
592+
instance: InstanceRef<Q>,
593+
plan: Plan,
594+
deadline: Option<Instant>,
595+
) -> Result<Output> {
596+
instance
597+
.limiter
598+
.try_limit(&plan)
599+
.box_err()
600+
.context(ErrWithCause {
601+
code: StatusCode::INTERNAL_SERVER_ERROR,
602+
msg: "Request is blocked",
603+
})?;
604+
605+
let interpreter_ctx = InterpreterContext::builder(request_id, deadline)
606+
// Use current ctx's catalog and schema as default catalog and schema
607+
.default_catalog_and_schema(catalog.to_string(), schema.to_string())
608+
.build();
609+
let interpreter_factory = Factory::new(
610+
instance.query_executor.clone(),
611+
instance.catalog_manager.clone(),
612+
instance.table_engine.clone(),
613+
instance.table_manipulator.clone(),
614+
);
615+
let interpreter = interpreter_factory
616+
.create(interpreter_ctx, plan)
617+
.box_err()
618+
.context(ErrWithCause {
619+
code: StatusCode::INTERNAL_SERVER_ERROR,
620+
msg: "Failed to create interpreter",
621+
})?;
622+
623+
interpreter.execute().await.box_err().context(ErrWithCause {
624+
code: StatusCode::INTERNAL_SERVER_ERROR,
625+
msg: "Failed to execute interpreter",
626+
})
627+
}
628+
579629
#[cfg(test)]
580630
mod test {
581631
use ceresdbproto::storage::{Field, FieldGroup, Tag, Value};

‎sql/src/planner.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -368,7 +368,7 @@ fn build_column_schema(
368368
})
369369
}
370370

371-
fn build_schema_from_write_table_request(
371+
pub fn build_schema_from_write_table_request(
372372
schema_config: &SchemaConfig,
373373
write_table_req: &WriteTableRequest,
374374
) -> Result<Schema> {

0 commit comments

Comments (0)
Please sign in to comment.