@@ -4,9 +4,7 @@ use std::{cmp::max, collections::HashMap, time::Instant};
 
 use ceresdbproto::storage::{
     storage_service_client::StorageServiceClient, RouteRequest, WriteRequest, WriteResponse,
-    WriteTableRequest,
 };
-use cluster::config::SchemaConfig;
 use common_types::request_id::RequestId;
 use common_util::error::BoxError;
 use futures::{future::try_join_all, FutureExt};
@@ -20,12 +18,12 @@ use snafu::{OptionExt, ResultExt};
 use tonic::transport::Channel;
 
 use crate::{
-    create_table, error,
+    error,
     error::{build_ok_header, ErrNoCause, ErrWithCause, InternalNoCause, Result},
-    execute_add_columns_plan, execute_plan, find_new_columns,
+    execute_plan,
     forward::{ForwardResult, ForwarderRef},
     instance::InstanceRef,
-    try_get_table, write_table_request_to_insert_plan, Context, Proxy,
+    Context, Proxy,
 };
 
 #[derive(Debug)]
@@ -301,90 +299,6 @@ impl<Q: QueryExecutor + 'static> Proxy<Q> {
     }
 }
 
-// TODO: use write_request_to_insert_plan in proxy, and remove following code.
-pub async fn write_request_to_insert_plan<Q: QueryExecutor + 'static>(
-    instance: InstanceRef<Q>,
-    table_requests: Vec<WriteTableRequest>,
-    schema_config: Option<&SchemaConfig>,
-    write_context: WriteContext,
-) -> Result<Vec<InsertPlan>> {
-    let mut plan_vec = Vec::with_capacity(table_requests.len());
-
-    let WriteContext {
-        request_id,
-        catalog,
-        schema,
-        deadline,
-        auto_create_table,
-    } = write_context;
-    let schema_config = schema_config.cloned().unwrap_or_default();
-    for write_table_req in table_requests {
-        let table_name = &write_table_req.table;
-        let mut table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
-
-        match table.clone() {
-            None => {
-                if auto_create_table {
-                    create_table(
-                        request_id,
-                        &catalog,
-                        &schema,
-                        instance.clone(),
-                        &write_table_req,
-                        &schema_config,
-                        deadline,
-                    )
-                    .await?;
-                    // try to get table again
-                    table = try_get_table(&catalog, &schema, instance.clone(), table_name)?;
-                }
-            }
-            Some(t) => {
-                if auto_create_table {
-                    // The reasons for making the decision to add columns before writing are as
-                    // follows:
-                    // * If judged based on the error message returned, it may cause data that has
-                    //   already been successfully written to be written again and affect the
-                    //   accuracy of the data.
-                    // * Currently, the decision to add columns is made at the request level, not at
-                    //   the row level, so the cost is relatively small.
-                    let table_schema = t.schema();
-                    let columns =
-                        find_new_columns(&table_schema, &schema_config, &write_table_req)?;
-                    if !columns.is_empty() {
-                        execute_add_columns_plan(
-                            request_id,
-                            &catalog,
-                            &schema,
-                            instance.clone(),
-                            t,
-                            columns,
-                            deadline,
-                        )
-                        .await?;
-                    }
-                }
-            }
-        }
-
-        match table {
-            Some(table) => {
-                let plan = write_table_request_to_insert_plan(table, write_table_req)?;
-                plan_vec.push(plan);
-            }
-            None => {
-                return ErrNoCause {
-                    code: StatusCode::BAD_REQUEST,
-                    msg: format!("Table not found, schema:{schema}, table:{table_name}"),
-                }
-                .fail();
-            }
-        }
-    }
-
-    Ok(plan_vec)
-}
-
 pub async fn execute_insert_plan<Q: QueryExecutor + 'static>(
     request_id: RequestId,
     catalog: &str,
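
Note on the removed block: the TODO ("use write_request_to_insert_plan in proxy, and remove following code") indicates the table lookup, auto-create, and add-column logic now lives on Proxy rather than in a free function. Below is a minimal sketch of how a call site might look after this change, assuming a Proxy::write_request_to_insert_plan method with roughly this shape; only the method name comes from the TODO, while the parameters and the example wrapper are hypothetical illustrations, not the actual CeresDB API.

// Hypothetical sketch only: types come from this crate and ceresdbproto, and the
// proxy method signature is assumed, not confirmed by this diff.
use ceresdbproto::storage::WriteTableRequest;

async fn build_insert_plans_example<Q: QueryExecutor + 'static>(
    proxy: &Proxy<Q>,
    table_requests: Vec<WriteTableRequest>,
    write_context: WriteContext,
) -> Result<Vec<InsertPlan>> {
    // Previously callers invoked the removed free function, passing an InstanceRef
    // and an optional SchemaConfig explicitly. Here it is assumed the proxy already
    // owns the instance and schema config, so only per-request data is passed.
    proxy
        .write_request_to_insert_plan(table_requests, write_context)
        .await
}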