@@ -433,13 +433,14 @@ void write_index(const Index* idx, IOWriter* f) {
         WRITE1(idxr->code_size);
         WRITEVECTOR(idxr->codes);
     } else if (
-            auto* idxr = dynamic_cast<const IndexLocalSearchQuantizer*>(idx)) {
+            auto* idxr_2 =
+                    dynamic_cast<const IndexLocalSearchQuantizer*>(idx)) {
         uint32_t h = fourcc("IxLS");
         WRITE1(h);
         write_index_header(idx, f);
-        write_LocalSearchQuantizer(&idxr->lsq, f);
-        WRITE1(idxr->code_size);
-        WRITEVECTOR(idxr->codes);
+        write_LocalSearchQuantizer(&idxr_2->lsq, f);
+        WRITE1(idxr_2->code_size);
+        WRITEVECTOR(idxr_2->codes);
     } else if (
             const IndexProductResidualQuantizer* idxpr =
                     dynamic_cast<const IndexProductResidualQuantizer*>(idx)) {
@@ -572,26 +573,26 @@ void write_index(const Index* idx, IOWriter* f) {
 
         write_InvertedLists(ivaqfs->invlists, f);
     } else if (
-            const ResidualCoarseQuantizer* idxr =
+            const ResidualCoarseQuantizer* idxr_2 =
                     dynamic_cast<const ResidualCoarseQuantizer*>(idx)) {
         uint32_t h = fourcc("ImRQ");
         WRITE1(h);
         write_index_header(idx, f);
-        write_ResidualQuantizer(&idxr->rq, f);
-        WRITE1(idxr->beam_factor);
+        write_ResidualQuantizer(&idxr_2->rq, f);
+        WRITE1(idxr_2->beam_factor);
     } else if (
-            const Index2Layer* idxp = dynamic_cast<const Index2Layer*>(idx)) {
+            const Index2Layer* idxp_2 = dynamic_cast<const Index2Layer*>(idx)) {
         uint32_t h = fourcc("Ix2L");
         WRITE1(h);
         write_index_header(idx, f);
-        write_index(idxp->q1.quantizer, f);
-        WRITE1(idxp->q1.nlist);
-        WRITE1(idxp->q1.quantizer_trains_alone);
-        write_ProductQuantizer(&idxp->pq, f);
-        WRITE1(idxp->code_size_1);
-        WRITE1(idxp->code_size_2);
-        WRITE1(idxp->code_size);
-        WRITEVECTOR(idxp->codes);
+        write_index(idxp_2->q1.quantizer, f);
+        WRITE1(idxp_2->q1.nlist);
+        WRITE1(idxp_2->q1.quantizer_trains_alone);
+        write_ProductQuantizer(&idxp_2->pq, f);
+        WRITE1(idxp_2->code_size_1);
+        WRITE1(idxp_2->code_size_2);
+        WRITE1(idxp_2->code_size);
+        WRITEVECTOR(idxp_2->codes);
     } else if (
             const IndexScalarQuantizer* idxs =
                     dynamic_cast<const IndexScalarQuantizer*>(idx)) {
@@ -601,15 +602,16 @@ void write_index(const Index* idx, IOWriter* f) {
         write_ScalarQuantizer(&idxs->sq, f);
         WRITEVECTOR(idxs->codes);
     } else if (
-            const IndexLattice* idxl = dynamic_cast<const IndexLattice*>(idx)) {
+            const IndexLattice* idxl_2 =
+                    dynamic_cast<const IndexLattice*>(idx)) {
         uint32_t h = fourcc("IxLa");
         WRITE1(h);
-        WRITE1(idxl->d);
-        WRITE1(idxl->nsq);
-        WRITE1(idxl->scale_nbit);
-        WRITE1(idxl->zn_sphere_codec.r2);
+        WRITE1(idxl_2->d);
+        WRITE1(idxl_2->nsq);
+        WRITE1(idxl_2->scale_nbit);
+        WRITE1(idxl_2->zn_sphere_codec.r2);
         write_index_header(idx, f);
-        WRITEVECTOR(idxl->trained);
+        WRITEVECTOR(idxl_2->trained);
     } else if (
             const IndexIVFFlatDedup* ivfl =
                     dynamic_cast<const IndexIVFFlatDedup*>(idx)) {
@@ -628,11 +630,12 @@ void write_index(const Index* idx, IOWriter* f) {
         }
         write_InvertedLists(ivfl->invlists, f);
     } else if (
-            const IndexIVFFlat* ivfl = dynamic_cast<const IndexIVFFlat*>(idx)) {
+            const IndexIVFFlat* ivfl_2 =
+                    dynamic_cast<const IndexIVFFlat*>(idx)) {
         uint32_t h = fourcc("IwFl");
         WRITE1(h);
-        write_ivf_header(ivfl, f);
-        write_InvertedLists(ivfl->invlists, f);
+        write_ivf_header(ivfl_2, f);
+        write_InvertedLists(ivfl_2->invlists, f);
     } else if (
             const IndexIVFScalarQuantizer* ivsc =
                     dynamic_cast<const IndexIVFScalarQuantizer*>(idx)) {
@@ -806,19 +809,19 @@ void write_index(const Index* idx, IOWriter* f) {
         WRITE1(idxpqfs->M2);
         WRITEVECTOR(idxpqfs->codes);
     } else if (
-            const IndexIVFPQFastScan* ivpq =
+            const IndexIVFPQFastScan* ivpq_2 =
                     dynamic_cast<const IndexIVFPQFastScan*>(idx)) {
         uint32_t h = fourcc("IwPf");
         WRITE1(h);
-        write_ivf_header(ivpq, f);
-        WRITE1(ivpq->by_residual);
-        WRITE1(ivpq->code_size);
-        WRITE1(ivpq->bbs);
-        WRITE1(ivpq->M2);
-        WRITE1(ivpq->implem);
-        WRITE1(ivpq->qbs2);
-        write_ProductQuantizer(&ivpq->pq, f);
-        write_InvertedLists(ivpq->invlists, f);
+        write_ivf_header(ivpq_2, f);
+        WRITE1(ivpq_2->by_residual);
+        WRITE1(ivpq_2->code_size);
+        WRITE1(ivpq_2->bbs);
+        WRITE1(ivpq_2->M2);
+        WRITE1(ivpq_2->implem);
+        WRITE1(ivpq_2->qbs2);
+        write_ProductQuantizer(&ivpq_2->pq, f);
+        write_InvertedLists(ivpq_2->invlists, f);
     } else if (
             const IndexRowwiseMinMax* imm =
                     dynamic_cast<const IndexRowwiseMinMax*>(idx)) {
@@ -828,13 +831,13 @@ void write_index(const Index* idx, IOWriter* f) {
         write_index_header(imm, f);
         write_index(imm->index, f);
     } else if (
-            const IndexRowwiseMinMaxFP16* imm =
+            const IndexRowwiseMinMaxFP16* imm_2 =
                     dynamic_cast<const IndexRowwiseMinMaxFP16*>(idx)) {
         // IndexRowwiseMinmaxHalf
         uint32_t h = fourcc("IRMh");
         WRITE1(h);
-        write_index_header(imm, f);
-        write_index(imm->index, f);
+        write_index_header(imm_2, f);
+        write_index(imm_2->index, f);
     } else {
         FAISS_THROW_MSG("don't know how to serialize this type of index");
     }
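
The pattern touched throughout this diff is the long `else if` cascade in `write_index()`, where each condition declares a pointer via `dynamic_cast`. In C++, a name declared in an `if` condition stays in scope for the entire `else` branch, so reusing `idxr`, `idxp`, `ivfl`, `ivpq`, or `imm` in a later condition shadows the earlier declaration (and warns under `-Wshadow`); the `_2` suffixes avoid that. A minimal standalone sketch of the idea, using hypothetical `Base`/`A`/`B` types rather than the FAISS classes:

```cpp
#include <iostream>

struct Base {
    virtual ~Base() = default;
};
struct A : Base {
    int code_size = 8;
};
struct B : Base {
    int code_size = 16;
};

// Dispatch on the dynamic type, the same shape as write_index():
// each branch declares its own pointer inside the condition.
void describe(const Base* idx) {
    if (const A* p = dynamic_cast<const A*>(idx)) {
        std::cout << "A, code_size=" << p->code_size << "\n";
    } else if (const B* p_2 = dynamic_cast<const B*>(idx)) {
        // `p` from the first condition is still in scope inside this
        // else-branch, so naming this pointer `p` again would shadow it;
        // hence the `_2` suffix, mirroring the renames in the diff above.
        std::cout << "B, code_size=" << p_2->code_size << "\n";
    } else {
        std::cout << "unknown index type\n";
    }
}

int main() {
    A a;
    B b;
    describe(&a); // prints the A branch
    describe(&b); // prints the B branch
}
```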