-
Notifications
You must be signed in to change notification settings - Fork 252
/
Copy pathflatten_cfg.rs
1502 lines (1318 loc) · 58 KB
/
flatten_cfg.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
//! The flatten cfg optimization pass "flattens" the entire control flow graph into a single block.
//! This includes branches in the CFG with non-constant conditions. Flattening these requires
//! special handling for operations with side-effects and can lead to a loss of information since
//! the jmpif will no longer be in the program. As a result, this pass should usually be towards or
//! at the end of the optimization passes. Note that this pass will also perform unexpectedly if
//! loops are still present in the program. Since the pass sees a normal jmpif, it will attempt to
//! merge both blocks, but no actual looping will occur.
//!
//! This pass is also known to produce some extra instructions which may go unused (usually 'Not')
//! while merging branches. These extra instructions can be cleaned up by a later dead instruction
//! elimination (DIE) pass.
//!
//! Though CFG information is lost during this pass, some key information is retained in the form
//! of `EnableSideEffect` instructions. Each time the flattening pass enters and exits a branch of
//! a jmpif, an instruction is inserted to capture a condition that is analogous to the activeness
//! of the program point. For example:
//!
//! b0(v0: u1):
//! jmpif v0, then: b1, else: b2
//! b1():
//! v1 = call f0
//! jmp b3(v1)
//! ... blocks b2 & b3 ...
//!
//! Would brace the call instruction as such:
//! enable_side_effects v0
//! v1 = call f0
//! enable_side_effects u1 1
//!
//! (Note: we restore to "true" to indicate that this program point is not nested within any
//! other branches.)
//!
//! When we are flattening a block that was reached via a jmpif with a non-constant condition c,
//! the following transformations of certain instructions within the block are expected:
//!
//! 1. A constraint is multiplied by the condition and changes the constraint to
//! an equality with c:
//!
//! constrain v0
//! ============
//! v1 = mul v0, c
//! v2 = eq v1, c
//! constrain v2
//!
//! 2. If we reach the end block of the branch created by the jmpif instruction, its block parameters
//! will be merged. To merge the jmp arguments of the then and else branches, the formula
//! `c * then_arg + !c * else_arg` is used for each argument.
//!
//! b0(v0: u1, v1: Field, v2: Field):
//! jmpif v0, then: b1, else: b2
//! b1():
//! jmp b3(v1)
//! b2():
//! jmp b3(v2)
//! b3(v3: Field):
//! ... b3 instructions ...
//! =========================
//! b0(v0: u1, v1: Field, v2: Field):
//! v3 = mul v0, v1
//! v4 = not v0
//! v5 = mul v4, v2
//! v6 = add v3, v5
//! ... b3 instructions ...
//!
//! 3. After being stored to in at least one predecessor of a block with multiple predecessors, the
//! value of a memory address is the value it had in both branches combined via c * a + !c * b.
//! Note that the following example is simplified to remove extra load instructions and combine
//! the separate merged stores for each branch into one store. See the next example for a
//! non-simplified version with address offsets.
//!
//! b0(v0: u1):
//! v1 = allocate 1 Field
//! jmpif v0, then: b1, else: b2
//! b1():
//! store v1, Field 5
//! ... b1 instructions ...
//! jmp b3
//! b2():
//! store v1, Field 7
//! ... b2 instructions ...
//! jmp b3
//! b3():
//! ... b3 instructions ...
//! =========================
//! b0():
//! v1 = allocate 1 Field
//! store v1, Field 5
//! ... b1 instructions ...
//! store v1, Field 7
//! ... b2 instructions ...
//! v2 = mul v0, Field 5
//! v3 = not v0
//! v4 = mul v3, Field 7
//! v5 = add v2, v4
//! store v1, v5
//! ... b3 instructions ...
//!
//! Note that if the ValueId of the address stored to is not the same, two merging store
//! instructions will be made - one to each address. This is the case even if both addresses refer
//! to the same address internally. This can happen when they are equivalent offsets:
//!
//! b0(v0: u1, v1: ref)
//! jmpif v0, then: b1, else: b2
//! b1():
//! v2 = add v1, Field 1
//! store Field 11 in v2
//! ... b1 instructions ...
//! b2():
//! v3 = add v1, Field 1
//! store Field 12 in v3
//! ... b2 instructions ...
//!
//! In this example, both store instructions store to an offset of 1 from v1, but because the
//! ValueIds differ (v2 and v3), two store instructions will be created:
//!
//! b0(v0: u1, v1: ref)
//! v2 = add v1, Field 1
//! v3 = load v2 (new load)
//! store Field 11 in v2
//! ... b1 instructions ...
//! v4 = not v0 (new not)
//! v5 = add v1, Field 1
//! v6 = load v5 (new load)
//! store Field 12 in v5
//! ... b2 instructions ...
//! v7 = mul v0, Field 11
//! v8 = mul v4, v3
//! v9 = add v7, v8
//! store v9 at v2 (new store)
//! v10 = mul v0, v6
//! v11 = mul v4, Field 12
//! v12 = add v10, v11
//! store v12 at v5 (new store)
use fxhash::FxHashMap as HashMap;
use std::collections::{BTreeMap, HashSet};
use acvm::FieldElement;
use iter_extended::vecmap;
use crate::ssa::{
ir::{
basic_block::BasicBlockId,
cfg::ControlFlowGraph,
dfg::{CallStack, InsertInstructionResult},
function::Function,
function_inserter::FunctionInserter,
instruction::{BinaryOp, Instruction, InstructionId, Intrinsic, TerminatorInstruction},
types::Type,
value::{Value, ValueId},
},
ssa_gen::Ssa,
};
mod branch_analysis;
pub(crate) mod value_merger;
use value_merger::ValueMerger;
impl Ssa {
    /// Collapses the control flow graph of the main function into one block
    /// holding every instruction, leaving no remaining control flow.
    ///
    /// Side-effectful instructions are rewritten along the way — typically by
    /// multiplying them with the relevant jmpif conditions — so that inlining
    /// every branch of a jmpif stays sound. See the module-level comment at the
    /// top of this file for the full set of transformations.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(crate) fn flatten_cfg(mut self) -> Ssa {
        let main = self.main_mut();
        flatten_function_cfg(main);
        self
    }
}
/// Per-function state carried through the flattening pass.
struct Context<'f> {
    // Inserts rewritten instructions into the entry block and maps old ValueIds
    // to their replacements.
    inserter: FunctionInserter<'f>,
    /// This ControlFlowGraph is the graph from before the function was modified by this flattening pass.
    cfg: ControlFlowGraph,
    /// Maps start of branch -> end of branch
    branch_ends: HashMap<BasicBlockId, BasicBlockId>,
    /// Maps an address to the old and new value of the element at that address.
    /// These only hold stores for one branch at a time and are swapped out
    /// (via mem::take in inline_branch) between inlining of branches.
    store_values: HashMap<ValueId, Store>,
    /// Maps an address to the old and new value of the element at that address
    /// The difference between this map and store_values is that this stores
    /// the old and new value of an element from the outer block whose jmpif
    /// terminator is being flattened.
    ///
    /// This map persists throughout the flattening process, where addresses
    /// are overwritten as new stores are found. This overwriting is the desired behavior,
    /// as we want the most up-to-date value to be stored at a given address as
    /// we walk through blocks to flatten.
    outer_block_stores: HashMap<ValueId, ValueId>,
    /// Stores all allocations local to the current branch.
    /// Since these allocations are local to the current branch (ie. only defined within one branch of
    /// an if expression), they should not be merged with their previous value or stored value in
    /// the other branch since there is no such value. The ValueId here is that which is returned
    /// by the allocate instruction.
    local_allocations: HashSet<ValueId>,
    /// A stack of each jmpif condition that was taken to reach a particular point in the program.
    /// When two branches are merged back into one, this constitutes a join point, and is analogous
    /// to the rest of the program after an if statement. When such a join point / end block is
    /// found, the top of this conditions stack is popped since we are no longer under that
    /// condition. If we are under multiple conditions (a nested if), the topmost condition is
    /// the most recent condition combined with all previous conditions via `And` instructions.
    conditions: Vec<(BasicBlockId, ValueId)>,
}
/// Records what a memory address held before and after a branch stored to it,
/// so the two values can be merged at the branch's join point.
pub(crate) struct Store {
    // The value loaded from the address before any store in the current branch.
    old_value: ValueId,
    // The most recent value stored to the address within the current branch.
    new_value: ValueId,
}
/// The result of inlining one arm of a jmpif, used by inline_branch_end to
/// merge the two arms back together.
struct Branch {
    // The (possibly combined) condition under which this arm executes.
    condition: ValueId,
    // The last block inlined for this arm; its terminator arguments are merged
    // with the other arm's at the join point.
    last_block: BasicBlockId,
    // Stores performed inside this arm, keyed by address.
    store_values: HashMap<ValueId, Store>,
}
/// Flatten the CFG of a single (ACIR) function into its entry block.
///
/// Brillig functions are skipped entirely: branch analysis re-queues blocks
/// until all of their predecessors have been processed, which never terminates
/// while the program still contains loops.
fn flatten_function_cfg(function: &mut Function) {
    if matches!(function.runtime(), crate::ssa::ir::function::RuntimeType::Brillig) {
        return;
    }
    // Branch ends are computed against the CFG as it exists before flattening.
    let control_flow_graph = ControlFlowGraph::with_function(function);
    let branch_ends = branch_analysis::find_branch_ends(function, &control_flow_graph);
    Context {
        inserter: FunctionInserter::new(function),
        cfg: control_flow_graph,
        branch_ends,
        store_values: HashMap::default(),
        outer_block_stores: HashMap::default(),
        local_allocations: HashSet::new(),
        conditions: Vec::new(),
    }
    .flatten();
}
impl<'f> Context<'f> {
/// Entry point of the pass: recursively inline every block reachable from the
/// entry block's terminator into the entry block itself.
fn flatten(&mut self) {
    // Start with following the terminator of the entry block since we don't
    // need to flatten the entry block into itself.
    self.handle_terminator(self.inserter.function.entry_block());
}
/// Check the terminator of the given block and recursively inline any blocks reachable from
/// it. Since each block from a jmpif terminator is inlined successively, we must handle
/// instructions with side effects like constrain and store specially to preserve correctness.
/// For these instructions we must keep track of what the current condition is and modify
/// the instructions according to the module-level comment at the top of this file. Note that
/// the current condition is all the jmpif conditions required to reach the current block,
/// combined via `And` instructions.
///
/// Returns the last block to be inlined. This is either the return block of the function or,
/// if self.conditions is not empty, the end block of the most recent condition.
fn handle_terminator(&mut self, block: BasicBlockId) -> BasicBlockId {
    if let TerminatorInstruction::JmpIf { .. } =
        self.inserter.function.dfg[block].unwrap_terminator()
    {
        // Find stores in the outer block and insert into the `outer_block_stores` map.
        // Not using this map can lead to issues when attempting to merge slices.
        // When inlining a branch end, only the then branch and the else branch are checked for stores.
        // However, there are cases where we want to load a value that comes from the outer block
        // that we are handling the terminator for here.
        let instructions = self.inserter.function.dfg[block].instructions().to_vec();
        for instruction in instructions {
            let (instruction, _) = self.inserter.map_instruction(instruction);
            if let Instruction::Store { address, value } = instruction {
                self.outer_block_stores.insert(address, value);
            }
        }
    }
    match self.inserter.function.dfg[block].unwrap_terminator() {
        TerminatorInstruction::JmpIf { condition, then_destination, else_destination } => {
            let old_condition = *condition;
            let then_block = *then_destination;
            let else_block = *else_destination;
            let then_condition = self.inserter.resolve(old_condition);
            let one = FieldElement::one();
            // Inline the 'then' arm first; the 'else' arm below relies on this ordering
            // (see merge_stores and undo_stores_in_then_branch).
            let then_branch =
                self.inline_branch(block, then_block, old_condition, then_condition, one);
            let else_condition =
                self.insert_instruction(Instruction::Not(then_condition), CallStack::new());
            let zero = FieldElement::zero();
            // Make sure the else branch sees the previous values of each store
            // rather than any values created in the 'then' branch.
            self.undo_stores_in_then_branch(&then_branch);
            let else_branch =
                self.inline_branch(block, else_block, old_condition, else_condition, zero);
            // We must remember to reset whether side effects are enabled when both branches
            // end, in addition to resetting the value of old_condition since it is set to
            // known to be true/false within the then/else branch respectively.
            self.insert_current_side_effects_enabled();
            // We must map back to `then_condition` here. Mapping `old_condition` to itself would
            // lose any previous mappings.
            self.inserter.map_value(old_condition, then_condition);
            // While there is a condition on the stack we don't compile outside the condition
            // until it is popped. This ensures we inline the full then and else branches
            // before continuing from the end of the conditional here where they can be merged properly.
            let end = self.branch_ends[&block];
            self.inline_branch_end(end, then_branch, else_branch)
        }
        TerminatorInstruction::Jmp { destination, arguments, call_stack: _ } => {
            // If this jmp targets the end block of the innermost open condition, stop here:
            // inline_branch_end is responsible for inlining past the join point.
            if let Some((end_block, _)) = self.conditions.last() {
                if destination == end_block {
                    return block;
                }
            }
            let destination = *destination;
            let arguments = vecmap(arguments.clone(), |value| self.inserter.resolve(value));
            self.inline_block(destination, &arguments)
        }
        TerminatorInstruction::Return { return_values, call_stack } => {
            // The whole function has been flattened into the entry block, so install the
            // (resolved) return terminator on the entry block itself.
            let call_stack = call_stack.clone();
            let return_values =
                vecmap(return_values.clone(), |value| self.inserter.resolve(value));
            let new_return = TerminatorInstruction::Return { return_values, call_stack };
            let entry = self.inserter.function.entry_block();
            self.inserter.function.dfg.set_block_terminator(entry, new_return);
            block
        }
    }
}
/// Push a new condition onto the stack of active conditions.
///
/// The pushed condition must hold while inlining every block reachable from the
/// 'then' branch of a jmpif instruction (and likewise !condition while inlining
/// the 'else' branch), until the branches join back together. When nested under
/// an existing condition, the two are combined with an `And` so the stored value
/// is always the full path condition.
fn push_condition(&mut self, start_block: BasicBlockId, condition: ValueId) {
    let end_block = self.branch_ends[&start_block];
    // Copy the previous top of the stack out first so `self` is free to be
    // mutably borrowed when inserting the `And` instruction.
    let previous = self.conditions.last().map(|(_, cond)| *cond);
    let combined = match previous {
        Some(previous) => {
            let and = Instruction::binary(BinaryOp::And, previous, condition);
            self.insert_instruction(and, CallStack::new())
        }
        None => condition,
    };
    self.conditions.push((end_block, combined));
}
/// Insert a new instruction into the function's entry block and return its
/// first result.
/// Unlike push_instruction, this performs no mapping of the ValueIds within the
/// given instruction, nor does it modify self.values in any way.
fn insert_instruction(&mut self, instruction: Instruction, call_stack: CallStack) -> ValueId {
    let entry = self.inserter.function.entry_block();
    let dfg = &mut self.inserter.function.dfg;
    let results = dfg.insert_instruction_and_results(instruction, entry, None, call_stack);
    results.first()
}
/// Insert a new instruction into the function's entry block, supplying
/// `ctrl_typevars` to determine the result types when the instruction cannot
/// infer them from its operands.
/// Unlike push_instruction, this performs no mapping of the ValueIds within the
/// given instruction, nor does it modify self.values in any way.
fn insert_instruction_with_typevars(
    &mut self,
    instruction: Instruction,
    ctrl_typevars: Option<Vec<Type>>,
) -> InsertInstructionResult {
    let entry = self.inserter.function.entry_block();
    let dfg = &mut self.inserter.function.dfg;
    dfg.insert_instruction_and_results(instruction, entry, ctrl_typevars, CallStack::new())
}
/// Read the condition on top of the stack and emit an `EnableSideEffects`
/// instruction for it into the entry block.
///
/// An empty stack means we have re-emerged to branch depth 0, so a constant
/// `true` (u1 1) is emitted to unconditionally re-enable side effects.
fn insert_current_side_effects_enabled(&mut self) {
    // Copy the ValueId out so the borrow of `self.conditions` ends before the
    // fallback constant is created.
    let top = self.conditions.last().map(|(_, condition)| *condition);
    let condition = top.unwrap_or_else(|| {
        self.inserter.function.dfg.make_constant(FieldElement::one(), Type::unsigned(1))
    });
    self.insert_instruction_with_typevars(Instruction::EnableSideEffects { condition }, None);
}
/// Inline one branch of a jmpif instruction.
///
/// This will continue inlining recursively until the next end block is reached where each branch
/// of the jmpif instruction is joined back into a single block.
///
/// Within a branch of a jmpif instruction, we can assume the condition of the jmpif to be
/// always true or false, depending on which branch we're in.
///
/// Returns the ending block / join block of this branch.
fn inline_branch(
    &mut self,
    jmpif_block: BasicBlockId,
    destination: BasicBlockId,
    old_condition: ValueId,
    new_condition: ValueId,
    condition_value: FieldElement,
) -> Branch {
    if destination == self.branch_ends[&jmpif_block] {
        // If the branch destination is the same as the end of the branch, this must be the
        // 'else' case of an if with no else - so there is no else branch.
        Branch {
            condition: new_condition,
            // The last block here is somewhat arbitrary. It only matters that it has no Jmp
            // args that will be merged by inline_branch_end. Since jmpifs don't have
            // block arguments, it is safe to use the jmpif block here.
            last_block: jmpif_block,
            store_values: HashMap::default(),
        }
    } else {
        self.push_condition(jmpif_block, new_condition);
        self.insert_current_side_effects_enabled();
        // Save the outer scope's store/allocation tracking; both are restored below
        // once this branch has been fully inlined.
        let old_stores = std::mem::take(&mut self.store_values);
        let old_allocations = std::mem::take(&mut self.local_allocations);
        // Optimization: within the then branch we know the condition to be true, so replace
        // any references of it within this branch with true. Likewise, do the same with false
        // with the else branch. We must be careful not to replace the condition if it is a
        // known constant, otherwise we can end up setting 1 = 0 or vice-versa.
        if self.inserter.function.dfg.get_numeric_constant(old_condition).is_none() {
            let known_value =
                self.inserter.function.dfg.make_constant(condition_value, Type::bool());
            self.inserter.map_value(old_condition, known_value);
        }
        let final_block = self.inline_block(destination, &[]);
        self.conditions.pop();
        // Hand this branch's stores to the caller (for merging at the join point)
        // while restoring the outer scope's maps.
        let stores_in_branch = std::mem::replace(&mut self.store_values, old_stores);
        self.local_allocations = old_allocations;
        Branch {
            condition: new_condition,
            last_block: final_block,
            store_values: stores_in_branch,
        }
    }
}
/// Inline the ending block of a branch, the point where all blocks from a jmpif instruction
/// join back together. In particular this function must handle merging block arguments from
/// all of the join point's predecessors, and it must handle any differing side effects from
/// each branch.
///
/// Afterwards, continues inlining recursively until it finds the next end block or finds the
/// end of the function.
///
/// Returns the final block that was inlined.
fn inline_branch_end(
    &mut self,
    destination: BasicBlockId,
    then_branch: Branch,
    else_branch: Branch,
) -> BasicBlockId {
    // A join point always has exactly two predecessors: the last block of each arm.
    assert_eq!(self.cfg.predecessors(destination).len(), 2);
    let then_args =
        self.inserter.function.dfg[then_branch.last_block].terminator_arguments().to_vec();
    let else_args =
        self.inserter.function.dfg[else_branch.last_block].terminator_arguments().to_vec();
    let params = self.inserter.function.dfg.block_parameters(destination);
    assert_eq!(params.len(), then_args.len());
    assert_eq!(params.len(), else_args.len());
    let args = vecmap(then_args.iter().zip(else_args), |(then_arg, else_arg)| {
        (self.inserter.resolve(*then_arg), self.inserter.resolve(else_arg))
    });
    let block = self.inserter.function.entry_block();
    let mut value_merger = ValueMerger::new(
        &mut self.inserter.function.dfg,
        block,
        Some(&self.store_values),
        Some(&self.outer_block_stores),
    );
    // Cannot include this in the previous vecmap since it requires exclusive access to self.
    // Each pair is merged per optimization #2 in the module comment.
    let args = vecmap(args, |(then_arg, else_arg)| {
        value_merger.merge_values(
            then_branch.condition,
            else_branch.condition,
            then_arg,
            else_arg,
        )
    });
    self.merge_stores(then_branch, else_branch);
    // insert merge instruction
    self.inline_block(destination, &args)
}
/// Merge any store instructions found in each branch.
///
/// This function relies on the 'then' branch being merged before the 'else' branch of a jmpif
/// instruction. If this ordering is changed, the ordering that store values are merged within
/// this function also needs to be changed to reflect that.
fn merge_stores(&mut self, then_branch: Branch, else_branch: Branch) {
    // Address -> (then_value, else_value, value_before_the_if)
    let mut new_map = BTreeMap::new();
    // Addresses stored to only in the 'then' branch keep their old value as the
    // else-case placeholder.
    for (address, store) in then_branch.store_values {
        new_map.insert(address, (store.new_value, store.old_value, store.old_value));
    }
    for (address, store) in else_branch.store_values {
        if let Some(entry) = new_map.get_mut(&address) {
            // Stored to in both branches: replace the placeholder with the value
            // actually stored in the else branch.
            entry.1 = store.new_value;
        } else {
            new_map.insert(address, (store.old_value, store.new_value, store.old_value));
        }
    }
    let then_condition = then_branch.condition;
    let else_condition = else_branch.condition;
    let block = self.inserter.function.entry_block();
    let mut value_merger = ValueMerger::new(
        &mut self.inserter.function.dfg,
        block,
        Some(&self.store_values),
        Some(&self.outer_block_stores),
    );
    // Merging must occur in a separate loop as we cannot borrow `self` as mutable while `value_merger` does
    let mut new_values = HashMap::default();
    for (address, (then_case, else_case, _)) in &new_map {
        let value =
            value_merger.merge_values(then_condition, else_condition, *then_case, *else_case);
        new_values.insert(address, value);
    }
    // Replace stores with new merged values
    for (address, (_, _, old_value)) in &new_map {
        let value = new_values[address];
        let address = *address;
        self.insert_instruction_with_typevars(Instruction::Store { address, value }, None);
        // Propagate the merged value into the enclosing scope's store tracking so
        // nested ifs merge against it in turn.
        if let Some(store) = self.store_values.get_mut(&address) {
            store.new_value = value;
        } else {
            self.store_values
                .insert(address, Store { old_value: *old_value, new_value: value });
        }
    }
}
/// Record that `new_value` was stored to `address` within the current branch.
///
/// Stores to allocations local to the current branch are ignored: they have no
/// counterpart in the other branch to merge with. On the first store to an
/// address a load of its prior value is emitted so the join point can later
/// merge the two branches' values.
fn remember_store(&mut self, address: ValueId, new_value: ValueId) {
    if self.local_allocations.contains(&address) {
        return;
    }
    if let Some(store_value) = self.store_values.get_mut(&address) {
        // Already tracked: just update the latest stored value.
        store_value.new_value = new_value;
        return;
    }
    // First store to this address in the branch: capture the pre-branch value.
    let load = Instruction::Load { address };
    let load_type = Some(vec![self.inserter.function.dfg.type_of_value(new_value)]);
    let old_value = self.insert_instruction_with_typevars(load, load_type).first();
    self.store_values.insert(address, Store { old_value, new_value });
}
/// Inline all instructions from the given destination block into the entry block,
/// then follow the destination's terminator and continue inlining recursively.
///
/// Returns the final block that was inlined.
///
/// Expects that the `arguments` given are already translated via self.inserter.resolve.
/// If they are not, it is possible some values which no longer exist, such as block
/// parameters, will be kept in the program.
fn inline_block(&mut self, destination: BasicBlockId, arguments: &[ValueId]) -> BasicBlockId {
    self.inserter.remember_block_params(destination, arguments);
    // The to_vec is required (despite what clippy may suggest): iterating the
    // block's instruction list directly would keep the dfg borrowed while
    // push_instruction mutates it.
    let block_instructions = self.inserter.function.dfg[destination].instructions().to_vec();
    for instruction_id in block_instructions {
        self.push_instruction(instruction_id);
    }
    self.handle_terminator(destination)
}
/// Push the given instruction to the end of the entry block of the current function.
///
/// Every ValueId in the instruction is mapped via self.inserter.resolve first, so
/// the instruction actually pushed is a fresh one with a new InstructionId; the
/// original instruction's results are mapped to the new instruction's results.
fn push_instruction(&mut self, id: InstructionId) {
    let (mapped, call_stack) = self.inserter.map_instruction(id);
    let mapped = self.handle_instruction_side_effects(mapped, call_stack.clone());
    // Checked before the push below consumes the instruction.
    let is_allocate = matches!(mapped, Instruction::Allocate);
    let entry_block = self.inserter.function.entry_block();
    let results = self.inserter.push_instruction_value(mapped, id, entry_block, call_stack);
    // Remember that this allocate is local to the current branch so that its
    // stores are never merged across branches later.
    if is_allocate {
        self.local_allocations.insert(results.first());
    }
}
/// If we are currently in a branch, we need to modify constrain instructions
/// to multiply them by the branch's condition (see optimization #1 in the module comment).
/// Stores, range checks, and ToBits/ToRadix calls receive analogous treatment;
/// all other instructions pass through unchanged, as does everything when the
/// condition stack is empty (branch depth 0).
fn handle_instruction_side_effects(
    &mut self,
    instruction: Instruction,
    call_stack: CallStack,
) -> Instruction {
    if let Some((_, condition)) = self.conditions.last().copied() {
        match instruction {
            Instruction::Constrain(lhs, rhs, message) => {
                // Replace constraint `lhs == rhs` with `condition * lhs == condition * rhs`.
                // Condition needs to be cast to argument type in order to multiply them together.
                let argument_type = self.inserter.function.dfg.type_of_value(lhs);
                // Sanity check that we're not constraining non-primitive types
                assert!(matches!(argument_type, Type::Numeric(_)));
                let casted_condition = self.insert_instruction(
                    Instruction::Cast(condition, argument_type),
                    call_stack.clone(),
                );
                let lhs = self.insert_instruction(
                    Instruction::binary(BinaryOp::Mul, lhs, casted_condition),
                    call_stack.clone(),
                );
                let rhs = self.insert_instruction(
                    Instruction::binary(BinaryOp::Mul, rhs, casted_condition),
                    call_stack,
                );
                Instruction::Constrain(lhs, rhs, message)
            }
            Instruction::Store { address, value } => {
                // Track the store for merging at the join point; the store itself
                // is emitted unchanged.
                self.remember_store(address, value);
                Instruction::Store { address, value }
            }
            Instruction::RangeCheck { value, max_bit_size, assert_message } => {
                // Replace value with `value * predicate` to zero out value when predicate is inactive.
                // Condition needs to be cast to argument type in order to multiply them together.
                let argument_type = self.inserter.function.dfg.type_of_value(value);
                let casted_condition = self.insert_instruction(
                    Instruction::Cast(condition, argument_type),
                    call_stack.clone(),
                );
                let value = self.insert_instruction(
                    Instruction::binary(BinaryOp::Mul, value, casted_condition),
                    call_stack.clone(),
                );
                Instruction::RangeCheck { value, max_bit_size, assert_message }
            }
            Instruction::Call { func, mut arguments } => match self.inserter.function.dfg[func]
            {
                // Multiply the field input of ToBits/ToRadix by the condition so the
                // input is zeroed out whenever the predicate is inactive.
                Value::Intrinsic(Intrinsic::ToBits(_) | Intrinsic::ToRadix(_)) => {
                    let field = arguments[0];
                    arguments.remove(0);
                    let argument_type = self.inserter.function.dfg.type_of_value(field);
                    let casted_condition = self.insert_instruction(
                        Instruction::Cast(condition, argument_type),
                        call_stack.clone(),
                    );
                    let field = self.insert_instruction(
                        Instruction::binary(BinaryOp::Mul, field, casted_condition),
                        call_stack.clone(),
                    );
                    arguments.insert(0, field);
                    Instruction::Call { func, arguments }
                }
                _ => Instruction::Call { func, arguments },
            },
            other => other,
        }
    } else {
        instruction
    }
}
/// Re-emit a store of each address's pre-branch value so that the 'else' branch
/// (which is inlined immediately afterwards) observes memory exactly as it was
/// before the 'then' branch ran.
fn undo_stores_in_then_branch(&mut self, then_branch: &Branch) {
    for (&address, store) in then_branch.store_values.iter() {
        let value = store.old_value;
        self.insert_instruction_with_typevars(Instruction::Store { address, value }, None);
    }
}
}
#[cfg(test)]
mod test {
use std::rc::Rc;
use crate::ssa::{
function_builder::FunctionBuilder,
ir::{
dfg::DataFlowGraph,
function::{Function, RuntimeType},
instruction::{BinaryOp, Instruction, Intrinsic, TerminatorInstruction},
map::Id,
types::Type,
value::{Value, ValueId},
},
};
#[test]
fn basic_jmpif() {
    // Build a simple diamond CFG:
    // fn main f0 {
    //   b0(v0: u1):
    //     jmpif v0, then: b1, else: b2
    //   b1():
    //     jmp b3(Field 3)
    //   b2():
    //     jmp b3(Field 4)
    //   b3(v1: Field):
    //     return v1
    // }
    let main_id = Id::test_new(0);
    let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir);
    let b1 = builder.insert_block();
    let b2 = builder.insert_block();
    let b3 = builder.insert_block();
    let v0 = builder.add_parameter(Type::bool());
    let v1 = builder.add_block_parameter(b3, Type::field());
    let three = builder.field_constant(3u128);
    let four = builder.field_constant(4u128);
    builder.terminate_with_jmpif(v0, b1, b2);
    builder.switch_to_block(b1);
    builder.terminate_with_jmp(b3, vec![three]);
    builder.switch_to_block(b2);
    builder.terminate_with_jmp(b3, vec![four]);
    builder.switch_to_block(b3);
    builder.terminate_with_return(vec![v1]);
    let ssa = builder.finish();
    assert_eq!(ssa.main().reachable_blocks().len(), 4);
    // Expected output (all four blocks collapsed into b0, with the block
    // argument merged as v0 * 3 + !v0 * 4):
    // fn main f0 {
    //   b0(v0: u1):
    //     enable_side_effects v0
    //     v5 = not v0
    //     enable_side_effects v5
    //     enable_side_effects u1 1
    //     v7 = mul v0, Field 3
    //     v8 = mul v5, Field 4
    //     v9 = add v7, v8
    //     return v9
    // }
    let ssa = ssa.flatten_cfg();
    assert_eq!(ssa.main().reachable_blocks().len(), 1);
}
#[test]
fn modify_constrain() {
    // Input SSA: a constrain inside a conditional branch.
    // fn main f0 {
    //   b0(v0: u1, v1: u1):
    //     jmpif v0, then: b1, else: b2
    //   b1():
    //     constrain v1
    //     jmp b2()
    //   b2():
    //     return
    // }
    let function_id = Id::test_new(0);
    let mut builder = FunctionBuilder::new("main".into(), function_id, RuntimeType::Acir);

    let then_block = builder.insert_block();
    let exit_block = builder.insert_block();

    let condition = builder.add_parameter(Type::bool());
    let constrained = builder.add_parameter(Type::bool());
    let true_const = builder.numeric_constant(true, Type::bool());

    builder.terminate_with_jmpif(condition, then_block, exit_block);
    builder.switch_to_block(then_block);
    builder.insert_constrain(constrained, true_const, None);
    builder.terminate_with_jmp(exit_block, vec![]);
    builder.switch_to_block(exit_block);
    builder.terminate_with_return(vec![]);

    let ssa = builder.finish();
    assert_eq!(ssa.main().reachable_blocks().len(), 3);

    // Expected SSA after flattening: the constrain is made conditional by
    // multiplying its operand with the branch condition.
    // fn main f0 {
    //   b0(v0: u1, v1: u1):
    //     enable_side_effects v0
    //     v3 = mul v1, v0
    //     v4 = eq v3, v0
    //     constrain v4
    //     v5 = not v0
    //     enable_side_effects v5
    //     enable_side_effects u1 1
    //     return
    // }
    let flattened = ssa.flatten_cfg();
    assert_eq!(flattened.main().reachable_blocks().len(), 1);
}
#[test]
fn merge_stores() {
    // Input SSA: a store inside the then-branch only.
    // fn main f0 {
    //   b0(v0: u1, v1: &mut Field):
    //     jmpif v0, then: b1, else: b2
    //   b1():
    //     store v1, Field 5
    //     jmp b2()
    //   b2():
    //     return
    // }
    let function_id = Id::test_new(0);
    let mut builder = FunctionBuilder::new("main".into(), function_id, RuntimeType::Acir);

    let then_block = builder.insert_block();
    let exit_block = builder.insert_block();

    let condition = builder.add_parameter(Type::bool());
    let reference = builder.add_parameter(Type::Reference(Rc::new(Type::field())));

    builder.terminate_with_jmpif(condition, then_block, exit_block);
    builder.switch_to_block(then_block);
    let stored_value = builder.field_constant(5u128);
    builder.insert_store(reference, stored_value);
    builder.terminate_with_jmp(exit_block, vec![]);
    builder.switch_to_block(exit_block);
    builder.terminate_with_return(vec![]);

    let ssa = builder.finish();

    // Expected SSA after flattening: the old value is reloaded, the branch
    // store is undone, and a merged value is stored at the join point.
    // fn main f0 {
    //   b0(v0: u1, v1: reference):
    //     enable_side_effects v0
    //     v4 = load v1
    //     store Field 5 at v1
    //     v5 = not v0
    //     store v4 at v1
    //     enable_side_effects u1 1
    //     v6 = cast v0 as Field
    //     v7 = cast v5 as Field
    //     v8 = mul v6, Field 5
    //     v9 = mul v7, v4
    //     v10 = add v8, v9
    //     store v10 at v1
    //     return
    // }
    let flattened = ssa.flatten_cfg();
    let main = flattened.main();
    assert_eq!(main.reachable_blocks().len(), 1);

    let store_count = count_instruction(main, |ins| matches!(ins, Instruction::Store { .. }));
    assert_eq!(store_count, 3);
}
#[test]
fn merge_stores_with_else_block() {
    // Input SSA: both branches store different values to the same reference.
    // fn main f0 {
    //   b0(v0: u1, v1: ref):
    //     jmpif v0, then: b1, else: b2
    //   b1():
    //     store Field 5 in v1
    //     jmp b3()
    //   b2():
    //     store Field 6 in v1
    //     jmp b3()
    //   b3():
    //     return
    // }
    let function_id = Id::test_new(0);
    let mut builder = FunctionBuilder::new("main".into(), function_id, RuntimeType::Acir);

    let then_block = builder.insert_block();
    let else_block = builder.insert_block();
    let join_block = builder.insert_block();

    let condition = builder.add_parameter(Type::bool());
    let reference = builder.add_parameter(Type::Reference(Rc::new(Type::field())));

    builder.terminate_with_jmpif(condition, then_block, else_block);
    builder.switch_to_block(then_block);
    let then_value = builder.field_constant(5u128);
    builder.insert_store(reference, then_value);
    builder.terminate_with_jmp(join_block, vec![]);
    builder.switch_to_block(else_block);
    let else_value = builder.field_constant(6u128);
    builder.insert_store(reference, else_value);
    builder.terminate_with_jmp(join_block, vec![]);
    builder.switch_to_block(join_block);
    builder.terminate_with_return(vec![]);

    let ssa = builder.finish();

    // Expected SSA after flattening: each branch's store is undone before the
    // other branch runs, then the two values are merged and stored once.
    // fn main f0 {
    //   b0(v0: u1, v1: reference):
    //     enable_side_effects v0
    //     v5 = load v1
    //     store Field 5 at v1
    //     v6 = not v0
    //     store v5 at v1
    //     enable_side_effects v6
    //     v8 = load v1
    //     store Field 6 at v1
    //     enable_side_effects u1 1
    //     v9 = cast v0 as Field
    //     v10 = cast v6 as Field
    //     v11 = mul v9, Field 5
    //     v12 = mul v10, Field 6
    //     v13 = add v11, v12
    //     store v13 at v1
    //     return
    // }
    let flattened = ssa.flatten_cfg();
    let main = flattened.main();
    assert_eq!(main.reachable_blocks().len(), 1);

    let store_count = count_instruction(main, |ins| matches!(ins, Instruction::Store { .. }));
    assert_eq!(store_count, 4);
}
/// Counts the instructions in `function`'s entry block that satisfy `f`.
fn count_instruction(function: &Function, f: impl Fn(&Instruction) -> bool) -> usize {
    let entry = function.entry_block();
    let mut matching = 0;
    for instruction_id in function.dfg[entry].instructions() {
        if f(&function.dfg[*instruction_id]) {
            matching += 1;
        }
    }
    matching
}
#[test]
fn nested_branch_stores() {
// Here we build some SSA with control flow given by the following graph.
// To test stores in nested if statements are handled correctly this graph is
// also nested. To keep things simple, each block stores to the same address
// an integer that matches its block number. So block 2 stores the value 2,
// block 3 stores 3 and so on. Note that only blocks { 0, 1, 2, 3, 5, 6 }
// will store values. Other blocks do not store values so that we can test
// how these existing values are merged at each join point.
//
// For debugging purposes, each block also has a call to test_function with two
// arguments. The first is the block the test_function was originally in, and the
// second is the current value stored in the reference.
//
// b0 (0 stored)
// ↓
// b1 (1 stored)
// ↙ ↘
// b2 b3 (2 stored in b2) (3 stored in b3)
// ↓ |
// b4 |