From b0323c79adfcd4565725938bde307aec87fdadea Mon Sep 17 00:00:00 2001
From: "Eliyaan (Nopana)" <103932369+Eliyaan@users.noreply.github.com>
Date: Tue, 4 Jul 2023 21:43:23 +0200
Subject: [PATCH] native: move for_in_stmt to stmt.v (#18705)

---
 vlib/v/gen/native/amd64.v | 79 ++++++++++-----------------------------
 vlib/v/gen/native/arm64.v |  8 ++++
 vlib/v/gen/native/gen.v   |  3 +-
 vlib/v/gen/native/stmt.v  | 51 ++++++++++++++++++++++++-
 4 files changed, 80 insertions(+), 61 deletions(-)

diff --git a/vlib/v/gen/native/amd64.v b/vlib/v/gen/native/amd64.v
index 6a90a050c72d93..b2788bd84f240f 100644
--- a/vlib/v/gen/native/amd64.v
+++ b/vlib/v/gen/native/amd64.v
@@ -10,7 +10,7 @@ import v.token
 pub struct Amd64 {
 mut:
 	g &Gen = unsafe { nil }
-	// arm64 specific stuff for code generation
+	// amd64 specific stuff for code generation
 	is_16bit_aligned bool
 }
 
@@ -995,7 +995,8 @@ fn (mut c Amd64) ret() {
 	c.g.println('ret')
 }
 
-fn (mut c Amd64) push(reg Amd64Register) {
+fn (mut c Amd64) push(r Register) {
+	reg := r as Amd64Register
 	if int(reg) < int(Amd64Register.r8) {
 		c.g.write8(0x50 + int(reg))
 	} else {
@@ -1122,7 +1123,7 @@ fn (mut c Amd64) leave() {
 	c.g.println('; label 0: return')
 	if c.g.defer_stmts.len != 0 {
 		// save return value
-		c.push(.rax)
+		c.push(Amd64Register.rax)
 		for defer_stmt in c.g.defer_stmts.reverse() {
 			name := '_defer${defer_stmt.idx_in_fn}'
 			defer_var := c.g.get_var_offset(name)
@@ -1712,14 +1713,14 @@ pub fn (mut c Amd64) call_fn(node ast.CallExpr) {
 	is_16bit_aligned := c.is_16bit_aligned != (stack_size % 2 == 1)
 	if !is_16bit_aligned {
 		// dummy data
-		c.push(.rbp)
+		c.push(Amd64Register.rbp)
 	}
 	reg_args << ssereg_args
 	reg_args << stack_args
 	for i in reg_args.reverse() {
 		if i == 0 && is_struct_return {
 			c.lea_var_to_reg(Amd64Register.rax, return_pos)
-			c.push(.rax)
+			c.push(Amd64Register.rax)
 			continue
 		}
 		c.g.expr(args[i].expr)
@@ -1754,17 +1755,17 @@ pub fn (mut c Amd64) call_fn(node ast.CallExpr) {
 		} else {
 			match args_size[i] {
 				1...8 {
-					c.push(.rax)
+					c.push(Amd64Register.rax)
 				}
 				9...16 {
-					c.push(.rdx)
-					c.push(.rax)
+					c.push(Amd64Register.rdx)
+					c.push(Amd64Register.rax)
 				}
 				else {
 					c.add(.rax, args_size[i] - ((args_size[i] + 7) % 8 + 1))
 					for _ in 0 .. (args_size[i] + 7) / 8 {
 						c.mov_deref(.rdx, .rax, ast.i64_type_idx)
-						c.push(.rdx)
+						c.push(Amd64Register.rdx)
 						c.sub(.rax, 8)
 					}
 				}
@@ -1854,51 +1855,6 @@ fn (mut c Amd64) call_builtin(name Builtin) i64 {
 	return call_addr
 }
 
-fn (mut c Amd64) for_in_stmt(node ast.ForInStmt) {
-	if node.is_range {
-		// for a in node.cond .. node.high {
-		i := c.allocate_var(node.val_var, 8, 0) // iterator variable
-		c.g.expr(node.cond)
-		c.mov_reg_to_var(LocalVar{i, ast.i64_type_idx, node.val_var}, Amd64Register.rax) // i = node.cond // initial value
-		start := c.g.pos() // label-begin:
-		start_label := c.g.labels.new_label()
-		c.mov_var_to_reg(Amd64Register.rbx, LocalVar{i, ast.i64_type_idx, node.val_var}) // rbx = iterator value
-		c.g.expr(node.high) // final value
-		c.cmp_reg(.rbx, .rax) // rbx = iterator, rax = max value
-		jump_addr := c.cjmp(.jge) // leave loop if i is beyond end
-		end_label := c.g.labels.new_label()
-		c.g.labels.patches << LabelPatch{
-			id: end_label
-			pos: jump_addr
-		}
-		c.g.println('; jump to label ${end_label}')
-		c.g.labels.branches << BranchLabel{
-			name: node.label
-			start: start_label
-			end: end_label
-		}
-		c.g.stmts(node.stmts)
-		c.g.labels.addrs[start_label] = c.g.pos()
-		c.g.println('; label ${start_label}')
-		c.inc_var(LocalVar{i, ast.i64_type_idx, node.val_var})
-		c.g.labels.branches.pop()
-		c.jmp_back(start)
-		c.g.labels.addrs[end_label] = c.g.pos()
-		c.g.println('; label ${end_label}')
-		/*
-		} else if node.kind == .array {
-		} else if node.kind == .array_fixed {
-		} else if node.kind == .map {
-		} else if node.kind == .string {
-		} else if node.kind == .struct_ {
-		} else if it.kind in [.array, .string] || it.cond_type.has_flag(.variadic) {
-		} else if it.kind == .map {
-		*/
-	} else {
-		c.g.v_error('for-in statement is not yet implemented', node.pos)
-	}
-}
-
 fn (mut c Amd64) gen_concat_expr(node ast.ConcatExpr) {
 	typ := node.return_type
 	ts := c.g.table.sym(typ)
@@ -3186,7 +3142,7 @@ fn (mut c Amd64) infloop() {
 }
 
 fn (mut c Amd64) fn_decl(node ast.FnDecl) {
-	c.push(.rbp)
+	c.push(Amd64Register.rbp)
 	c.mov_reg(Amd64Register.rbp, Amd64Register.rsp)
 	local_alloc_pos := c.g.pos()
 	c.sub(.rsp, 0)
@@ -3616,7 +3572,7 @@ fn (mut c Amd64) convert_int_to_string(a Register, b Register) {
 	loop_start := c.g.pos()
 	c.g.println('; label ${loop_label}')
 
-	c.push(.rax)
+	c.push(Amd64Register.rax)
 
 	c.mov(Amd64Register.rdx, 0)
 	c.mov(Amd64Register.rbx, 10)
@@ -3719,7 +3675,7 @@ fn (mut c Amd64) gen_match_expr(expr ast.MatchExpr) {
 	} else {
 		c.g.expr(expr.cond)
 	}
-	c.push(.rax)
+	c.push(Amd64Register.rax)
 
 	mut else_label := 0
 	for i, branch in expr.branches {
@@ -3747,7 +3703,7 @@ fn (mut c Amd64) gen_match_expr(expr ast.MatchExpr) {
 					id: branch_labels[i]
 					pos: then_addr
 				}
-				c.push(.rdx)
+				c.push(Amd64Register.rdx)
 			} else {
 				c.g.expr(cond)
@@ -3758,7 +3714,7 @@ fn (mut c Amd64) gen_match_expr(expr ast.MatchExpr) {
 					id: branch_labels[i]
 					pos: then_addr
 				}
-				c.push(.rdx)
+				c.push(Amd64Register.rdx)
 			}
 		}
 	}
@@ -4172,6 +4128,11 @@ fn (mut c Amd64) gen_cast_expr(expr ast.CastExpr) {
 	}
 }
 
+fn (mut c Amd64) cmp_to_stack_top(reg Register) {
+	c.pop(.rbx)
+	c.cmp_reg(.rbx, reg as Amd64Register)
+}
+
 // Temporary!
 fn (mut c Amd64) adr(r Arm64Register, delta int) {
 	panic('`adr` instruction not supported with amd64')
diff --git a/vlib/v/gen/native/arm64.v b/vlib/v/gen/native/arm64.v
index a761dfe8839e2e..c3b37a10355116 100644
--- a/vlib/v/gen/native/arm64.v
+++ b/vlib/v/gen/native/arm64.v
@@ -522,3 +522,11 @@ fn (mut c Arm64) call_addr_at(addr int, at i64) i64 {
 fn (mut c Arm64) gen_concat_expr(expr ast.ConcatExpr) {
 	panic('Arm64.gen_concat_expr() not implemented')
 }
+
+fn (mut c Arm64) cmp_to_stack_top(reg Register) {
+	panic('Arm64.cmp_to_stack_top() not implemented')
+}
+
+fn (mut c Arm64) push(r Register) {
+	panic('Arm64.push() not implemented')
+}
diff --git a/vlib/v/gen/native/gen.v b/vlib/v/gen/native/gen.v
index 9752f98120884a..c00fc6a215d509 100644
--- a/vlib/v/gen/native/gen.v
+++ b/vlib/v/gen/native/gen.v
@@ -78,6 +78,7 @@ mut:
 	call_fn(node ast.CallExpr)
 	call(addr int) i64
 	cjmp(op JumpOp) int
+	cmp_to_stack_top(r Register)
 	cmp_var_reg(var Var, reg Register, config VarConfig)
 	cmp_var(var Var, val int, config VarConfig)
 	cmp_zero(reg Register)
@@ -86,7 +87,6 @@ mut:
 	convert_rune_to_string(r Register, buffer int, var Var, config VarConfig)
 	dec_var(var Var, config VarConfig)
 	fn_decl(node ast.FnDecl)
-	for_in_stmt(node ast.ForInStmt)
 	gen_asm_stmt(asm_node ast.AsmStmt)
 	gen_assert(assert_node ast.AssertStmt)
 	gen_cast_expr(expr ast.CastExpr)
@@ -118,6 +118,7 @@ mut:
 	mov64(r Register, val i64)
 	movabs(reg Register, val i64)
 	prefix_expr(node ast.PrefixExpr)
+	push(r Register)
 	ret()
 	return_stmt(node ast.Return)
 	reverse_string(r Register)
diff --git a/vlib/v/gen/native/stmt.v b/vlib/v/gen/native/stmt.v
index e8cf3a03a4ec1c..6fca2dcf868ce2 100644
--- a/vlib/v/gen/native/stmt.v
+++ b/vlib/v/gen/native/stmt.v
@@ -63,7 +63,7 @@ fn (mut g Gen) stmt(node ast.Stmt) {
 				// if no statements, just dont make it return
 			}
 
-			g.code_gen.for_in_stmt(node)
+			g.for_in_stmt(node)
 		}
 		ast.ForStmt {
 			g.for_stmt(node)
@@ -274,3 +274,52 @@ fn (mut g Gen) for_stmt(node ast.ForStmt) {
 	g.println('; label ${end_label}')
 	g.println('jmp after for')
 }
+
+fn (mut g Gen) for_in_stmt(node ast.ForInStmt) { // Work on that
+	if node.is_range {
+		// for a in node.cond .. node.high {
+		i := g.code_gen.allocate_var(node.val_var, 8, 0) // iterator variable
+		g.expr(node.cond) // outputs the lower loop bound (initial value) to the main reg
+		main_reg := g.code_gen.main_reg()
+		g.code_gen.mov_reg_to_var(LocalVar{i, ast.i64_type_idx, node.val_var}, main_reg) // i = node.cond // initial value
+
+		start := g.pos() // label-begin:
+		start_label := g.labels.new_label()
+		g.code_gen.mov_var_to_reg(main_reg, LocalVar{i, ast.i64_type_idx, node.val_var})
+		g.code_gen.push(main_reg) // put the iterator on the stack
+		g.expr(node.high) // final value (upper bound) to the main reg
+		g.code_gen.cmp_to_stack_top(main_reg)
+		jump_addr := g.code_gen.cjmp(.jge) // leave loop i >= upper bound
+
+		end_label := g.labels.new_label()
+		g.labels.patches << LabelPatch{
+			id: end_label
+			pos: jump_addr
+		}
+		g.println('; jump to label ${end_label}')
+		g.labels.branches << BranchLabel{
+			name: node.label
+			start: start_label
+			end: end_label
+		}
+		g.stmts(node.stmts) // writes the actual body of the loop
+		g.labels.addrs[start_label] = g.pos()
+		g.println('; label ${start_label}')
+		g.code_gen.inc_var(LocalVar{i, ast.i64_type_idx, node.val_var})
+		g.labels.branches.pop()
+		g.code_gen.jmp_back(start) // loops
+		g.labels.addrs[end_label] = g.pos()
+		g.println('; label ${end_label}')
+		/*
+		} else if node.kind == .array {
+		} else if node.kind == .array_fixed {
+		} else if node.kind == .map {
+		} else if node.kind == .string {
+		} else if node.kind == .struct_ {
+		} else if it.kind in [.array, .string] || it.cond_type.has_flag(.variadic) {
+		} else if it.kind == .map {
+		*/
+	} else {
+		g.v_error('for-in statement is not yet implemented', node.pos)
+	}
+}
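
Usage sketch (illustrative, not part of the patch): after this change, a range-based for-in loop compiled with the native backend is lowered by the generic Gen.for_in_stmt() in stmt.v, which delegates the machine-specific steps (push, cmp_to_stack_top, cjmp, inc_var) to the CodeGen interface. A minimal V program exercising that path, with a hypothetical file name sum.v:

fn main() {
	mut sum := 0
	// `0 .. 5` is a range for-in: node.cond is 0 and node.high is 5,
	// so Gen.for_in_stmt() emits the init/compare/increment loop from the patch
	for i in 0 .. 5 {
		sum += i
	}
	println(sum) // 10
}

Built with `v -b native sum.v`, the emitted loop follows the pattern above: the iterator variable starts at node.cond, is pushed on the stack each iteration, compared against node.high via cmp_to_stack_top, and the loop exits on the .jge conditional jump.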