  • 2 commits
  • 1 file changed
  • 0 comments
  • 1 contributor

Showing 1 changed file with 65 additions and 70 deletions.

software/libfpvm/lnfpus.c (135 changes)
@@ -90,7 +90,7 @@ struct pfpu_reg {
 };
 
 
-static struct sched_ctx {
+struct sched_ctx {
 	struct fpvm_fragment *frag;
 	struct insn insns[FPVM_MAXCODELEN];
 	struct vm_reg *regs;	/* dynamically allocated */
@@ -103,7 +103,7 @@ static struct sched_ctx {
 #ifdef REG_STATS
 	int max_regs, curr_regs;	/* allocation statistics */
 #endif
-} *sc;
+};
 
 
 /* ----- Register initialization ------------------------------------------- */
@@ -226,13 +226,13 @@ static void list_concat(struct list *a, struct list *b)
 /* ----- Register management ----------------------------------------------- */
 
 
-static int vm_reg2idx(int reg)
+static int vm_reg2idx(struct sched_ctx *sc, int reg)
 {
 	return reg >= 0 ? reg : sc->frag->nbindings-reg;
 }
 
 
-static int alloc_reg(struct insn *setter)
+static int alloc_reg(struct sched_ctx *sc, struct insn *setter)
 {
 	struct pfpu_reg *reg;
 	int vm_reg, pfpu_reg, vm_idx;
@@ -258,7 +258,7 @@ static int alloc_reg(struct insn *setter)
 
 	Dprintf("  alloc reg %d -> %d\n", vm_reg, pfpu_reg);
 
-	vm_idx = vm_reg2idx(vm_reg);
+	vm_idx = vm_reg2idx(sc, vm_reg);
 	sc->regs[vm_idx].setter = setter;
 	sc->regs[vm_idx].pfpu_reg = pfpu_reg;
 	sc->regs[vm_idx].refs = setter->num_dependants+1;
@@ -267,7 +267,7 @@ static int alloc_reg(struct insn *setter)
 }
 
 
-static void put_reg(int vm_reg)
+static void put_reg(struct sched_ctx *sc, int vm_reg)
 {
 	int vm_idx;
 	struct vm_reg *reg;
@@ -275,7 +275,7 @@ static void put_reg(int vm_reg)
 	if(vm_reg >= 0)
 		return;
 
-	vm_idx = vm_reg2idx(vm_reg);
+	vm_idx = vm_reg2idx(sc, vm_reg);
 	reg = sc->regs+vm_idx;
 
 	assert(reg->refs);
@@ -301,35 +301,31 @@ static void put_reg(int vm_reg)
 }
 
 
-static int lookup_pfpu_reg(int vm_reg)
+static int lookup_pfpu_reg(struct sched_ctx *sc, int vm_reg)
 {
-	return vm_reg >= 0 ? vm_reg : sc->regs[vm_reg2idx(vm_reg)].pfpu_reg;
+	return vm_reg >= 0 ? vm_reg :
+	    sc->regs[vm_reg2idx(sc, vm_reg)].pfpu_reg;
 }
 
 
-static void mark(int vm_reg)
+static void mark(struct sched_ctx *sc, int vm_reg)
 {
 	if(vm_reg > 0)
 		sc->pfpu_regs[vm_reg].used = 1;
 }
 
 
-static int init_registers(struct fpvm_fragment *frag,
+static int init_registers(struct sched_ctx *sc, struct fpvm_fragment *frag,
     unsigned int *registers)
 {
 	int i;
 
-	sc->regs =
-	    calloc(frag->nbindings-frag->next_sur, sizeof(struct vm_reg));
-	if(!sc->regs)
-		return -1;
-
 	get_registers(frag, registers);
 
 	for(i = 0; i != frag->ninstructions; i++) {
-		mark(frag->code[i].opa);
-		mark(frag->code[i].opb);
-		mark(frag->code[i].dest);
+		mark(sc, frag->code[i].opa);
+		mark(sc, frag->code[i].opb);
+		mark(sc, frag->code[i].dest);
 	}
 
 	list_init(&sc->unallocated);
@@ -344,12 +340,12 @@ static int init_registers(struct fpvm_fragment *frag,
 /* ----- Instruction scheduler --------------------------------------------- */
 
 
-static struct vm_reg *add_data_ref(struct insn *insn, struct data_ref *ref,
-    int reg_num)
+static struct vm_reg *add_data_ref(struct sched_ctx *sc, struct insn *insn,
+    struct data_ref *ref, int reg_num)
 {
 	struct vm_reg *reg;
 
-	reg = sc->regs+vm_reg2idx(reg_num);
+	reg = sc->regs+vm_reg2idx(sc, reg_num);
 	ref->insn = insn;
 	ref->dep = reg->setter;
 	if(insn->vm_insn->dest == reg_num)
@@ -369,7 +365,7 @@ static struct vm_reg *add_data_ref(struct insn *insn, struct data_ref *ref,
 }
 
 
-static void init_scheduler(struct fpvm_fragment *frag)
+static void init_scheduler(struct sched_ctx *sc, struct fpvm_fragment *frag)
 {
 	int i;
 	struct insn *insn;
@@ -389,16 +385,20 @@ static void init_scheduler(struct fpvm_fragment *frag)
 		list_init(&insn->dependants);
 		switch (insn->arity) {
 			case 3:
-				add_data_ref(insn, &insn->cond, FPVM_REG_IFB);
+				add_data_ref(sc, insn, &insn->cond,
+				    FPVM_REG_IFB);
 				/* fall through */
 			case 2:
-				add_data_ref(insn, &insn->opb, frag->code[i].opb);
+				add_data_ref(sc, insn, &insn->opb,
+				     frag->code[i].opb);
 				/* fall through */
 			case 1:
-				add_data_ref(insn, &insn->opa, frag->code[i].opa);
+				add_data_ref(sc, insn, &insn->opa,
+				     frag->code[i].opa);
 				/* fall through */
 			case 0:
-				reg = sc->regs+vm_reg2idx(frag->code[i].dest);
+				reg = sc->regs+
+				    vm_reg2idx(sc, frag->code[i].dest);
 				if(reg->setter) {
 					reg->setter->next_setter = insn;
 					foreach(ref, &reg->setter->dependants)
@@ -458,7 +458,7 @@ static void init_scheduler(struct fpvm_fragment *frag)
 }
 
 
-static void unblock(struct insn *insn)
+static void unblock(struct sched_ctx *sc, struct insn *insn)
 {
 	int slot;
 
@@ -474,32 +474,33 @@ static void unblock(struct insn *insn)
 }
 
 
-static void put_reg_by_ref(struct data_ref *ref, int vm_reg)
+static void put_reg_by_ref(struct sched_ctx *sc, struct data_ref *ref,
+    int vm_reg)
 {
 	struct insn *setter = ref->dep;
 	struct vm_reg *reg;
 
 	if(setter) {
-		put_reg(setter->vm_insn->dest);
+		put_reg(sc, setter->vm_insn->dest);
 		if(setter->next_setter && setter->next_setter != ref->insn)
-			unblock(setter->next_setter);
+			unblock(sc, setter->next_setter);
 	} else {
-		reg = sc->regs+vm_reg2idx(vm_reg);
+		reg = sc->regs+vm_reg2idx(sc, vm_reg);
 		if(reg->first_setter && !reg->first_setter->rmw)
-			unblock(reg->first_setter);
+			unblock(sc, reg->first_setter);
	}
 }
 
 
-static void unblock_after(struct insn *insn, int cycle)
+static void unblock_after(struct sched_ctx *sc, struct insn *insn, int cycle)
 {
 	if(insn->earliest <= cycle)
 		insn->earliest = cycle+1;
-	unblock(insn);
+	unblock(sc, insn);
 }
 
 
-static int issue(struct insn *insn, unsigned *code)
+static int issue(struct sched_ctx *sc, struct insn *insn, unsigned *code)
 {
 	struct data_ref *ref;
 	int end, reg;
@@ -512,15 +513,17 @@ static int issue(struct insn *insn, unsigned *code)
 
 	switch (insn->arity) {
 		case 3:
-			put_reg_by_ref(&insn->cond, FPVM_REG_IFB);
+			put_reg_by_ref(sc, &insn->cond, FPVM_REG_IFB);
 			/* fall through */
 		case 2:
-			CODE(sc->cycle).opb = lookup_pfpu_reg(insn->vm_insn->opb);
-			put_reg_by_ref(&insn->opb, insn->vm_insn->opb);
+			CODE(sc->cycle).opb =
+			    lookup_pfpu_reg(sc, insn->vm_insn->opb);
+			put_reg_by_ref(sc, &insn->opb, insn->vm_insn->opb);
 			/* fall through */
 		case 1:
-			CODE(sc->cycle).opa = lookup_pfpu_reg(insn->vm_insn->opa);
-			put_reg_by_ref(&insn->opa, insn->vm_insn->opa);
+			CODE(sc->cycle).opa =
+			    lookup_pfpu_reg(sc, insn->vm_insn->opa);
+			put_reg_by_ref(sc, &insn->opa, insn->vm_insn->opa);
 			break;
 		case 0:
 			break;
@@ -528,16 +531,16 @@ static int issue(struct insn *insn, unsigned *code)
 			abort();
 	}
 
-	reg = alloc_reg(insn);
+	reg = alloc_reg(sc, insn);
 	if(reg < 0)
 		return -1;
 	CODE(end).dest = reg;
 	CODE(sc->cycle).opcode = fpvm_to_pfpu(insn->vm_insn->opcode);
 
 	foreach(ref, &insn->dependants)
-		unblock_after(ref->insn, end);
+		unblock_after(sc, ref->insn, end);
 	if(insn->next_setter && !insn->next_setter->rmw)
-		unblock_after(insn->next_setter,
+		unblock_after(sc, insn->next_setter,
 		    end-insn->next_setter->latency);
 
 	return 0;
@@ -557,7 +560,7 @@ static int count(const struct list *list)
 #endif
 
 
-static int schedule(unsigned int *code)
+static int schedule(struct sched_ctx *sc, unsigned int *code)
 {
 	int remaining;
 	int i, last, end;
@@ -590,13 +593,13 @@ static int schedule(unsigned int *code)
 			}
 		}
 		if(best) {
-			if(issue(best, code) < 0)
+			if(issue(sc, best, code) < 0)
 				return -1;
 			list_del(&best->more);
 			remaining--;
 		}
 		if(CODE(i).dest)
-			put_reg(sc->pfpu_regs[CODE(i).dest].vm_reg);
+			put_reg(sc, sc->pfpu_regs[CODE(i).dest].vm_reg);
 	}
 
 	/*
@@ -615,42 +618,34 @@ static int schedule(unsigned int *code)
 }
 
 
-static int init_scheduler_context(struct fpvm_fragment *frag,
-    unsigned int *reg)
-{
-	sc = calloc(1, sizeof(*sc));
-	if(!sc)
-		return -1;
-
-	sc->frag = frag;
-
-	if(init_registers(frag, reg) < 0) {
-		free(sc);
-		return -1;
-	}
-
-	init_scheduler(frag);
-	return 0;
-}
-
-
 int lnfpus_schedule(struct fpvm_fragment *frag, unsigned int *code,
     unsigned int *reg)
 {
+	/*
+	 * allocate context and registers on stack because standalone FN has no
+	 * memory allocator
+	 */
+	struct sched_ctx sc;
+	struct vm_reg regs[frag->nbindings-frag->next_sur];
 	pfpu_instruction vecout;
 	int res;
 
-	if(init_scheduler_context(frag, reg) < 0)
+	memset(&sc, 0, sizeof(sc));
+	sc.frag = frag;
+	sc.regs = regs;
+	memset(regs, 0, sizeof(regs));
+
+	if(init_registers(&sc, frag, reg) < 0)
 		return -1;
+	init_scheduler(&sc, frag);
+
 	memset(code, 0, PFPU_PROGSIZE*sizeof(*code));
-	res = schedule(code);
+	res = schedule(&sc, code);
 
 #ifdef REG_STATS
 	printf("regs: %d/%d\n", sc->curr_regs, sc->max_regs);
 #endif
 
-	free(sc->regs);
-	free(sc);
 	if(res < 0)
 		return res;
 	if(frag->vector_mode)
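The diff above removes the file-scope static struct sched_ctx *sc and instead threads the scheduler context through every helper as an explicit first parameter; lnfpus_schedule() now keeps the context and the register array on its own stack (the array as a C99 variable-length array) because, per the in-code comment, the standalone FN environment has no memory allocator. Below is a minimal standalone sketch of that pattern; it is not code from lnfpus.c, and the names demo_ctx, demo_mark and demo_run are hypothetical.

#include <stdio.h>
#include <string.h>

/* hypothetical context: all former global state lives here */
struct demo_ctx {
	int nregs;
	int *use_count;	/* points into caller-provided storage */
};

/* helpers take the context as an explicit first argument */
static void demo_mark(struct demo_ctx *ctx, int reg)
{
	if(reg >= 0 && reg < ctx->nregs)
		ctx->use_count[reg]++;
}

/* the caller owns all storage: context and array sit on its stack */
static void demo_run(int nregs)
{
	struct demo_ctx ctx;
	int use_count[nregs];	/* C99 VLA, no malloc/free needed */
	int i;

	memset(&ctx, 0, sizeof(ctx));
	memset(use_count, 0, sizeof(use_count));
	ctx.nregs = nregs;
	ctx.use_count = use_count;

	for(i = 0; i != nregs; i++)
		demo_mark(&ctx, i);
	for(i = 0; i != nregs; i++)
		printf("reg %d marked %d time(s)\n", i, ctx.use_count[i]);
}

int main(void)
{
	demo_run(4);
	return 0;
}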

