CogVM source as per VMMaker.oscog-eem.3012
Cogit: Fix a Slang slip with CogARMv8Compiler>>computeMaximumSize.
Use SP rather than NativeSPReg within CogARMv8Compiler, since
NativeSPReg is really for the outside world (the Cogit's world).
eliotmiranda committed Aug 2, 2021
1 parent a6c56b7 commit 28ddcc2
Showing 3 changed files with 182 additions and 158 deletions.
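The slip itself is visible in the removed lines of computeMaximumSize below: an inlined non-local return (a Smalltalk ^-return) landed inside a C ?: expression, yielding text of the shape (cond ? return 8; : 0), which no C compiler accepts. The regenerated 3012 code hoists that return into statement position. As a purely illustrative sketch (simplified, assumed names; not the generated code itself), the corrected AndCqRR size computation has roughly this shape:

static sqInt
maximumSizeOfAndCqRR(sqInt constant, sqInt targetReg)
{
	/* -1 and 0 cannot be encoded as ARM64 logical (bitmask) immediates,
	   so assume a two-instruction sequence (8 bytes), plus an extra
	   move (4 bytes) when the target is the stack pointer. */
	if (constant >= -1 && constant <= 0)
		return targetReg == SP ? 12 : 8;
	/* Otherwise assume the constant encodes as a single AND on a logical
	   immediate (4 bytes), again plus the extra move for an SP target.
	   (The real code falls back to the 12/8 answer when it does not encode.) */
	return targetReg == SP ? 8 : 4;
}

The extra 4 bytes for an SP target correspond to the move via RISCTempReg that concretizeLogicalOpCqRDest emits, visible further down in the diff.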
112 changes: 60 additions & 52 deletions spur64src/vm/cogitARMv8.c
@@ -1,9 +1,9 @@
/* Automatically generated by
CCodeGenerator VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb
CCodeGenerator VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623
from
StackToRegisterMappingCogit VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb
StackToRegisterMappingCogit VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623
*/
static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb " __DATE__ ;
static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623 " __DATE__ ;
char *__cogitBuildInfo = __buildInfo;


@@ -3176,6 +3176,7 @@ static sqInt NoDbgRegParms
computeMaximumSize(AbstractInstruction * self_in_computeMaximumSize)
{
sqInt constant;
sqInt constant1;
sqInt imm;
sqInt imm1;
sqInt immediate;
@@ -3305,51 +3306,58 @@ computeMaximumSize(AbstractInstruction * self_in_computeMaximumSize)
case AndCqRR:

/* N.B. For three operand logical ops only support AndCqRR with a NativeSPReg target, used for alignment purposes. */
return ((((((((self_in_computeMaximumSize->operands))[0]) >= -1) && ((((self_in_computeMaximumSize->operands))[0]) <= 0))
? return 8;

: 0),

/* First, determine the element size. */
(imm1 = ((self_in_computeMaximumSize->operands))[0]),
(size1 = 32),
while (1) {
mask1 = (1ULL << size1) - 1;
if (!(((imm1 & mask1) != (((usqInt)(imm1)) >> size1)
? ((size1 = size1 * 2),
0)
: size1 > 2))) break;
size1 = size1 / 2;
}
(mask1 = ((usqInt)((0xFFFFFFFFFFFFFFFFULL))) >> (64 - size1)),
(imm1 = imm1 & mask1),
(isShiftedMask(self_in_computeMaximumSize, imm1)
? ((rotateCount1 = countTrailingZeros(self_in_computeMaximumSize, imm1)),
(numTrailingOnes1 = countTrailingOnes(self_in_computeMaximumSize, ((usqInt)(imm1)) >> rotateCount1)))
: ((imm1 = imm1 | (~(usqIntptr_t)mask1)),
(!(isShiftedMask(self_in_computeMaximumSize, imm1))
? return 8;
/* begin isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */
constant1 = ((self_in_computeMaximumSize->operands))[0];
if (((constant1 >= -1) && (constant1 <= 0))) {
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 12
: 8);
}

: 0),
(numLeadingOnes1 = countLeadingOnes(self_in_computeMaximumSize, imm1)),
(rotateCount1 = 64 - numLeadingOnes1),
(numTrailingOnes1 = (numLeadingOnes1 + (countTrailingOnes(self_in_computeMaximumSize, imm1))) - (64 - size1)))),
assert(size1 > rotateCount1),

/* If size has a 1 in the n'th bit, create a value that has zeroes in bits [0, n] and ones above that. */
(immr2 = (size1 - rotateCount1) & (size1 - 1)),

/* Or the CTO value into the low bits, which must be below the Nth bit mentioned above. */
(nImms1 = ((sqInt)((usqInt)((~(usqIntptr_t)(size1 - 1))) << 1))),

/* Extract the seventh bit and toggle it to create the N field. */
(nImms1 = nImms1 | (numTrailingOnes1 - 1)),
(n2 = (((nImms1) >> 6) & 1) ^ 1),
(nImms1 = nImms1 & 0x3F),
assert((decode64Immsimmr(self_in_computeMaximumSize, nImms1, immr2)) == (((usqInt) (((self_in_computeMaximumSize->operands))[0])))),
)) + (((((self_in_computeMaximumSize->operands))[2]) == NativeSPReg
? 4
: 0));
/* First, determine the element size. */
imm1 = constant1;
size1 = 32;
while (1) {
mask1 = (1ULL << size1) - 1;
if (!(((imm1 & mask1) != (((usqInt)(imm1)) >> size1)
? ((size1 = size1 * 2),
0)
: size1 > 2))) break;
size1 = size1 / 2;
}
mask1 = ((usqInt)((0xFFFFFFFFFFFFFFFFULL))) >> (64 - size1);
imm1 = imm1 & mask1;
if (isShiftedMask(self_in_computeMaximumSize, imm1)) {
rotateCount1 = countTrailingZeros(self_in_computeMaximumSize, imm1);
numTrailingOnes1 = countTrailingOnes(self_in_computeMaximumSize, ((usqInt)(imm1)) >> rotateCount1);
}
else {
imm1 = imm1 | (~(usqIntptr_t)mask1);
if (!(isShiftedMask(self_in_computeMaximumSize, imm1))) {
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 12
: 8);
}
numLeadingOnes1 = countLeadingOnes(self_in_computeMaximumSize, imm1);
rotateCount1 = 64 - numLeadingOnes1;
numTrailingOnes1 = (numLeadingOnes1 + (countTrailingOnes(self_in_computeMaximumSize, imm1))) - (64 - size1);
}
assert(size1 > rotateCount1);

/* If size has a 1 in the n'th bit, create a value that has zeroes in bits [0, n] and ones above that. */
immr2 = (size1 - rotateCount1) & (size1 - 1);

/* Or the CTO value into the low bits, which must be below the Nth bit mentioned above. */
nImms1 = ((sqInt)((usqInt)((~(usqIntptr_t)(size1 - 1))) << 1));

/* Extract the seventh bit and toggle it to create the N field. */
nImms1 = nImms1 | (numTrailingOnes1 - 1);
n2 = (((nImms1) >> 6) & 1) ^ 1;
nImms1 = nImms1 & 0x3F;
assert((decode64Immsimmr(self_in_computeMaximumSize, nImms1, immr2)) == (((usqInt) constant1)));
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 8
: 4);

case SubRR:
case SubRRR:
@@ -3689,7 +3697,7 @@ concretizeLogicalOpCqRDest(AbstractInstruction * self_in_concretizeLogicalOpCqRD
/* N.B. For three operand logical ops only support AndCq: const R: reg R: NativeSPReg, which is used for alignment. */
srcReg = ((self_in_concretizeLogicalOpCqRDest->operands))[1];
effectiveDestReg = ((((self_in_concretizeLogicalOpCqRDest->opcode)) == AndCqRR)
&& (destReg == NativeSPReg)
&& (destReg == SP)
? RISCTempReg
: destReg);
/* begin isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */
@@ -3766,7 +3774,7 @@ concretizeLogicalOpCqRDest(AbstractInstruction * self_in_concretizeLogicalOpCqRD
offset = 4;
l1: /* end isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */;
if (!((((self_in_concretizeLogicalOpCqRDest->opcode)) == AndCqRR)
&& (destReg == NativeSPReg))) {
&& (destReg == SP))) {
return offset;
}
((self_in_concretizeLogicalOpCqRDest->machineCode))[offset / 4] = (movernrd(self_in_concretizeLogicalOpCqRDest, effectiveDestReg, destReg));
@@ -4678,7 +4686,7 @@ dispatchConcretize(AbstractInstruction * self_in_dispatchConcretize)
((self_in_dispatchConcretize->machineCode))[0] = (((0xD6400000U) + (((sqInt)((usqInt)(XZR) << 16)))) + (((sqInt)((usqInt)(LR) << 5))));
return 4;
}
((self_in_dispatchConcretize->machineCode))[0] = (addrnrdimmshiftBy12(self_in_dispatchConcretize, NativeSPReg, NativeSPReg, offset5, 0));
((self_in_dispatchConcretize->machineCode))[0] = (addrnrdimmshiftBy12(self_in_dispatchConcretize, SP, SP, offset5, 0));
((self_in_dispatchConcretize->machineCode))[1] = (((0xD6400000U) + (((sqInt)((usqInt)(XZR) << 16)))) + (((sqInt)((usqInt)(LR) << 5))));
return 8;

@@ -6152,7 +6160,7 @@ genLoadCStackPointers(AbstractInstruction * self_in_genLoadCStackPointers)
sqInt operandOne1;

if (((cStackPointerAddress()) + 8) == (cFramePointerAddress())) {
genoperandoperandoperand(MoveAwRR, cStackPointerAddress(), NativeSPReg, FPReg);
genoperandoperandoperand(MoveAwRR, cStackPointerAddress(), SP, FP);
return 0;
}
/* begin gen:literal:operand: */
@@ -6168,7 +6176,7 @@ genLoadCStackPointers(AbstractInstruction * self_in_genLoadCStackPointers)
static void NoDbgRegParms
genLoadNativeSPRegWithAlignedSPReg(AbstractInstruction * self_in_genLoadNativeSPRegWithAlignedSPReg)
{
gAndCqRR((-1 - (((BytesPerWord * 2) - 1))), SPReg, NativeSPReg);
gAndCqRR((-1 - (((BytesPerWord * 2) - 1))), SPReg, SP);
}


116 changes: 62 additions & 54 deletions spurlowcode64src/vm/cogitARMv8.c
@@ -1,9 +1,9 @@
/* Automatically generated by
CCodeGenerator VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb
CCodeGenerator VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623
from
StackToRegisterMappingCogit VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb
StackToRegisterMappingCogit VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623
*/
static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.3011 uuid: c31b5783-0c4c-472a-a602-4eeb7922c5cb " __DATE__ ;
static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.3012 uuid: b743cd66-0506-49a9-a445-95d97e2ab623 " __DATE__ ;
char *__cogitBuildInfo = __buildInfo;


@@ -3359,6 +3359,7 @@ static sqInt NoDbgRegParms
computeMaximumSize(AbstractInstruction * self_in_computeMaximumSize)
{
sqInt constant;
sqInt constant1;
sqInt imm;
sqInt imm1;
sqInt immediate;
@@ -3488,51 +3489,58 @@ computeMaximumSize(AbstractInstruction * self_in_computeMaximumSize)
case AndCqRR:

/* N.B. For three operand logical ops only support AndCqRR with a NativeSPReg target, used for alignment purposes. */
return ((((((((self_in_computeMaximumSize->operands))[0]) >= -1) && ((((self_in_computeMaximumSize->operands))[0]) <= 0))
? return 8;

: 0),

/* First, determine the element size. */
(imm1 = ((self_in_computeMaximumSize->operands))[0]),
(size1 = 32),
while (1) {
mask1 = (1ULL << size1) - 1;
if (!(((imm1 & mask1) != (((usqInt)(imm1)) >> size1)
? ((size1 = size1 * 2),
0)
: size1 > 2))) break;
size1 = size1 / 2;
}
(mask1 = ((usqInt)((0xFFFFFFFFFFFFFFFFULL))) >> (64 - size1)),
(imm1 = imm1 & mask1),
(isShiftedMask(self_in_computeMaximumSize, imm1)
? ((rotateCount1 = countTrailingZeros(self_in_computeMaximumSize, imm1)),
(numTrailingOnes1 = countTrailingOnes(self_in_computeMaximumSize, ((usqInt)(imm1)) >> rotateCount1)))
: ((imm1 = imm1 | (~(usqIntptr_t)mask1)),
(!(isShiftedMask(self_in_computeMaximumSize, imm1))
? return 8;
/* begin isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */
constant1 = ((self_in_computeMaximumSize->operands))[0];
if (((constant1 >= -1) && (constant1 <= 0))) {
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 12
: 8);
}

: 0),
(numLeadingOnes1 = countLeadingOnes(self_in_computeMaximumSize, imm1)),
(rotateCount1 = 64 - numLeadingOnes1),
(numTrailingOnes1 = (numLeadingOnes1 + (countTrailingOnes(self_in_computeMaximumSize, imm1))) - (64 - size1)))),
assert(size1 > rotateCount1),

/* If size has a 1 in the n'th bit, create a value that has zeroes in bits [0, n] and ones above that. */
(immr2 = (size1 - rotateCount1) & (size1 - 1)),

/* Or the CTO value into the low bits, which must be below the Nth bit mentioned above. */
(nImms1 = ((sqInt)((usqInt)((~(usqIntptr_t)(size1 - 1))) << 1))),

/* Extract the seventh bit and toggle it to create the N field. */
(nImms1 = nImms1 | (numTrailingOnes1 - 1)),
(n2 = (((nImms1) >> 6) & 1) ^ 1),
(nImms1 = nImms1 & 0x3F),
assert((decode64Immsimmr(self_in_computeMaximumSize, nImms1, immr2)) == (((usqInt) (((self_in_computeMaximumSize->operands))[0])))),
)) + (((((self_in_computeMaximumSize->operands))[2]) == NativeSPReg
? 4
: 0));
/* First, determine the element size. */
imm1 = constant1;
size1 = 32;
while (1) {
mask1 = (1ULL << size1) - 1;
if (!(((imm1 & mask1) != (((usqInt)(imm1)) >> size1)
? ((size1 = size1 * 2),
0)
: size1 > 2))) break;
size1 = size1 / 2;
}
mask1 = ((usqInt)((0xFFFFFFFFFFFFFFFFULL))) >> (64 - size1);
imm1 = imm1 & mask1;
if (isShiftedMask(self_in_computeMaximumSize, imm1)) {
rotateCount1 = countTrailingZeros(self_in_computeMaximumSize, imm1);
numTrailingOnes1 = countTrailingOnes(self_in_computeMaximumSize, ((usqInt)(imm1)) >> rotateCount1);
}
else {
imm1 = imm1 | (~(usqIntptr_t)mask1);
if (!(isShiftedMask(self_in_computeMaximumSize, imm1))) {
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 12
: 8);
}
numLeadingOnes1 = countLeadingOnes(self_in_computeMaximumSize, imm1);
rotateCount1 = 64 - numLeadingOnes1;
numTrailingOnes1 = (numLeadingOnes1 + (countTrailingOnes(self_in_computeMaximumSize, imm1))) - (64 - size1);
}
assert(size1 > rotateCount1);

/* If size has a 1 in the n'th bit, create a value that has zeroes in bits [0, n] and ones above that. */
immr2 = (size1 - rotateCount1) & (size1 - 1);

/* Or the CTO value into the low bits, which must be below the Nth bit mentioned above. */
nImms1 = ((sqInt)((usqInt)((~(usqIntptr_t)(size1 - 1))) << 1));

/* Extract the seventh bit and toggle it to create the N field. */
nImms1 = nImms1 | (numTrailingOnes1 - 1);
n2 = (((nImms1) >> 6) & 1) ^ 1;
nImms1 = nImms1 & 0x3F;
assert((decode64Immsimmr(self_in_computeMaximumSize, nImms1, immr2)) == (((usqInt) constant1)));
return ((((self_in_computeMaximumSize->operands))[2]) == SP
? 8
: 4);

case SubRR:
case SubRRR:
@@ -3872,7 +3880,7 @@ concretizeLogicalOpCqRDest(AbstractInstruction * self_in_concretizeLogicalOpCqRD
/* N.B. For three operand logical ops only support AndCq: const R: reg R: NativeSPReg, which is used for alignment. */
srcReg = ((self_in_concretizeLogicalOpCqRDest->operands))[1];
effectiveDestReg = ((((self_in_concretizeLogicalOpCqRDest->opcode)) == AndCqRR)
&& (destReg == NativeSPReg)
&& (destReg == SP)
? RISCTempReg
: destReg);
/* begin isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */
Expand Down Expand Up @@ -3949,7 +3957,7 @@ concretizeLogicalOpCqRDest(AbstractInstruction * self_in_concretizeLogicalOpCqRD
offset = 4;
l1: /* end isImmNImmSImmREncodableBitmask:ifTrue:ifFalse: */;
if (!((((self_in_concretizeLogicalOpCqRDest->opcode)) == AndCqRR)
&& (destReg == NativeSPReg))) {
&& (destReg == SP))) {
return offset;
}
((self_in_concretizeLogicalOpCqRDest->machineCode))[offset / 4] = (movernrd(self_in_concretizeLogicalOpCqRDest, effectiveDestReg, destReg));
@@ -4861,7 +4869,7 @@ dispatchConcretize(AbstractInstruction * self_in_dispatchConcretize)
((self_in_dispatchConcretize->machineCode))[0] = (((0xD6400000U) + (((sqInt)((usqInt)(XZR) << 16)))) + (((sqInt)((usqInt)(LR) << 5))));
return 4;
}
((self_in_dispatchConcretize->machineCode))[0] = (addrnrdimmshiftBy12(self_in_dispatchConcretize, NativeSPReg, NativeSPReg, offset5, 0));
((self_in_dispatchConcretize->machineCode))[0] = (addrnrdimmshiftBy12(self_in_dispatchConcretize, SP, SP, offset5, 0));
((self_in_dispatchConcretize->machineCode))[1] = (((0xD6400000U) + (((sqInt)((usqInt)(XZR) << 16)))) + (((sqInt)((usqInt)(LR) << 5))));
return 8;

@@ -6335,7 +6343,7 @@ genLoadCStackPointers(AbstractInstruction * self_in_genLoadCStackPointers)
sqInt operandOne1;

if (((cStackPointerAddress()) + 8) == (cFramePointerAddress())) {
genoperandoperandoperand(MoveAwRR, cStackPointerAddress(), NativeSPReg, FPReg);
genoperandoperandoperand(MoveAwRR, cStackPointerAddress(), SP, FP);
return 0;
}
/* begin gen:literal:operand: */
@@ -6351,7 +6359,7 @@ genLoadCStackPointers(AbstractInstruction * self_in_genLoadCStackPointers)
static void NoDbgRegParms
genLoadNativeSPRegWithAlignedSPReg(AbstractInstruction * self_in_genLoadNativeSPRegWithAlignedSPReg)
{
gAndCqRR((-1 - (((BytesPerWord * 2) - 1))), SPReg, NativeSPReg);
gAndCqRR((-1 - (((BytesPerWord * 2) - 1))), SPReg, SP);
}


@@ -7391,7 +7399,7 @@ rewriteImm19JumpBeforetarget(AbstractInstruction * self_in_rewriteImm19JumpBefor
static sqInt NoDbgRegParms
rewriteImm26JumpBeforetarget(AbstractInstruction * self_in_rewriteImm26JumpBeforetarget, sqInt followingAddress, sqInt targetAddress)
{
usqInt instrOpcode;
sqInt instrOpcode;
sqInt mcpc;
sqInt offset;

@@ -7401,7 +7409,7 @@ rewriteImm26JumpBeforetarget(AbstractInstruction * self_in_rewriteImm26JumpBefor
instrOpcode = ((instructionBeforeAddress(self_in_rewriteImm26JumpBeforetarget, followingAddress))) >> 26;
assert((instrOpcode == 5)
|| (instrOpcode == 37));
codeLong32Atput(mcpc, (instrOpcode << 26) + (((offset) >> 2) & (0x3FFFFFF)));
codeLong32Atput(mcpc, (((sqInt)((usqInt)(instrOpcode) << 26))) + (((offset) >> 2) & (0x3FFFFFF)));
return 4;
}
