Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,192 @@

# Mach-O constructor section: run _OPENSSL_cpuid_setup automatically
# when the image is loaded (populates the capability vector).
.mod_init_func
.p2align 3
.quad _OPENSSL_cpuid_setup

.text

.globl _OPENSSL_atomic_add

# int OPENSSL_atomic_add(int *val, int amount)
# SysV AMD64: rdi = val, rsi = amount.  Atomically adds and returns the
# new value (sign-extended to rax).
.p2align 4
_OPENSSL_atomic_add:
movl (%rdi),%eax
# CAS loop: r8 = eax + amount, retry until *val is swapped unchanged.
L$spin: leaq (%rsi,%rax,1),%r8
.byte 0xf0
# ^ 0xf0 = LOCK prefix for the cmpxchg below
cmpxchgl %r8d,(%rdi)
jne L$spin
movl %r8d,%eax
.byte 0x48,0x98
# ^ 0x48,0x98 = cdqe: sign-extend eax into rax for the return value
.byte 0xf3,0xc3
# ^ 0xf3,0xc3 = "rep ret" (branch-predictor-friendly return on AMD)


.globl _OPENSSL_rdtsc

# unsigned long long OPENSSL_rdtsc(void)
# Returns the time-stamp counter.  rdtsc yields edx:eax; fold the high
# half into rax so the 64-bit value is returned in one register.
.p2align 4
_OPENSSL_rdtsc:
rdtsc
shlq $32,%rdx
orq %rdx,%rax
.byte 0xf3,0xc3
# ^ rep ret


.globl _OPENSSL_ia32_cpuid

# unsigned long long OPENSSL_ia32_cpuid(void)
# Probes CPUID and returns the (possibly adjusted) leaf-1 feature bits:
# low 32 bits = edx features, high 32 bits = ecx features.
# rbx is callee-saved, so it is preserved in r8 across cpuid.
.p2align 4
_OPENSSL_ia32_cpuid:
movq %rbx,%r8

# Leaf 0: r11d = max standard leaf; ebx/edx/ecx = vendor string.
xorl %eax,%eax
cpuid
movl %eax,%r11d

# r9d = 0 iff vendor == "GenuineIntel"
# (1970169159="Genu", 1231384169="ineI", 1818588270="ntel").
xorl %eax,%eax
cmpl $1970169159,%ebx
setne %al
movl %eax,%r9d
cmpl $1231384169,%edx
setne %al
orl %eax,%r9d
cmpl $1818588270,%ecx
setne %al
orl %eax,%r9d
jz L$intel

# r10d = 0 iff vendor == "AuthenticAMD"
# (1752462657="Auth", 1769238117="enti", 1145913699="cAMD").
cmpl $1752462657,%ebx
setne %al
movl %eax,%r10d
cmpl $1769238117,%edx
setne %al
orl %eax,%r10d
cmpl $1145913699,%ecx
setne %al
orl %eax,%r10d
jnz L$intel


# AMD path: need extended leaf 0x80000008 for the core count.
movl $2147483648,%eax
cpuid
cmpl $2147483656,%eax
jb L$intel

# Leaf 0x80000008: cl = (number of cores - 1); r10 = core count.
movl $2147483656,%eax
cpuid
movzbq %cl,%r10
incq %r10

# Leaf 1: edx/ecx feature flags.
movl $1,%eax
cpuid
# Bit 28 of edx = HTT.  If logical-processor count (bl, from ebx>>16)
# does not exceed the core count, the "hyper-threading" bit is bogus:
# clear it (4026531839 = ~(1<<28)).
btl $28,%edx
jnc L$done
shrl $16,%ebx
cmpb %r10b,%bl
ja L$done
andl $4026531839,%edx
jmp L$done

L$intel:
# Intel (or unknown) path.  Default r10d = -1 ("no cache info").
cmpl $4,%r11d
movl $-1,%r10d
jb L$nocacheinfo

# Leaf 4 (deterministic cache parameters), ecx=0:
# r10d = (eax >> 14) & 0xfff = max cores per package - 1.
movl $4,%eax
movl $0,%ecx
cpuid
movl %eax,%r10d
shrl $14,%r10d
andl $4095,%r10d

L$nocacheinfo:
movl $1,%eax
cpuid
cmpl $0,%r9d
jne L$notintel
# Genuine Intel: set bit 20 (1048576) as an internal "Intel" marker,
# and on family 15 (P4, ah==15 after mask) also set bit 30
# (1073741824) — both reserved bits reused by OpenSSL.
orl $1048576,%edx
andb $15,%ah
cmpb $15,%ah
je L$notintel
orl $1073741824,%edx
L$notintel:
# If HTT (bit 28) not reported, done; otherwise first clear it ...
btl $28,%edx
jnc L$done
andl $4026531839,%edx
cmpl $0,%r10d
je L$done

# ... then re-assert it (268435456 = 1<<28) unless only one logical
# processor is reported (bl <= 1 after ebx>>16).
orl $268435456,%edx
shrl $16,%ebx
cmpb $1,%bl
ja L$done
andl $4026531839,%edx
L$done:
# Return (ecx_features << 32) | edx_features; restore rbx.
shlq $32,%rcx
movl %edx,%eax
movq %r8,%rbx
orq %rcx,%rax
.byte 0xf3,0xc3
# ^ rep ret


.globl _OPENSSL_cleanse

# void OPENSSL_cleanse(void *ptr, size_t len)
# rdi = ptr, rsi = len.  Securely zeroes the buffer; written in asm so
# the compiler cannot optimize the stores away.  Buffers of 15 bytes or
# more are zeroed 8 bytes at a time after aligning rdi to 8.
.p2align 4
_OPENSSL_cleanse:
xorq %rax,%rax
cmpq $15,%rsi
jae L$ot
cmpq $0,%rsi
je L$ret
# Byte-at-a-time tail/short-buffer loop.
L$ittle:
movb %al,(%rdi)
subq $1,%rsi
leaq 1(%rdi),%rdi
jnz L$ittle
L$ret:
.byte 0xf3,0xc3
.p2align 4
# Long buffer: zero leading bytes until rdi is 8-byte aligned ...
L$ot:
testq $7,%rdi
jz L$aligned
movb %al,(%rdi)
leaq -1(%rsi),%rsi
leaq 1(%rdi),%rdi
jmp L$ot
# ... then store qwords while at least 8 bytes remain, and finish any
# remainder with the byte loop above.
L$aligned:
movq %rax,(%rdi)
leaq -8(%rsi),%rsi
testq $-8,%rsi
leaq 8(%rdi),%rdi
jnz L$aligned
cmpq $0,%rsi
jne L$ittle
.byte 0xf3,0xc3

.globl _OPENSSL_wipe_cpu

# char *OPENSSL_wipe_cpu(void)
# Scrubs all xmm registers and every caller-saved GPR (except rax) so
# secrets do not linger in register state.  Returns rax = rsp+8, i.e.
# the stack address just above the return address.
.p2align 4
_OPENSSL_wipe_cpu:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
pxor %xmm8,%xmm8
pxor %xmm9,%xmm9
pxor %xmm10,%xmm10
pxor %xmm11,%xmm11
pxor %xmm12,%xmm12
pxor %xmm13,%xmm13
pxor %xmm14,%xmm14
pxor %xmm15,%xmm15
xorq %rcx,%rcx
xorq %rdx,%rdx
xorq %rsi,%rsi
xorq %rdi,%rdi
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
leaq 8(%rsp),%rax
.byte 0xf3,0xc3
# ^ rep ret

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,336 @@
.file "x86-mont.s"
.text
# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
#                 const BN_ULONG *np, const BN_ULONG *n0, int num)
# i386 cdecl: after the four register pushes the arguments sit at
# 20(%esp)..40(%esp).  Montgomery multiplication rp = ap*bp/R mod np.
# Returns 1 on success; returns 0 (eax preset below) when num < 4 so
# the caller falls back to the generic C path.
.globl _bn_mul_mont
.align 4
_bn_mul_mont:
L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
cmpl $4,%edi
jl L000just_leave
# Carve a scratch frame of num+2 words below esp; ebp keeps the
# original esp for the final restore (saved into the frame later).
leal 20(%esp),%esi
leal 24(%esp),%edx
movl %esp,%ebp
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%esp
negl %edi
# Page-colouring: keep the frame and bp in distinct 2K halves, then
# 64-byte align the frame (cache-line alignment).
movl %esp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%esp
xorl %esp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%esp
andl $-64,%esp
# Copy the five pointer/word arguments (rp, ap, bp, np, n0 value) and
# the saved esp into the frame: 4..24(%esp).
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %edx,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %ebp,24(%esp)
# ebx = num-1 (loop bound, counted from -3 of num+2).  Detect the
# squaring case: taken when ap == bp (edx = ap-bp == 0) and num is
# even (ebp = (num-2)&1 == 0).
movl 8(%esp),%esi
leal 1(%ebx),%ebp
movl 12(%esp),%edi
xorl %ecx,%ecx
movl %esi,%edx
andl $1,%ebp
subl %edi,%edx
leal 4(%edi,%ebx,4),%eax
orl %edx,%ebp
movl (%edi),%edi
jz L001bn_sqr_mont
movl %eax,28(%esp)
movl (%esi),%eax
xorl %edx,%edx
.align 4,0x90
# First pass: tp[] = ap[] * bp[0] (plain mul loop, carry in edx).
L002mull:
movl %edx,%ebp
mull %edi
addl %eax,%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
movl (%esi,%ecx,4),%eax
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl L002mull
movl %edx,%ebp
mull %edi
movl 20(%esp),%edi
addl %ebp,%eax
movl 16(%esp),%esi
adcl $0,%edx
# edi = m = tp[0]*n0 mod 2^32 — the Montgomery reduction multiplier.
imull 32(%esp),%edi
movl %eax,32(%esp,%ebx,4)
xorl %ecx,%ecx
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
movl (%esi),%eax
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
incl %ecx
jmp L0032ndmadd
.align 4,0x90
# Outer iterations: tp[] += ap[] * bp[i] (multiply-accumulate).
L0041stmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl L0041stmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
addl %eax,%ebp
adcl $0,%edx
imull 32(%esp),%edi
xorl %ecx,%ecx
addl 36(%esp,%ebx,4),%edx
movl %ebp,32(%esp,%ebx,4)
adcl $0,%ecx
movl (%esi),%eax
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
movl $1,%ecx
.align 4,0x90
# Montgomery reduction: tp[] += np[] * m, then shift down one word.
L0032ndmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl L0032ndmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
xorl %eax,%eax
movl 12(%esp),%ecx
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
leal 4(%ecx),%ecx
movl %edx,32(%esp,%ebx,4)
cmpl 28(%esp),%ecx
movl %eax,36(%esp,%ebx,4)
je L005common_tail
# Advance to next bp word and restart the multiply-accumulate pass.
movl (%ecx),%edi
movl 8(%esp),%esi
movl %ecx,12(%esp)
xorl %ecx,%ecx
xorl %edx,%edx
movl (%esi),%eax
jmp L0041stmadd
.align 4,0x90
# Dedicated squaring path (ap == bp): exploits symmetry, doubling the
# cross products (lea (%ebx,%eax,2)) instead of recomputing them.
L001bn_sqr_mont:
movl %ebx,(%esp)
movl %ecx,12(%esp)
movl %edi,%eax
mull %edi
movl %eax,32(%esp)
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
incl %ecx
.align 4,0x90
L006sqr:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal 1(%ecx),%ecx
adcl $0,%edx
leal (%ebx,%eax,2),%ebp
shrl $31,%eax
cmpl (%esp),%ecx
movl %eax,%ebx
movl %ebp,28(%esp,%ecx,4)
jl L006sqr
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
leal (%ebx,%eax,2),%ebp
imull 32(%esp),%edi
shrl $31,%eax
movl %ebp,32(%esp,%ecx,4)
leal (%eax,%edx,2),%ebp
movl (%esi),%eax
shrl $31,%edx
movl %ebp,36(%esp,%ecx,4)
movl %edx,40(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
movl %ecx,%ebx
adcl $0,%edx
movl 4(%esi),%eax
movl $1,%ecx
.align 4,0x90
# Reduction loop for the squaring path (two madds per iteration).
L0073rdmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
movl 4(%esi,%ecx,4),%eax
adcl $0,%edx
movl %ebp,28(%esp,%ecx,4)
movl %edx,%ebp
mull %edi
addl 36(%esp,%ecx,4),%ebp
leal 2(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl L0073rdmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
movl 12(%esp),%ecx
xorl %eax,%eax
movl 8(%esp),%esi
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
movl %edx,32(%esp,%ebx,4)
cmpl %ebx,%ecx
movl %eax,36(%esp,%ebx,4)
je L005common_tail
movl 4(%esi,%ecx,4),%edi
leal 1(%ecx),%ecx
movl %edi,%eax
movl %ecx,12(%esp)
mull %edi
addl 32(%esp,%ecx,4),%eax
adcl $0,%edx
movl %eax,32(%esp,%ecx,4)
xorl %ebp,%ebp
cmpl %ebx,%ecx
leal 1(%ecx),%ecx
je L008sqrlast
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
.align 4,0x90
L009sqradd:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal (%eax,%eax,1),%ebp
adcl $0,%edx
shrl $31,%eax
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%eax
addl %ebx,%ebp
adcl $0,%eax
cmpl (%esp),%ecx
movl %ebp,28(%esp,%ecx,4)
movl %eax,%ebx
jle L009sqradd
movl %edx,%ebp
addl %edx,%edx
shrl $31,%ebp
addl %ebx,%edx
adcl $0,%ebp
L008sqrlast:
movl 20(%esp),%edi
movl 16(%esp),%esi
imull 32(%esp),%edi
addl 32(%esp,%ecx,4),%edx
movl (%esi),%eax
adcl $0,%ebp
movl %edx,32(%esp,%ecx,4)
movl %ebp,36(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
leal -1(%ecx),%ebx
adcl $0,%edx
movl $1,%ecx
movl 4(%esi),%eax
jmp L0073rdmadd
.align 4,0x90
# Common tail: conditionally subtract the modulus (constant-flow
# select via mask in eax), copy the result to rp, wipe the frame.
L005common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 4,0x90
L010sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge L010sub
sbbl $0,%eax
# eax = 0 or -1 borrow mask: select tp (no borrow) vs rp (borrow).
andl %eax,%esi
notl %eax
movl %edi,%ebp
andl %eax,%ebp
orl %ebp,%esi
.align 4,0x90
L011copy:
movl (%esi,%ebx,4),%eax
movl %eax,(%edi,%ebx,4)
movl %ecx,32(%esp,%ebx,4)
decl %ebx
jge L011copy
# Restore caller's esp (stashed at 24(%esp)) and report success.
movl 24(%esp),%esp
movl $1,%eax
L000just_leave:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
# ASCII id string: "Montgomery Multiplication for x86, CRYPTOGAMS by
# <appro@openssl.org>"
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,224 @@
.file "rc4-586.s"
.text
# void RC4(RC4_KEY *key, size_t len, const unsigned char *in,
#          unsigned char *out)
# i386 cdecl: args at 20..32(%esp) after the four pushes.  Key layout
# (from the code below): x at 0(key), y at 4(key), state array at
# 8(key); 256(%edi) == -1 after the +8 bias marks a byte-array
# ("char") key schedule, otherwise the schedule is an int array.
.globl _RC4
.align 4
_RC4:
L_RC4_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%edx
movl 28(%esp),%esi
movl 32(%esp),%ebp
xorl %eax,%eax
xorl %ebx,%ebx
cmpl $0,%edx
je L000abort
# al = key->x, bl = key->y; edi now points at the state array.
movb (%edi),%al
movb 4(%edi),%bl
addl $8,%edi
# ecx = input end pointer; ebp = out - in (so out byte is addressed
# relative to the input cursor esi).
leal (%esi,%edx,1),%ecx
subl %esi,%ebp
movl %ecx,24(%esp)
incb %al
cmpl $-1,256(%edi)
je L001RC4_CHAR
movl (%edi,%eax,4),%ecx
# Process 4 bytes per iteration while len is a multiple of 4 remains.
andl $-4,%edx
jz L002loop1
leal -4(%esi,%edx,1),%edx
movl %edx,28(%esp)
movl %ebp,32(%esp)
.align 4,0x90
# Unrolled x4 loop: four RC4 steps accumulate one keystream dword in
# ebp (built up byte-by-byte with rorl $8), XORed against 4 input
# bytes at a time.
L003loop4:
addb %cl,%bl
movl (%edi,%ebx,4),%edx
movl %ecx,(%edi,%ebx,4)
movl %edx,(%edi,%eax,4)
addl %ecx,%edx
incb %al
andl $255,%edx
movl (%edi,%eax,4),%ecx
movl (%edi,%edx,4),%ebp
addb %cl,%bl
movl (%edi,%ebx,4),%edx
movl %ecx,(%edi,%ebx,4)
movl %edx,(%edi,%eax,4)
addl %ecx,%edx
incb %al
andl $255,%edx
rorl $8,%ebp
movl (%edi,%eax,4),%ecx
orl (%edi,%edx,4),%ebp
addb %cl,%bl
movl (%edi,%ebx,4),%edx
movl %ecx,(%edi,%ebx,4)
movl %edx,(%edi,%eax,4)
addl %ecx,%edx
incb %al
andl $255,%edx
rorl $8,%ebp
movl (%edi,%eax,4),%ecx
orl (%edi,%edx,4),%ebp
addb %cl,%bl
movl (%edi,%ebx,4),%edx
movl %ecx,(%edi,%ebx,4)
movl %edx,(%edi,%eax,4)
addl %ecx,%edx
incb %al
andl $255,%edx
rorl $8,%ebp
movl 32(%esp),%ecx
orl (%edi,%edx,4),%ebp
rorl $8,%ebp
xorl (%esi),%ebp
cmpl 28(%esp),%esi
movl %ebp,(%ecx,%esi,1)
leal 4(%esi),%esi
movl (%edi,%eax,4),%ecx
jb L003loop4
cmpl 24(%esp),%esi
je L004done
movl 32(%esp),%ebp
.align 4,0x90
# Byte-at-a-time loop for the tail (and for short inputs).
L002loop1:
addb %cl,%bl
movl (%edi,%ebx,4),%edx
movl %ecx,(%edi,%ebx,4)
movl %edx,(%edi,%eax,4)
addl %ecx,%edx
incb %al
andl $255,%edx
movl (%edi,%edx,4),%edx
xorb (%esi),%dl
leal 1(%esi),%esi
movl (%edi,%eax,4),%ecx
cmpl 24(%esp),%esi
movb %dl,-1(%ebp,%esi,1)
jb L002loop1
jmp L004done
.align 4,0x90
# Byte-array key schedule variant (smaller cache footprint).
L001RC4_CHAR:
movzbl (%edi,%eax,1),%ecx
L005cloop1:
addb %cl,%bl
movzbl (%edi,%ebx,1),%edx
movb %cl,(%edi,%ebx,1)
movb %dl,(%edi,%eax,1)
addb %cl,%dl
movzbl (%edi,%edx,1),%edx
addb $1,%al
xorb (%esi),%dl
leal 1(%esi),%esi
movzbl (%edi,%eax,1),%ecx
cmpl 24(%esp),%esi
movb %dl,-1(%ebp,%esi,1)
jb L005cloop1
L004done:
# Store updated x (undo the pre-increment) and y back into the key.
decb %al
movb %bl,-4(%edi)
movb %al,-8(%edi)
L000abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
# void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data)
# i386 cdecl: args at 20..28(%esp).  Builds the RC4 key schedule.
# Capability bit 20 of OPENSSL_ia32cap_P selects the byte-array
# ("char") schedule; otherwise the int-array schedule is built.
# NOTE(review): leal _OPENSSL_ia32cap_P,%edx is an absolute address —
# assumes non-PIC build; confirm for shared-library targets.
.globl _RC4_set_key
.align 4
_RC4_set_key:
L_RC4_set_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%esi
leal _OPENSSL_ia32cap_P,%edx
# edi -> state array; esi -> end of key data; ebp = -len (counts up
# to 0, with -4(%edi) holding -len for wraparound reload).
leal 8(%edi),%edi
leal (%esi,%ebp,1),%esi
negl %ebp
xorl %eax,%eax
movl %ebp,-4(%edi)
btl $20,(%edx)
jc L006c1stloop
.align 4,0x90
# Int-array schedule: identity permutation S[i] = i.
L007w1stloop:
movl %eax,(%edi,%eax,4)
addb $1,%al
jnc L007w1stloop
xorl %ecx,%ecx
xorl %edx,%edx
.align 4,0x90
# Key-mixing pass: dl += S[cl] + key[ebp]; swap S[cl], S[dl].
L008w2ndloop:
movl (%edi,%ecx,4),%eax
addb (%esi,%ebp,1),%dl
addb %al,%dl
addl $1,%ebp
movl (%edi,%edx,4),%ebx
jnz L009wnowrap
movl -4(%edi),%ebp
L009wnowrap:
movl %eax,(%edi,%edx,4)
movl %ebx,(%edi,%ecx,4)
addb $1,%cl
jnc L008w2ndloop
jmp L010exit
.align 4,0x90
# Byte-array schedule: same algorithm on byte elements.
L006c1stloop:
movb %al,(%edi,%eax,1)
addb $1,%al
jnc L006c1stloop
xorl %ecx,%ecx
xorl %edx,%edx
xorl %ebx,%ebx
.align 4,0x90
L011c2ndloop:
movb (%edi,%ecx,1),%al
addb (%esi,%ebp,1),%dl
addb %al,%dl
addl $1,%ebp
movb (%edi,%edx,1),%bl
jnz L012cnowrap
movl -4(%edi),%ebp
L012cnowrap:
movb %al,(%edi,%edx,1)
movb %bl,(%edi,%ecx,1)
addb $1,%cl
jnc L011c2ndloop
# Tag the key as a char schedule (tested by RC4 at 256(%edi)).
movl $-1,256(%edi)
L010exit:
# Reset key->x and key->y to 0.
xorl %eax,%eax
movl %eax,-8(%edi)
movl %eax,-4(%edi)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
# const char *RC4_options(void)
# Returns a pointer to "rc4(4x,int)" or, when capability bit 20 is
# set, "rc4(1x,char)" (12 bytes past the first string, hence +12).
# Uses call/pop to fetch EIP for PIC-safe addressing of the strings.
.globl _RC4_options
.align 4
_RC4_options:
L_RC4_options_begin:
call L013pic_point
L013pic_point:
popl %eax
leal L014opts-L013pic_point(%eax),%eax
leal _OPENSSL_ia32cap_P,%edx
btl $20,(%edx)
jnc L015skip
addl $12,%eax
L015skip:
ret
.align 6,0x90
# "rc4(4x,int)\0" then "rc4(1x,char)\0" then the CRYPTOGAMS id string.
L014opts:
.byte 114,99,52,40,52,120,44,105,110,116,41,0
.byte 114,99,52,40,49,120,44,99,104,97,114,41,0
.byte 82,67,52,32,102,111,114,32,120,56,54,44,32,67,82,89
.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 6,0x90
.comm _OPENSSL_ia32cap_P,4

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,259 @@
.file "sha512-586.s"
.text
# void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
#                              size_t num)
# i386 cdecl: args at 20..28(%esp).  Processes num 64-byte blocks.
# Round constants live at L001K256; the round loops are terminated by
# sentinel comparisons against the last constant of each range
# (3248222580 = K[15], 3329325298 = K[63]) instead of a counter.
.globl _sha256_block_data_order
.align 4
_sha256_block_data_order:
L_sha256_block_data_order_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
# call/pop PIC trick: ebp = &K256.
call L000pic_point
L000pic_point:
popl %ebp
leal L001K256-L000pic_point(%ebp),%ebp
# 64-byte-aligned frame; eax becomes the input end pointer.
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
.align 4,0x90
# Per-block loop: byte-swap the 16 input words onto the stack ...
L002loop:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
movl 28(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
movl 44(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
movl 60(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
addl $64,%edi
subl $32,%esp
movl %edi,100(%esp)
# ... then load the 8 chaining words a..h from the context into the
# working area (a in eax, e in edx, rest on the stack).
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edi
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %edi,28(%esp)
.align 4,0x90
# Rounds 0-15: Sigma1(e), Ch(e,f,g), Sigma0(a), Maj(a,b,c) computed
# with ror/xor; the frame is rotated by "subl $4,%esp" each round.
L00300_15:
movl 92(%esp),%ebx
movl %edx,%ecx
rorl $6,%ecx
movl %edx,%edi
rorl $11,%edi
movl 20(%esp),%esi
xorl %edi,%ecx
rorl $14,%edi
xorl %edi,%ecx
movl 24(%esp),%edi
addl %ecx,%ebx
movl %edx,16(%esp)
xorl %edi,%esi
movl %eax,%ecx
andl %edx,%esi
movl 12(%esp),%edx
xorl %edi,%esi
movl %eax,%edi
addl %esi,%ebx
rorl $2,%ecx
addl 28(%esp),%ebx
rorl $13,%edi
movl 4(%esp),%esi
xorl %edi,%ecx
rorl $9,%edi
addl %ebx,%edx
xorl %edi,%ecx
movl 8(%esp),%edi
addl %ecx,%ebx
movl %eax,(%esp)
movl %eax,%ecx
subl $4,%esp
orl %esi,%eax
andl %esi,%ecx
andl %edi,%eax
movl (%ebp),%esi
orl %ecx,%eax
addl $4,%ebp
addl %ebx,%eax
addl %esi,%edx
addl %esi,%eax
# Loop until K[15] (3248222580) has just been consumed.
cmpl $3248222580,%esi
jne L00300_15
movl 152(%esp),%ebx
.align 4,0x90
# Rounds 16-63: first extend the message schedule
# (sigma0/sigma1 of W[t-15], W[t-2]) then the same round body.
L00416_63:
movl %ebx,%esi
movl 100(%esp),%ecx
shrl $3,%ebx
rorl $7,%esi
xorl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
xorl %esi,%ebx
shrl $10,%ecx
movl 156(%esp),%esi
rorl $17,%edi
xorl %edi,%ecx
rorl $2,%edi
addl %esi,%ebx
xorl %ecx,%edi
addl %edi,%ebx
movl %edx,%ecx
addl 120(%esp),%ebx
rorl $6,%ecx
movl %edx,%edi
rorl $11,%edi
movl 20(%esp),%esi
xorl %edi,%ecx
rorl $14,%edi
movl %ebx,92(%esp)
xorl %edi,%ecx
movl 24(%esp),%edi
addl %ecx,%ebx
movl %edx,16(%esp)
xorl %edi,%esi
movl %eax,%ecx
andl %edx,%esi
movl 12(%esp),%edx
xorl %edi,%esi
movl %eax,%edi
addl %esi,%ebx
rorl $2,%ecx
addl 28(%esp),%ebx
rorl $13,%edi
movl 4(%esp),%esi
xorl %edi,%ecx
rorl $9,%edi
addl %ebx,%edx
xorl %edi,%ecx
movl 8(%esp),%edi
addl %ecx,%ebx
movl %eax,(%esp)
movl %eax,%ecx
subl $4,%esp
orl %esi,%eax
andl %esi,%ecx
andl %edi,%eax
movl (%ebp),%esi
orl %ecx,%eax
addl $4,%ebp
addl %ebx,%eax
movl 152(%esp),%ebx
addl %esi,%edx
addl %esi,%eax
# Loop until K[63] (3329325298) has just been consumed.
cmpl $3329325298,%esi
jne L00416_63
# Add the working variables back into the context (h[0..7] += ...).
movl 352(%esp),%esi
movl 4(%esp),%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edi
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%ecx
addl 12(%esi),%edi
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %ecx,8(%esi)
movl %edi,12(%esi)
movl 20(%esp),%eax
movl 24(%esp),%ebx
movl 28(%esp),%ecx
movl 356(%esp),%edi
addl 16(%esi),%edx
addl 20(%esi),%eax
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %eax,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
# Pop the per-block frame, rewind ebp to the start of K256, and loop
# while the input cursor is below the end pointer.
addl $352,%esp
subl $256,%ebp
cmpl 8(%esp),%edi
jb L002loop
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
# SHA-256 round constants K[0..63] (FIPS 180-4), in decimal.
L001K256:
.long 1116352408,1899447441,3049323471,3921009573
.long 961987163,1508970993,2453635748,2870763221
.long 3624381080,310598401,607225278,1426881987
.long 1925078388,2162078206,2614888103,3248222580
.long 3835390401,4022224774,264347078,604807628
.long 770255983,1249150122,1555081692,1996064986
.long 2554220882,2821834349,2952996808,3210313671
.long 3336571891,3584528711,113926993,338241895
.long 666307205,773529912,1294757372,1396182291
.long 1695183700,1986661051,2177026350,2456956037
.long 2730485921,2820302411,3259730800,3345764771
.long 3516065817,3600352804,4094571909,275423344
.long 430227734,506948616,659060556,883997877
.long 958139571,1322822218,1537002063,1747873779
.long 1955562222,2024104815,2227730452,2361852424
.long 2428436474,2756734187,3204031479,3329325298
# ASCII id string: "SHA256 block transform for x86, CRYPTOGAMS by
# <appro@openssl.org>"
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,261 @@
.file "x86cpuid.s"
.text
# unsigned long long OPENSSL_ia32_cpuid(void) — i386 variant.
# Returns leaf-1 edx features in eax and (per the final movl) the ecx
# feature word in edx.  Raw .byte 0x0f,0xa2 is the cpuid instruction
# (emitted as bytes for old assemblers).
.globl _OPENSSL_ia32_cpuid
.align 4
_OPENSSL_ia32_cpuid:
L_OPENSSL_ia32_cpuid_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %edx,%edx
# Classic CPUID-support probe: try to toggle EFLAGS.ID (bit 21,
# 2097152); if it won't stick, there is no cpuid instruction.
pushfl
popl %eax
movl %eax,%ecx
xorl $2097152,%eax
pushl %eax
popfl
pushfl
popl %eax
xorl %eax,%ecx
btl $21,%ecx
jnc L000done
# Leaf 0: edi = max standard leaf; vendor string in ebx/edx/ecx.
xorl %eax,%eax
.byte 0x0f,0xa2
movl %eax,%edi
# ebp = 0 iff vendor == "GenuineIntel".
xorl %eax,%eax
cmpl $1970169159,%ebx
setne %al
movl %eax,%ebp
cmpl $1231384169,%edx
setne %al
orl %eax,%ebp
cmpl $1818588270,%ecx
setne %al
orl %eax,%ebp
jz L001intel
# esi = 0 iff vendor == "AuthenticAMD".
cmpl $1752462657,%ebx
setne %al
movl %eax,%esi
cmpl $1769238117,%edx
setne %al
orl %eax,%esi
cmpl $1145913699,%ecx
setne %al
orl %eax,%esi
jnz L001intel
# AMD: leaf 0x80000008 gives core count (cl + 1) in esi.
movl $2147483648,%eax
.byte 0x0f,0xa2
cmpl $2147483656,%eax
jb L001intel
movl $2147483656,%eax
.byte 0x0f,0xa2
movzbl %cl,%esi
incl %esi
# Leaf 1: clear bogus HTT (bit 28) when logical CPUs <= cores
# (4026531839 = ~(1<<28)).
movl $1,%eax
.byte 0x0f,0xa2
btl $28,%edx
jnc L000done
shrl $16,%ebx
andl $255,%ebx
cmpl %esi,%ebx
ja L000done
andl $4026531839,%edx
jmp L000done
L001intel:
# Intel: leaf 4 (if available) gives cores-per-package in
# edi = (eax >> 14) & 0xfff; otherwise edi = -1.
cmpl $4,%edi
movl $-1,%edi
jb L002nocacheinfo
movl $4,%eax
movl $0,%ecx
.byte 0x0f,0xa2
movl %eax,%edi
shrl $14,%edi
andl $4095,%edi
L002nocacheinfo:
movl $1,%eax
.byte 0x0f,0xa2
cmpl $0,%ebp
jne L003notP4
# Genuine Intel family 15 (P4): set reserved bit 20 (1048576) as an
# internal marker.
andb $15,%ah
cmpb $15,%ah
jne L003notP4
orl $1048576,%edx
L003notP4:
btl $28,%edx
jnc L000done
andl $4026531839,%edx
cmpl $0,%edi
je L000done
# Re-assert HTT (1<<28 = 268435456) unless only one logical CPU.
orl $268435456,%edx
shrl $16,%ebx
cmpb $1,%bl
ja L000done
andl $4026531839,%edx
L000done:
# Return edx features in eax, ecx features in edx.
movl %edx,%eax
movl %ecx,%edx
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
# unsigned long long OPENSSL_rdtsc(void) — i386 variant.
# Returns the TSC in edx:eax, or 0 if capability bit 4 (TSC) is not
# set in OPENSSL_ia32cap_P.  .byte 0x0f,0x31 = rdtsc.
.globl _OPENSSL_rdtsc
.align 4
_OPENSSL_rdtsc:
L_OPENSSL_rdtsc_begin:
xorl %eax,%eax
xorl %edx,%edx
leal _OPENSSL_ia32cap_P,%ecx
btl $4,(%ecx)
jnc L004notsc
.byte 0x0f,0x31
L004notsc:
ret
# Measures how many TSC ticks a hlt instruction takes; returns the
# tick count in edx:eax, or 0 when it cannot be measured (no TSC,
# not ring 0, or interrupts disabled).
.globl _OPENSSL_instrument_halt
.align 4
_OPENSSL_instrument_halt:
L_OPENSSL_instrument_halt_begin:
leal _OPENSSL_ia32cap_P,%ecx
btl $4,(%ecx)
jnc L005nohalt
# Raw bytes 0x0e,0x90,0x58,0x90 = push %cs; nop; pop %eax; nop —
# reads CS so the low 2 bits give the current privilege level.
.long 2421723150
andl $3,%eax
jnz L005nohalt
# hlt is privileged: also require IF (EFLAGS bit 9) set so we wake up.
pushfl
popl %eax
btl $9,%eax
jnc L005nohalt
# rdtsc before, hlt, rdtsc after; return the 64-bit difference.
.byte 0x0f,0x31
pushl %edx
pushl %eax
hlt
.byte 0x0f,0x31
subl (%esp),%eax
sbbl 4(%esp),%edx
addl $8,%esp
ret
L005nohalt:
xorl %eax,%eax
xorl %edx,%edx
ret
# Spins while the dword at the watched address stays unchanged,
# counting iterations in eax; returns 0 if interrupts are disabled.
# Args at 4(%esp) and 8(%esp); 8(%esp) is the pointer spun on (ecx).
.globl _OPENSSL_far_spin
.align 4
_OPENSSL_far_spin:
L_OPENSSL_far_spin_begin:
# Require IF (EFLAGS bit 9) so the value can actually change under us.
pushfl
popl %eax
btl $9,%eax
jnc L006nospin
movl 4(%esp),%eax
movl 8(%esp),%ecx
# Raw bytes 0x1e,0x8e,0xd8,0x90: push %ds; mov -> %ds; nop —
# presumably installs the segment passed in 4(%esp); confirm against
# the perlasm source.
.long 2430111262
xorl %eax,%eax
movl (%ecx),%edx
jmp L007spin
.align 4,0x90
L007spin:
incl %eax
cmpl (%ecx),%edx
je L007spin
# Raw bytes 0x90,0x90,0x90,0x1f: nops then pop %ds (restore segment).
.long 529567888
ret
L006nospin:
xorl %eax,%eax
xorl %edx,%edx
ret
# char *OPENSSL_wipe_cpu(void) — i386 variant.
# Zeroes eax/edx and, when the capability test passes, the x87 stack
# (raw .longs decode to eight fldz, then fwait+fninit, nop).  Returns
# eax = esp+4, the stack address just above the return address.
.globl _OPENSSL_wipe_cpu
.align 4
_OPENSSL_wipe_cpu:
L_OPENSSL_wipe_cpu_begin:
xorl %eax,%eax
xorl %edx,%edx
leal _OPENSSL_ia32cap_P,%ecx
movl (%ecx),%ecx
# NOTE(review): ecx now holds the capability *value*, yet btl tests
# memory at (%ecx) — this dereferences the cap word as a pointer.
# Looks suspicious; confirm against upstream x86cpuid.pl (expected
# form would test bit 1 of the register).
btl $1,(%ecx)
jnc L008no_x87
.long 4007259865,4007259865,4007259865,4007259865,2430851995
L008no_x87:
leal 4(%esp),%eax
ret
# int OPENSSL_atomic_add(int *val, int amount) — i386 variant.
# edx = val, ecx = amount; CAS loop returns the new value in eax.
.globl _OPENSSL_atomic_add
.align 4
_OPENSSL_atomic_add:
L_OPENSSL_atomic_add_begin:
movl 4(%esp),%edx
movl 8(%esp),%ecx
pushl %ebx
nop
movl (%edx),%eax
L009spin:
leal (%eax,%ecx,1),%ebx
nop
# Raw bytes 0xf0,0x0f,0xb1,0x1a = lock cmpxchgl %ebx,(%edx).
.long 447811568
jne L009spin
movl %ebx,%eax
popl %ebx
ret
# OPENSSL_indirect_call(void (*f)(...), a1..a7)
# Trampoline: copies up to seven caller arguments (12..36(%ebp)) into
# a fresh outgoing-argument area and calls *f (8(%ebp)).  Used to
# launder indirect calls through a known stack layout.
.globl _OPENSSL_indirect_call
.align 4
_OPENSSL_indirect_call:
L_OPENSSL_indirect_call_begin:
pushl %ebp
movl %esp,%ebp
subl $28,%esp
movl 12(%ebp),%ecx
movl %ecx,(%esp)
movl 16(%ebp),%edx
movl %edx,4(%esp)
movl 20(%ebp),%eax
movl %eax,8(%esp)
movl 24(%ebp),%eax
movl %eax,12(%esp)
movl 28(%ebp),%eax
movl %eax,16(%esp)
movl 32(%ebp),%eax
movl %eax,20(%esp)
movl 36(%ebp),%eax
movl %eax,24(%esp)
call *8(%ebp)
movl %ebp,%esp
popl %ebp
ret
# void OPENSSL_cleanse(void *ptr, size_t len) — i386 variant.
# edx = ptr, ecx = len.  Secure zeroization: buffers of 7+ bytes are
# aligned to 4 and zeroed a dword at a time; the rest byte-by-byte.
.globl _OPENSSL_cleanse
.align 4
_OPENSSL_cleanse:
L_OPENSSL_cleanse_begin:
movl 4(%esp),%edx
movl 8(%esp),%ecx
xorl %eax,%eax
cmpl $7,%ecx
jae L010lot
cmpl $0,%ecx
je L011ret
# Byte loop for short buffers and tails.
L012little:
movb %al,(%edx)
subl $1,%ecx
leal 1(%edx),%edx
jnz L012little
L011ret:
ret
.align 4,0x90
# Align edx to 4 with byte stores ...
L010lot:
testl $3,%edx
jz L013aligned
movb %al,(%edx)
leal -1(%ecx),%ecx
leal 1(%edx),%edx
jmp L010lot
# ... then dword stores while >= 4 bytes remain.
L013aligned:
movl %eax,(%edx)
leal -4(%ecx),%ecx
testl $-4,%ecx
leal 4(%edx),%edx
jnz L013aligned
cmpl $0,%ecx
jne L012little
ret
# 4-byte common symbol holding the CPU capability vector, plus the
# Mach-O constructor entry that runs _OPENSSL_cpuid_setup at load.
.comm _OPENSSL_ia32cap_P,4
.mod_init_func
.align 2
.long _OPENSSL_cpuid_setup