Awoken code
a1i3nj03 committed Apr 27, 2018
commit c5f5c09 (2 parents: 03d9dd8 + dca590e)
Showing 10 changed files with 25 additions and 542 deletions.
2 changes: 1 addition & 1 deletion Makefile.am
@@ -72,7 +72,7 @@ ccminer_SOURCES = elist.h miner.h compat.h \
x11/x11.cu x11/fresh.cu x11/cuda_x11_luffa512.cu x11/cuda_x11_cubehash512.cu \
x11/cuda_x11_shavite512.cu x11/cuda_x11_shavite512_alexis.cu x11/cuda_x11_simd512.cu x11/cuda_x11_echo.cu x11/cuda_x11_echo_alexis.cu \
x11/cuda_x11_luffa512_Cubehash.cu x11/x11evo.cu x11/timetravel.cu x11/bitcore.cu \
-x13/x13.cu x13/cuda_x13_hamsi512.cu x13/cuda_x13_fugue512.cu x13/cuda_x13_fugue512_alexis.cu \
+x13/x13.cu x13/cuda_x13_hamsi512_alexis.cu x13/cuda_x13_hamsi512.cu x13/cuda_x13_fugue512.cu x13/cuda_x13_fugue512_alexis.cu \
x13/hsr.cu x13/cuda_hsr_sm3.cu x13/sm3.c \
x15/x14.cu x15/x15.cu x15/cuda_x14_shabal512.cu x15/cuda_x14_shabal512_alexis.cu x15/cuda_x15_whirlpool.cu \
x15/whirlpool.cu x15/cuda_x15_whirlpool_sm3.cu \
3 changes: 2 additions & 1 deletion ccminer.cpp
@@ -2,10 +2,11 @@
* Copyright 2010 Jeff Garzik
* Copyright 2012-2014 pooler
* Copyright 2014-2018 tpruvot
-* Copyright 2016 Alexis78
+* Copyright 2016 Alexis78 - Improved many of the kernels implemented in this x16r miner
* Copyright 2018 brianmct
* Copyright 2018 a1i3nj03
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
526 changes: 1 addition & 525 deletions x11/cuda_x11_echo_aes.cuh

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions x11/cuda_x11_simd512.cu
@@ -1,5 +1,6 @@
/***************************************************************************************************
* SIMD512 SM3+ CUDA IMPLEMENTATION (require cuda_x11_simd512_func.cuh)
+*** Based on Alexis78 very good simd modifications ***
*/

#include "miner.h"
3 changes: 2 additions & 1 deletion x11/cuda_x11_simd512_sm2.cuh
@@ -572,6 +572,7 @@ static void x11_simd512_cpu_hash_64_sm2(int thr_id, uint32_t threads, uint32_t s

size_t shared_size = 0;

-x11_simd512_gpu_hash_64_sm2<<<grid, block, shared_size, streamk[thr_id]>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector, order);
+x11_simd512_gpu_hash_64_sm2 << <grid, block, shared_size>> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector, order);
+// x11_simd512_gpu_hash_64_sm2 << <grid, block, shared_size, streamk[thr_id] >> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector, order);
// MyStreamSynchronize(NULL, order, thr_id);
}
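
The recurring change in this commit drops the per-thread stream argument (streamk[thr_id]) from kernel launches in favor of the default stream, keeping the streamed variant only as a comment; the same pattern repeats in the fugue512 and hamsi512 hunks below. A minimal sketch of the two launch forms follows; the placeholder kernel, MAX_GPUS, and the streamk declaration are illustrative assumptions, not the repository's exact code.

#include <cuda_runtime.h>
#include <stdint.h>

#define MAX_GPUS 16
static cudaStream_t streamk[MAX_GPUS];   // assumed: one stream per mining thread

__global__ void example_gpu_hash_64(uint32_t threads, uint64_t *d_hash)
{
	uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads)
		d_hash[thread] ^= (uint64_t)thread;   // placeholder work, not a real hash round
}

extern "C" void example_cpu_hash_64(int thr_id, uint32_t threads, uint64_t *d_hash)
{
	const uint32_t threadsperblock = 128;
	dim3 grid((threads + threadsperblock - 1) / threadsperblock);
	dim3 block(threadsperblock);

	// Form the commit keeps: launch on the default (legacy) stream.
	example_gpu_hash_64 <<<grid, block>>> (threads, d_hash);

	// Form the commit comments out: launch on a per-thread stream, which only
	// overlaps with other work if that stream is created and synchronized elsewhere.
	// example_gpu_hash_64 <<<grid, block, 0, streamk[thr_id]>>> (threads, d_hash);
	(void)thr_id;
}
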
3 changes: 2 additions & 1 deletion x13/cuda_x13_fugue512.cu
@@ -404,5 +404,6 @@ void x13_fugue512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);

-x13_fugue512_gpu_hash_64 <<<grid, block, 0, streamk[thr_id]>>> (threads, (uint64_t*)d_hash, order);
+x13_fugue512_gpu_hash_64 << <grid, block>> > (threads, (uint64_t*)d_hash, order);
+// x13_fugue512_gpu_hash_64 << <grid, block, 0, streamk[thr_id] >> > (threads, (uint64_t*)d_hash, order);
}
3 changes: 2 additions & 1 deletion x13/cuda_x13_fugue512_alexis.cu
@@ -3,8 +3,9 @@
*
* Built on cbuchner1's implementation, actual hashing code
* heavily based on phm's sgminer
*
*
+* Alexis78's fugue512 kernel modifications
*
*/

#include "cuda_helper_alexis.h"
3 changes: 2 additions & 1 deletion x13/cuda_x13_hamsi512.cu
@@ -423,7 +423,8 @@ void x13_hamsi512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);

-x13_hamsi512_gpu_hash_64<<<grid, block, 0, streamk[thr_id]>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
+x13_hamsi512_gpu_hash_64 << <grid, block>> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
+// x13_hamsi512_gpu_hash_64 << <grid, block, 0, streamk[thr_id] >> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
//MyStreamSynchronize(NULL, order, thr_id);
}

18 changes: 9 additions & 9 deletions x15/cuda_x15_whirlpool_sm3.cu
@@ -2374,16 +2374,16 @@ void whirlpool512_setBlock_80_sm3(void *pdata, const void *ptarget)
__host__
void x16_whirlpool512_init(int thr_id, uint32_t threads)
{
-cudaMemcpyToSymbolAsync(InitVector_RC, plain_RC, sizeof(plain_RC), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob0Tox, plain_T0, sizeof(plain_T0), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
+cudaMemcpyToSymbolAsync(InitVector_RC, plain_RC, sizeof(plain_RC), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob0Tox, plain_T0, sizeof(plain_T0), 0, cudaMemcpyHostToDevice, 0);
#if USE_ALL_TABLES
-cudaMemcpyToSymbolAsync(mixTob1Tox, plain_T1, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob2Tox, plain_T2, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob3Tox, plain_T3, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob4Tox, plain_T4, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob5Tox, plain_T5, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob6Tox, plain_T6, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
-cudaMemcpyToSymbolAsync(mixTob7Tox, plain_T7, (256 * 8), 0, cudaMemcpyHostToDevice, streamk[thr_id]);
+cudaMemcpyToSymbolAsync(mixTob1Tox, plain_T1, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob2Tox, plain_T2, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob3Tox, plain_T3, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob4Tox, plain_T4, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob5Tox, plain_T5, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob6Tox, plain_T6, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
+cudaMemcpyToSymbolAsync(mixTob7Tox, plain_T7, (256 * 8), 0, cudaMemcpyHostToDevice, 0);
#endif

}
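
In x16_whirlpool512_init the constant-table uploads move from streamk[thr_id] to the default stream (the trailing 0 argument of cudaMemcpyToSymbolAsync). A minimal sketch of that upload pattern follows; d_mix_table, host_table, and upload_tables are placeholder names, not the miner's symbols.

#include <cuda_runtime.h>
#include <stdint.h>

__constant__ uint64_t d_mix_table[256];   // device-side lookup table in constant memory
static uint64_t host_table[256];          // host copy, filled with precomputed constants elsewhere

void upload_tables()
{
	// Asynchronous copy into __constant__ memory on the default stream (last argument 0),
	// matching the direction this commit takes instead of streamk[thr_id].
	cudaMemcpyToSymbolAsync(d_mix_table, host_table, sizeof(host_table), 0,
	                        cudaMemcpyHostToDevice, 0);
	// Make sure the tables are in place before any kernel reads them.
	cudaStreamSynchronize(0);
}
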
5 changes: 3 additions & 2 deletions x16r/x16r.cu
@@ -3,6 +3,7 @@
*
* tpruvot 2018 - GPL code
* a1i3nj03 2018
+*** Uses many of Alexis78's very good kernels ***
*/
/*
compute_70, sm_70
@@ -350,7 +351,7 @@ extern "C" int x16r_init(int thr_id, uint32_t max_nonce)
sleep(1);
}
// set_lo << <1, 1 >> >(d_ark[thr_id]);
-CUDA_SAFE_CALL(cudaMemcpyAsync(d_ark[thr_id], (int*)h_ark[thr_id], sizeof(int), cudaMemcpyHostToDevice, streamk[thr_id]));
+CUDA_SAFE_CALL(cudaMemcpyAsync(d_ark[thr_id], (int*)h_ark[thr_id], sizeof(int), cudaMemcpyHostToDevice, streamk[0]));
// CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_ark[thr_id], (int*)h_ark[thr_id], sizeof(int), 0, cudaMemcpyHostToDevice));

// CUDA_SAFE_CALL(cudaGetLastError());
@@ -438,7 +439,7 @@ extern "C" int scanhash_x16r(int thr_id, struct work* work, uint32_t max_nonce,

((uint32_t*)ptarget)[7] = 0x003f;
*((uint64_t*)&pdata[1]) = 0x67452301EFCDAB89;//0x31C8B76F520AEDF4;
-*((uint64_t*)&pdata[1]) = 0xbbbbbbbbbbbbbbbb;//2:64,4:80,8,a,e.. error//44B54B9F248C0708//0x31C8B76F520AEDF4;
+// *((uint64_t*)&pdata[1]) = 0xbbbbbbbbbbbbbbbb;//2:64,4:80,8,a,e.. error//44B54B9F248C0708//0x31C8B76F520AEDF4;
//489f 4f38 33f4 7016 //01346789f

}
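
The x16r.cu hunks above make two further adjustments: the per-thread int flag behind h_ark/d_ark is now uploaded on streamk[0] for every thread, and the 0xbbbbbbbbbbbbbbbb test value written into pdata[1] is commented out, leaving 0x67452301EFCDAB89 active. A hedged sketch of that flag-copy step follows; the pinned-host allocation, MAX_GPUS, and the init_flag wrapper are illustrative assumptions, not the repository's code.

#include <cuda_runtime.h>

#define MAX_GPUS 16
static int *h_ark[MAX_GPUS];   // host-side flag (pinned so cudaMemcpyAsync can be truly asynchronous)
static int *d_ark[MAX_GPUS];   // device-side flag

void init_flag(int thr_id, cudaStream_t stream)
{
	cudaMallocHost((void**)&h_ark[thr_id], sizeof(int));   // pinned host allocation
	cudaMalloc((void**)&d_ark[thr_id], sizeof(int));
	*h_ark[thr_id] = 0;

	// The commit issues this copy on streamk[0] for every thread, so all threads'
	// flag uploads go through a single stream rather than one stream per thread.
	cudaMemcpyAsync(d_ark[thr_id], h_ark[thr_id], sizeof(int),
	                cudaMemcpyHostToDevice, stream);
}
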
