diff --git a/RandomX.md b/RandomX.md
index 71f818b4..680baeb4 100644
--- a/RandomX.md
+++ b/RandomX.md
@@ -121,7 +121,7 @@ in rx_hash.h
 #define SEEDHASH_EPOCH_TESTNET_LAG 32
 
 // fork seed height, (time frame of RANDOMX_FORK_HEIGHT) + SEEDHASH_EPOCH_LAG = fork time frame
-#define RANDOMX_FORK_HEIGHT 1339392
+#define RANDOMX_FORK_HEIGHT 1540096
 
 // (time frame of RANDOMX_TESTNET_FORK_HEIGHT) + SEEDHASH_EPOCH_TESTNET_LAG = test net fork time frame
 #define RANDOMX_TESTNET_FORK_HEIGHT 196288    // 196288 % 64 = 0
diff --git a/client/rx_hash.c b/client/rx_hash.c
index ddc34945..9504e76a 100644
--- a/client/rx_hash.c
+++ b/client/rx_hash.c
@@ -43,6 +43,7 @@ static uint32_t g_mine_n_threads;
 static pthread_mutex_t g_rx_dataset_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 int rx_update_vm(randomx_vm **vm, randomx_cache *cache, randomx_dataset *dataset);
+int getHugePageNumber(unsigned long* num);	// get and check whether huge pages are available
 
 static void rx_abort(const char *msg){
 	fprintf(stderr, "%s\n", msg);
@@ -171,6 +172,16 @@ void rx_init_flags(int is_full_mem, uint32_t init_thread_count) {
 
 	if (g_randomx_flags & RANDOMX_FLAG_LARGE_PAGES) {
 		xdag_info(" - randomx large pages mode");
+#ifdef __linux__
+		unsigned long num = 0;
+		int ret;
+		ret = getHugePageNumber(&num);
+		if (ret == -1 || num < 2560) {
+			rx_abort("randomx: huge pages not available.");
+			return;
+		}
+		xdag_info("huge page free = %lu ", num);
+#endif
 	} else {
 		xdag_info(" - randomx small pages mode");
 	}
@@ -584,3 +595,20 @@ void rx_loading_fork_time(void) {	// node start height greater than g_rx_fork
 		}
 	}
 }
+
+// get the number of free huge pages from /proc/meminfo
+int getHugePageNumber(unsigned long* num) {
+	FILE *fp;
+	char str[256];
+	if ((fp = fopen("/proc/meminfo", "r")) == NULL) {
+		return -1;
+	}
+	while (fscanf(fp, "%255s", str) != EOF) {
+		if (strcmp(str, "HugePages_Free:") == 0) {
+			fscanf(fp, "%lu", num);
+			break;
+		}
+	}
+	fclose(fp);
+	return 0;
+}
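
For reference, below is a minimal standalone sketch of the same `/proc/meminfo` parsing that the added `getHugePageNumber()` performs, combined with the 2560-page threshold checked in `rx_init_flags()`. It is not part of the patch: the `get_huge_page_free` helper and the `main()` wrapper are hypothetical, Linux-only, and intended only for testing the check in isolation.

```c
/* Standalone sketch (assumption: Linux host with /proc/meminfo).
 * Mirrors the HugePages_Free lookup and the >= 2560 pages check
 * added by this diff; not the node's actual code path. */
#include <stdio.h>
#include <string.h>

static int get_huge_page_free(unsigned long *num) {
	FILE *fp = fopen("/proc/meminfo", "r");
	char str[256];
	if (fp == NULL) {
		return -1;                      /* /proc/meminfo not readable */
	}
	while (fscanf(fp, "%255s", str) != EOF) {
		if (strcmp(str, "HugePages_Free:") == 0) {
			if (fscanf(fp, "%lu", num) != 1) {
				fclose(fp);
				return -1;              /* field found but value unreadable */
			}
			break;
		}
	}
	fclose(fp);
	return 0;
}

int main(void) {
	unsigned long num = 0;
	if (get_huge_page_free(&num) == -1 || num < 2560) {
		fprintf(stderr, "huge pages not available (free = %lu, need >= 2560)\n", num);
		return 1;
	}
	printf("huge page free = %lu\n", num);
	return 0;
}
```

If the check fails, free huge pages can usually be reserved on Linux with `sysctl -w vm.nr_hugepages=<count>` (run as root) before starting the node; the exact count a given deployment needs is outside the scope of this diff.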