Skip to content

Commit

Permalink
Remove nvml
Browse files Browse the repository at this point in the history
  • Loading branch information
tmrlvi committed Apr 9, 2022
1 parent d06484b commit ec42f6b
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 115 deletions.
109 changes: 1 addition & 108 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion plugins/cuda/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ log = "0.4"
rand = "0.8"
clap = { version = "3.0", features = ["color", "derive"]}
env_logger = "0.9"
nvml-wrapper = { git = "https://github.com/benrod3k/nvml-wrapper", branch = "495.29.05" }
#nvml-wrapper = { git = "https://github.com/benrod3k/nvml-wrapper", branch = "495.29.05" }

[lib]
crate-type = ["cdylib", "rlib"]
13 changes: 7 additions & 6 deletions plugins/cuda/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ use cust::prelude::*;
use kaspa_miner::{Plugin, Worker, WorkerSpec};
use log::LevelFilter;
use log::{error, info};
use nvml_wrapper::Device as NvmlDevice;
use nvml_wrapper::Nvml;
//use nvml_wrapper::Device as NvmlDevice;
//use nvml_wrapper::Nvml;
use std::error::Error as StdError;

/// Boxed, thread-safe, `'static` error type used as the plugin's uniform error alias.
pub type Error = Box<dyn StdError + Send + Sync + 'static>;
Expand All @@ -22,16 +22,16 @@ const DEFAULT_WORKLOAD_SCALE: f32 = 256.;

pub struct CudaPlugin {
specs: Vec<CudaWorkerSpec>,
nvml_instance: Nvml,
//nvml_instance: Nvml,
_enabled: bool,
}

impl CudaPlugin {
    /// Constructs the plugin: initializes the CUDA driver API and the global logger.
    ///
    /// The commented-out NVML initialization (removed along with the
    /// nvml-wrapper dependency) is deleted here — dead commented-out code
    /// should not be kept in the tree; version control preserves history.
    ///
    /// # Errors
    /// Returns an error if CUDA driver initialization (`cust::init`) fails.
    fn new() -> Result<Self, Error> {
        cust::init(CudaFlags::empty())?;
        // NOTE(review): `init()` panics if a global logger was already set —
        // assumes this constructor runs once per process; confirm with callers.
        env_logger::builder().filter_level(LevelFilter::Info).parse_default_env().init();
        Ok(Self { specs: Vec::new(), _enabled: false })
    }
}

Expand Down Expand Up @@ -62,6 +62,7 @@ impl Plugin for CudaPlugin {
}
};

/*
// if any of cuda_lock_core_clocks / cuda_lock_mem_clocks / cuda_power_limit is valid, init nvml and try to apply
if opts.cuda_lock_core_clocks.is_some()
|| opts.cuda_lock_mem_clocks.is_some()
Expand Down Expand Up @@ -109,7 +110,7 @@ impl Plugin for CudaPlugin {
};
};
}
}
}*/

self.specs = (0..gpus.len())
.map(|i| CudaWorkerSpec {
Expand Down

0 comments on commit ec42f6b

Please sign in to comment.