
Commit

Merge branch 'main' into fix/nonzero
laggui authored Sep 3, 2024
2 parents 8ba81b1 + 96a2340 commit 7167674
Showing 15 changed files with 64 additions and 38 deletions.
34 changes: 34 additions & 0 deletions CITATION.cff
@@ -0,0 +1,34 @@
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- family-names: "Simard"
given-names: "Nathaniel"
email: "[email protected]"
- family-names: "Fortier-Dubois"
given-names: "Louis"
email: "[email protected]"
- family-names: "Tadjibaev"
given-names: "Dilshod"
email: "[email protected]"
- family-names: "Lagrange"
given-names: "Guillaume"
email: "[email protected]"
- name: "Burn Framework Contributors"
title: "Burn"
version: 0.14.0
date-released: 2024-08-27
url: "https://burn.dev/"
repository-code: "https://github.com/tracel-ai/burn"
license:
- MIT
- Apache-2.0
abstract: "Burn is a new comprehensive dynamic Deep Learning Framework built using Rust with extreme flexibility, compute efficiency and portability as its primary goals."
keywords:
- scientific-computing
- deep-learning
- machine-learning
- neural-networks
- rust
- high-performance-computing
- portability
- compute-efficiency
16 changes: 8 additions & 8 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -151,8 +151,8 @@ systemstat = "0.2.3"
portable-atomic-util = { version = "0.2.2", features = ["alloc"] }

### For the main burn branch. ###
cubecl = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "446dc854ef3f7f93fe4ab2df0f1ab779a7d6a98e" }
cubecl-common = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "446dc854ef3f7f93fe4ab2df0f1ab779a7d6a98e" }
cubecl = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "b0fa39bad9d5ecbd98a955e6f7f94c855b6e9a73" }
cubecl-common = { git = "https://github.com/tracel-ai/cubecl", default-features = false, rev = "b0fa39bad9d5ecbd98a955e6f7f94c855b6e9a73" }
### For local development. ###
# cubecl = { path = "../cubecl/crates/cubecl" }
# cubecl-common = { path = "../cubecl/crates/cubecl-common" }
3 changes: 1 addition & 2 deletions backend-comparison/Cargo.toml
@@ -24,14 +24,13 @@ tch-cpu = ["burn/tch"]
tch-gpu = ["burn/tch"]
wgpu = ["burn/wgpu", "burn/autotune"]
wgpu-fusion = ["wgpu", "burn/fusion"]
# cuda-jit = ["burn-cuda"]
cuda-jit = ["burn/cuda-jit"]

[dependencies]
arboard = { workspace = true }
burn = { path = "../crates/burn", default-features = false }
burn-common = { path = "../crates/burn-common", version = "0.15.0" }
burn-wgpu = { path = "../crates/burn-wgpu", default-features = false, version = "0.15.0", optional = true }
# burn-cuda = { path = "../crates/burn-cuda", version = "0.15.0", optional = true }
clap = { workspace = true }
colored = { workspace = true }
derive-new = { workspace = true }
2 changes: 1 addition & 1 deletion backend-comparison/src/lib.rs
@@ -129,7 +129,7 @@ macro_rules! bench_on_backend {

#[cfg(feature = "cuda-jit")]
{
use burn_cuda::{Cuda, CudaDevice};
use burn::backend::cuda_jit::{Cuda, CudaDevice};

bench::<Cuda>(&CudaDevice::default(), feature_name, url, token);
}
13 changes: 2 additions & 11 deletions contributor-book/src/getting-started/configuring-your-editor.md
@@ -23,17 +23,8 @@ To use the debugger, follow these steps:
1. Open `Command Palette` with `Ctrl+Shift+P` or `F1` and type
`LLDB: Generate Launch Configurations from Cargo.toml` then select it, this will generate a file
that should be saved as `.vscode/launch.json`.
2. Select the configuration from the "run and debug" side panel, then select the target from the
list.
You may also want to enable debugging by creating a `.vscode/settings.json` file:
```
{
"rust-analyzer.runnables.extraEnv": {
"CARGO_PROFILE_DEV_DEBUG": true
}
}
```
since this repo has `debug = 0` in the root `Cargo.toml` to speed up compilation.
2. Select the configuration from the "run and debug" side panel, then select the target from the list.
Since this repo has `debug = 0` in the root `Cargo.toml` to speed up compilation, you need to replace it with `debug = true` when using a debugger and breakpoints with the `launch.json` settings.
3. Now you can enable breakpoints on code through IDE then start debugging the library/binary you
want, like in the following example:

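For reference, the `debug = 0` override that the updated step points at lives in the root `Cargo.toml`; a minimal sketch of the edit a contributor would make while debugging (the `[profile.dev]` section name is an assumption, not quoted from the repo):

```toml
# Root Cargo.toml (sketch): the repo ships debug = 0 to speed up compilation,
# so switch it to true while debugging so breakpoints and symbols resolve.
[profile.dev]
debug = true
```

Remember to switch it back afterwards so compile times stay fast.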
2 changes: 1 addition & 1 deletion crates/burn-core/src/nn/loss/huber.rs
@@ -37,7 +37,7 @@ impl HuberLossConfig {
/// The loss for each element of the residuals `r = targets - predictions` is given by
///
/// ```text
/// L(r) = 0.5 * x^2 if |r| <= d
/// L(r) = 0.5 * r^2 if |r| <= d
/// L(r) = 0.5 * d^2 + d * (|r| - d) if |r| > d
/// ```
///
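The docstring correction above (`x^2` to `r^2`) makes the formula consistent with the residual it is defined over; as a standalone illustration (not the burn implementation), the per-element loss can be written as:

```rust
/// Standalone sketch of the per-element Huber loss from the docstring above:
/// quadratic for small residuals, linear once |r| exceeds the threshold `d`.
fn huber_element(residual: f64, delta: f64) -> f64 {
    let abs_r = residual.abs();
    if abs_r <= delta {
        0.5 * residual * residual
    } else {
        0.5 * delta * delta + delta * (abs_r - delta)
    }
}
```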
2 changes: 1 addition & 1 deletion crates/burn-dataset/Cargo.toml
@@ -21,7 +21,7 @@ fake = ["dep:fake"]
sqlite = ["__sqlite-shared", "dep:rusqlite"]
sqlite-bundled = ["__sqlite-shared", "rusqlite/bundled"]

vision = ["dep:flate2", "dep:globwalk", "dep:burn-common"]
vision = ["dep:flate2", "dep:globwalk", "dep:burn-common", "dep:image"]

# internal
__sqlite-shared = [
10 changes: 5 additions & 5 deletions crates/burn-import/onnx-tests/tests/test_onnx.rs
@@ -964,7 +964,6 @@ mod tests {
}

#[test]
#[ignore = "https://github.com/tracel-ai/burn/issues/2080"]
fn resize_with_scales_1d_linear() {
// Initialize the model without weights (because the exported file does not contain them)
let device = Default::default();
@@ -980,10 +979,11 @@
// The scales are 1.5
let output = model.forward(input);

let output_sum = output.sum().into_scalar();
let expected_sum = -4.568_224; // from pytorch

assert!(expected_sum.approx_eq(output_sum, (1.0e-4, 2)));
Tensor::<Backend, 3>::from([[[
1.5410, 0.3945, -0.7648, -1.9431, -0.8052, 0.3618, -0.6713, -1.2023, -1.3986,
]]])
.to_data()
.assert_approx_eq(&output.into_data(), 3);
}

#[test]
2 changes: 2 additions & 0 deletions crates/burn-jit/src/kernel/interpolate/bicubic.rs
@@ -100,6 +100,7 @@ impl<E: JitElement> InterpolateBicubicShader<E> {

cpa!(scope, input_height = input_shape_2 - 1u32);
cpa!(scope, output_height = output_shape_2 - 1u32);
cpa!(scope, output_height = max(output_height, 1u32));
cpa!(scope, numerator = h * input_height);
cpa!(scope, numerator_float = cast(numerator));
cpa!(scope, output_height_float = cast(output_height));
@@ -129,6 +130,7 @@ impl<E: JitElement> InterpolateBicubicShader<E> {

cpa!(scope, input_width = input_shape_3 - 1u32);
cpa!(scope, output_width = output_shape_3 - 1u32);
cpa!(scope, output_width = max(output_width, 1u32));
cpa!(scope, numerator = w * input_width);
cpa!(scope, numerator_float = cast(numerator));
cpa!(scope, output_width_float = cast(output_width));
2 changes: 2 additions & 0 deletions crates/burn-jit/src/kernel/interpolate/bilinear.rs
@@ -101,6 +101,7 @@ impl InterpolateBilinearShader {

cpa!(scope, numerator_int = input_shape_2 - 1u32);
cpa!(scope, denominator_int = output_shape_2 - 1u32);
cpa!(scope, denominator_int = max(denominator_int, 1u32));
cpa!(scope, factor_float = cast(h));
cpa!(scope, numerator_float = cast(numerator_int));
cpa!(scope, denominator_float = cast(denominator_int));
@@ -115,6 +116,7 @@

cpa!(scope, numerator_int = input_shape_3 - 1u32);
cpa!(scope, denominator_int = output_shape_3 - 1u32);
cpa!(scope, denominator_int = max(denominator_int, 1u32));
cpa!(scope, factor_float = cast(w));
cpa!(scope, numerator_float = cast(numerator_int));
cpa!(scope, denominator_float = cast(denominator_int));
8 changes: 4 additions & 4 deletions crates/burn-ndarray/src/ops/interpolate.rs
@@ -96,8 +96,8 @@ pub(crate) fn bilinear_interpolate<E: FloatNdArrayElement>(
let (batch_size, channels, in_height, in_width) = x.dim();
let [out_height, out_width] = output_size;

let y_ratio = ((in_height - 1) as f64) / ((out_height - 1) as f64);
let x_ratio = ((in_width - 1) as f64) / ((out_width - 1) as f64);
let y_ratio = ((in_height - 1) as f64) / (core::cmp::max(out_height - 1, 1) as f64);
let x_ratio = ((in_width - 1) as f64) / (core::cmp::max(out_width - 1, 1) as f64);

let out_element_num = batch_size * channels * out_height * out_width;
let strides = (
@@ -174,8 +174,8 @@ pub(crate) fn bicubic_interpolate<E: FloatNdArrayElement>(
let (batch_size, channels, in_height, in_width) = x.dim();
let [out_height, out_width] = output_size;

let y_ratio = ((in_height - 1) as f64) / ((out_height - 1) as f64);
let x_ratio = ((in_width - 1) as f64) / ((out_width - 1) as f64);
let y_ratio = ((in_height - 1) as f64) / (core::cmp::max(out_height - 1, 1) as f64);
let x_ratio = ((in_width - 1) as f64) / (core::cmp::max(out_width - 1, 1) as f64);

let out_element_num = batch_size * channels * out_height * out_width;
let strides = (
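These ndarray routines and the JIT kernels above apply the same guard: with an output size of 1, the `out - 1` denominator of the scale factor would be zero, so it is clamped to at least 1 and the single output sample simply maps to the first input sample. A standalone sketch of the clamped ratio (names are illustrative, not the crate's API):

```rust
// Clamped scale factor for one spatial axis: when out_size == 1 the naive
// (out_size - 1) denominator would be zero, so it is raised to 1 to keep the
// division well-defined, mirroring the core::cmp::max guard in the diff.
fn axis_ratio(in_size: usize, out_size: usize) -> f64 {
    ((in_size - 1) as f64) / (core::cmp::max(out_size - 1, 1) as f64)
}
```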
1 change: 0 additions & 1 deletion crates/burn-tensor/src/tests/module/bicubic_interpolate.rs
@@ -86,7 +86,6 @@ mod tests {
}

#[test]
#[ignore = "https://github.com/tracel-ai/burn/issues/2080"]
fn test_1d_bicubic() {
// Initialize the model without weights (because the exported file does not contain them)
let device = Default::default();
1 change: 0 additions & 1 deletion crates/burn-tensor/src/tests/module/bilinear_interpolate.rs
@@ -86,7 +86,6 @@ mod tests {
}

#[test]
#[ignore = "https://github.com/tracel-ai/burn/issues/2080"]
fn test_1d_bilinear() {
// Initialize the model without weights (because the exported file does not contain them)
let device = Default::default();
2 changes: 1 addition & 1 deletion crates/burn-train/src/renderer/tui/progress.rs
@@ -86,7 +86,7 @@ impl ProgressBarView {

let iteration = Gauge::default()
.gauge_style(Style::default().fg(Color::Yellow))
.ratio(self.progress);
.ratio(self.progress.min(1.0));
let eta = Paragraph::new(Line::from(vec![
Span::from(" ("),
Span::from(self.eta).italic(),
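The `.min(1.0)` added here keeps the gauge ratio from exceeding 1.0 when the tracked progress briefly overshoots, since the gauge widget expects a ratio in the 0.0 to 1.0 range; a minimal standalone sketch of the guard (assuming progress is stored as an `f64`):

```rust
// Cap a possibly-overshooting progress fraction at 1.0 before handing it to
// the gauge, mirroring the `.min(1.0)` guard in the diff above.
fn gauge_ratio(progress: f64) -> f64 {
    progress.min(1.0)
}
```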
