From bbbe0b730271e52c0ae8f6ac1942de5464c3be3c Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Wed, 13 Nov 2024 08:00:21 +0000
Subject: [PATCH] Prefer `Tensor::from` for creating vectors from array
 literals

---
 rten-tensor/src/tensor.rs     | 14 +++++++-------
 src/graph.rs                  | 16 ++++++++--------
 src/ops/binary_elementwise.rs |  4 ++--
 src/ops/conv.rs               |  6 +++---
 src/ops/layout.rs             |  8 ++++----
 src/ops/reduce.rs             |  2 +-
 src/ops/unary_elementwise.rs  |  6 ++----
 7 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index e7d10dcb..0b1bb7af 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -2921,7 +2921,7 @@ mod tests {
 
     #[test]
     fn test_into_data() {
-        let tensor = NdTensor::from_data([2], vec![2., 3.]);
+        let tensor = NdTensor::from([2., 3.]);
         assert_eq!(tensor.into_data(), vec![2., 3.]);
 
         let mut tensor = NdTensor::from_data([2, 2], vec![1., 2., 3., 4.]);
@@ -3043,18 +3043,18 @@ mod tests {
 
     #[test]
     fn test_item() {
-        let tensor = NdTensor::from_data([], vec![5.]);
+        let tensor = NdTensor::from(5.);
         assert_eq!(tensor.item(), Some(&5.));
-        let tensor = NdTensor::from_data([1], vec![6.]);
+        let tensor = NdTensor::from([6.]);
         assert_eq!(tensor.item(), Some(&6.));
-        let tensor = NdTensor::from_data([2], vec![2., 3.]);
+        let tensor = NdTensor::from([2., 3.]);
         assert_eq!(tensor.item(), None);
 
-        let tensor = Tensor::from_data(&[], vec![5.]);
+        let tensor = Tensor::from(5.);
         assert_eq!(tensor.item(), Some(&5.));
-        let tensor = Tensor::from_data(&[1], vec![6.]);
+        let tensor = Tensor::from([6.]);
         assert_eq!(tensor.item(), Some(&6.));
-        let tensor = Tensor::from_data(&[2], vec![2., 3.]);
+        let tensor = Tensor::from([2., 3.]);
         assert_eq!(tensor.item(), None);
     }
 
diff --git a/src/graph.rs b/src/graph.rs
index 2ecd2a1f..40f3ce6b 100644
--- a/src/graph.rs
+++ b/src/graph.rs
@@ -1715,7 +1715,7 @@ mod tests {
     fn test_graph_node_debug_names() {
         let mut g = Graph::new();
 
-        let weights = Tensor::from_data(&[1], vec![0.3230]);
+        let weights = Tensor::from([0.3230]);
         let weights_id = g.add_constant(Some("weights"), weights.clone());
         let input_id = g.add_value(Some("input"), None);
         let relu_out_id = g.add_value(Some("relu_out"), None);
@@ -1820,18 +1820,18 @@ mod tests {
         // op_d is the same as op_c, but input order is reversed
         let (_, op_d_out) = g.add_simple_op("op_d", Concat { axis: 0 }, &[op_b_out, op_a_out]);
 
-        let input = Tensor::from_data(&[1], vec![1.]);
+        let input = Tensor::from([1.]);
         let results = g
             .run(vec![(input_id, input.view().into())], &[op_c_out], None)
             .unwrap();
-        let expected = Tensor::from_data(&[2], vec![2., 3.]);
+        let expected = Tensor::from([2., 3.]);
         expect_equal(&results[0].as_tensor_view().unwrap(), &expected.view())?;
 
         let results = g
             .run(vec![(input_id, input.into())], &[op_d_out], None)
             .unwrap();
-        let expected = Tensor::from_data(&[2], vec![3., 2.]);
+        let expected = Tensor::from([3., 2.]);
         expect_equal(&results[0].as_tensor_view().unwrap(), &expected.view())?;
 
         Ok(())
     }
@@ -1865,7 +1865,7 @@ mod tests {
     fn test_graph_many_steps() -> Result<(), Box<dyn Error>> {
        let mut g = Graph::new();
 
-        let input = Tensor::from_data(&[5], vec![1., 2., 3., 4., 5.]);
+        let input = Tensor::from([1., 2., 3., 4., 5.]);
         let input_id = g.add_value(Some("input"), None);
 
         let mut prev_output = input_id;
@@ -1884,7 +1884,7 @@ mod tests {
             .run(vec![(input_id, input.into())], &[prev_output], None)
             .unwrap();
-        let expected = Tensor::from_data(&[5], vec![101., 102., 103., 104., 105.]);
+        let expected = Tensor::from([101., 102., 103., 104., 105.]);
         expect_equal(&results[0].as_tensor_view().unwrap(), &expected.view())?;
 
         Ok(())
     }
@@ -1894,7 +1894,7 @@ mod tests {
     fn test_noop_graph() -> Result<(), Box<dyn Error>> {
         let mut g = Graph::new();
 
-        let input = Tensor::from_data(&[5], vec![1., 2., 3., 4., 5.]);
+        let input = Tensor::from([1., 2., 3., 4., 5.]);
         let input_id = g.add_value(Some("input"), None);
 
         let results = g
@@ -1910,7 +1910,7 @@ mod tests {
     fn test_constant_graph() -> Result<(), Box<dyn Error>> {
         let mut g = Graph::new();
 
-        let value = Tensor::from_data(&[5], vec![1., 2., 3., 4., 5.]);
+        let value = Tensor::from([1., 2., 3., 4., 5.]);
         let const_id = g.add_constant(Some("weight"), value.clone());
 
         let results = g.run(vec![], &[const_id], None).unwrap();
diff --git a/src/ops/binary_elementwise.rs b/src/ops/binary_elementwise.rs
index ef18e013..4d1c2447 100644
--- a/src/ops/binary_elementwise.rs
+++ b/src/ops/binary_elementwise.rs
@@ -1085,7 +1085,7 @@ mod tests {
         // Simple case where comparing ordering of tensor shapes tells us
         // target shape.
         let a = Tensor::from_data(&[2, 2], vec![1., 2., 3., 4.]);
-        let b = Tensor::from_data(&[1], vec![10.]);
+        let b = Tensor::from([10.]);
         let expected = Tensor::from_data(&[2, 2], vec![11., 12., 13., 14.]);
         let result = add(&pool, a.view(), b.view()).unwrap();
         expect_equal(&result, &expected)?;
@@ -1096,7 +1096,7 @@ mod tests {
 
         // Case where the length of tensor shapes needs to be compared before
         // the ordering, since ([5] > [1,5]).
-        let a = Tensor::from_data(&[5], vec![1., 2., 3., 4., 5.]);
+        let a = Tensor::from([1., 2., 3., 4., 5.]);
         let b = Tensor::from_data(&[1, 5], vec![1., 2., 3., 4., 5.]);
         let expected = Tensor::from_data(&[1, 5], vec![2., 4., 6., 8., 10.]);
 
diff --git a/src/ops/conv.rs b/src/ops/conv.rs
index ca4e084c..5cb243c5 100644
--- a/src/ops/conv.rs
+++ b/src/ops/conv.rs
@@ -799,7 +799,7 @@ mod tests {
         expect_eq_1e4(&result, &expected_with_no_padding)?;
 
         let expected_with_bias = Tensor::from_data(&[1, 1, 1, 1], vec![3.6358]);
-        let bias = Tensor::from_data(&[1], vec![1.0]);
+        let bias = Tensor::from([1.0]);
         let result = check_conv(
             input.view(),
             kernel.view(),
@@ -981,7 +981,7 @@ mod tests {
                 0.4273, 0.4180, 0.4338,
             ],
         );
-        let bias = Tensor::from_data(&[3], vec![0.1, 0.2, 0.3]);
+        let bias = Tensor::from([0.1, 0.2, 0.3]);
         let expected = Tensor::from_data(
             &[1, 3, 1, 1],
             vec![
@@ -1341,7 +1341,7 @@ mod tests {
         for eb in expected_with_bias.iter_mut() {
             *eb += 1.234;
         }
-        let bias = Tensor::from_data(&[1], vec![1.234]);
+        let bias = Tensor::from([1.234]);
         let result = conv_transpose(
             &pool,
             input.view(),
diff --git a/src/ops/layout.rs b/src/ops/layout.rs
index 29303a73..85cffea6 100644
--- a/src/ops/layout.rs
+++ b/src/ops/layout.rs
@@ -778,20 +778,20 @@ mod tests {
         expect_equal(&result, &expected)?;
 
         // Case where copied input dim is also zero.
-        let input = Tensor::<f32>::from_data(&[0], vec![]);
+        let input = Tensor::from([0.; 0]);
         let shape = NdTensor::from([0]);
         let expected = input.to_shape([0].as_slice());
         let result = reshape(
             &pool,
             input.view(),
             &shape.view(),
             false,
         )
         .unwrap();
         expect_equal(&result, &expected)?;
 
         // Case where there is no corresponding input dim.
-        let input = Tensor::from_data(&[1], vec![5.]);
+        let input = Tensor::from([5.]);
         let shape = NdTensor::from([1, 0]);
         let result = reshape(
             &pool,
@@ -859,7 +859,7 @@ mod tests {
         assert_eq!(result.err(), expected_err);
 
         // Case when allow_zero is true
-        let input = Tensor::from_data(&[1], vec![1]);
+        let input = Tensor::from([1]);
         let shape = NdTensor::from([0, -1]);
         let result = reshape(
             &pool,
@@ -890,7 +890,7 @@ mod tests {
     fn test_reshape_op() -> Result<(), Box<dyn Error>> {
         let pool = new_pool();
         let input = Tensor::from_data(&[2, 2], vec![-0.5, 0.5, 3.0, -5.5]);
-        let shape = Tensor::from_data(&[1], vec![4]);
+        let shape = Tensor::from([4]);
         let expected = input.to_shape([4].as_slice());
 
         let op = Reshape { allow_zero: false };
diff --git a/src/ops/reduce.rs b/src/ops/reduce.rs
index 34d0d1fa..b8a5130e 100644
--- a/src/ops/reduce.rs
+++ b/src/ops/reduce.rs
@@ -48,7 +48,7 @@ fn select_max_index<T, Cmp: Fn(&T, &T) -> std::cmp::Ordering>(
 
     if !keep_dims {
         let axes = &[resolved_axis as i32];
-        let axes = NdTensorView::from_data([1], axes);
+        let axes = NdTensorView::from(axes);
         squeeze_in_place(&mut reduced, Some(axes)).expect("Invalid axis");
     }
 
diff --git a/src/ops/unary_elementwise.rs b/src/ops/unary_elementwise.rs
index 4b7bba9d..bb2835b6 100644
--- a/src/ops/unary_elementwise.rs
+++ b/src/ops/unary_elementwise.rs
@@ -1141,10 +1141,8 @@ mod tests {
 
     #[test]
     fn test_sigmoid() -> Result<(), Box<dyn Error>> {
         let pool = new_pool();
-        let input: Tensor = Tensor::from_data(
-            &[9],
-            vec![-500.0, -3.0, -1.0, -0.5, 0.0, 0.5, 1.0, 3.0, 500.0],
-        );
+        let input: Tensor =
+            Tensor::from([-500.0, -3.0, -1.0, -0.5, 0.0, 0.5, 1.0, 3.0, 500.0]);
         let expected = input.map(|x| reference_sigmoid(*x));
         let result = sigmoid(&pool, input.view());
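
A note on the two construction styles, with a minimal standalone sketch. The
nested-array `From` impl and the `to_vec`/prelude usage below are assumptions
about rten-tensor's API for illustration; the scalar, 1-D array, `item`,
`from_data` and `into_data` calls are the forms exercised by the tests in
this patch:

    use rten_tensor::prelude::*;
    use rten_tensor::{NdTensor, Tensor};

    fn main() {
        // Old style: an explicit shape plus a flat data vector.
        let a = Tensor::from_data(&[2], vec![2., 3.]);

        // New style: rank and shape are inferred from the literal.
        let b = Tensor::from([2., 3.]); // 1-D vector, shape [2]
        let s = Tensor::from(5.); // 0-D scalar
        // Assumed nested-array impl, giving a static-rank 2-D tensor.
        let m = NdTensor::from([[1., 2.], [3., 4.]]);

        assert_eq!(a.to_vec(), b.to_vec()); // both hold [2., 3.]
        assert_eq!(s.item(), Some(&5.));
        assert_eq!(m.into_data(), vec![1., 2., 3., 4.]); // row-major order
    }

The two styles produce the same tensor at runtime; the call sites this patch
leaves on `from_data` are those where the desired shape differs from the
literal's natural rank (for example `&[1, 1, 1, 1]` around a single element)
or where the data is not an array literal at all.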