From 2659a4e1c76d81e20c7f15811877fb5d7d9e1a69 Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sat, 9 Mar 2024 19:44:59 -0800 Subject: [PATCH 01/12] Upgrade to TensorFlow 2.16.1 --- .github/workflows/requirements.txt | 2 +- RELEASING.md | 1 + examples/addition/model.pb | Bin 178 -> 178 bytes run-valgrind | 2 +- tensorflow-proto-codegen/src/main.rs | 62 +++++++++++++-------------- tensorflow-sys/build.rs | 8 ++-- test-all | 7 ++- 7 files changed, 43 insertions(+), 39 deletions(-) diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt index 39be941d37..aad4d3f1bc 100644 --- a/.github/workflows/requirements.txt +++ b/.github/workflows/requirements.txt @@ -1 +1 @@ -tensorflow == 2.13.0 +tensorflow == 2.16.1 diff --git a/RELEASING.md b/RELEASING.md index 3d06cd2247..da14e9b932 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -63,5 +63,6 @@ Note that any crate not mentioned here (e.g. tensorflow-proto-codegen, tensorflo 1. Run `source ~/tensorflow-${TENSORFLOW_VERSION?}/bin/activate` to activate the virtualenv 1. Run `pip install --upgrade pip` 1. Run `pip install --upgrade tensorflow==${TENSORFLOW_VERSION?}` + 1. Run `pip install tf_keras` (required for examples/mobilenetv3) 1. Run `python examples/addition/addition.py` 1. Run `deactivate` to exit the virtualenv diff --git a/examples/addition/model.pb b/examples/addition/model.pb index 882b3516cd2ab6b04d3694a8b86d0f9c97e87026..0fd289ea749b26062782920990368e3c27a518a7 100644 GIT binary patch delta 9 QcmdnQxQTJX2Bv4c01~DHoB#j- delta 9 QcmdnQxQTJX2BuTo01|8iegFUf diff --git a/run-valgrind b/run-valgrind index 1a330d7de5..89cf19268f 100755 --- a/run-valgrind +++ b/run-valgrind @@ -19,7 +19,7 @@ function run { echo } -tensorflow_version=2.13.0 +tensorflow_version=2.16.1 valgrind_log=valgrind.log truncate --size=0 "$valgrind_log" diff --git a/tensorflow-proto-codegen/src/main.rs b/tensorflow-proto-codegen/src/main.rs index cf84605f1f..9022cce8de 100644 --- a/tensorflow-proto-codegen/src/main.rs +++ b/tensorflow-proto-codegen/src/main.rs @@ -18,39 +18,39 @@ fn main() -> Result<(), Box> { ) .inputs( [ - "core/framework/allocation_description.proto", - "core/framework/attr_value.proto", - "core/framework/cost_graph.proto", - "core/framework/full_type.proto", - "core/framework/function.proto", - "core/framework/graph.proto", - "core/framework/graph_debug_info.proto", - "core/framework/node_def.proto", - "core/framework/op_def.proto", - "core/framework/resource_handle.proto", - "core/framework/step_stats.proto", - "core/framework/tensor.proto", - "core/framework/tensor_description.proto", - "core/framework/tensor_shape.proto", - "core/framework/types.proto", - "core/framework/variable.proto", - "core/framework/versions.proto", - "core/protobuf/cluster.proto", - "core/protobuf/config.proto", - "core/protobuf/debug.proto", - "core/protobuf/meta_graph.proto", - "core/protobuf/rewriter_config.proto", - "core/protobuf/saved_model.proto", - "core/protobuf/saved_object_graph.proto", - "core/protobuf/saver.proto", - "core/protobuf/struct.proto", - "core/protobuf/trackable_object_graph.proto", - "core/protobuf/verifier_config.proto", - "tsl/protobuf/coordination_config.proto", - "tsl/protobuf/rpc_options.proto", + "tensorflow/core/framework/allocation_description.proto", + "tensorflow/core/framework/attr_value.proto", + "tensorflow/core/framework/cost_graph.proto", + "tensorflow/core/framework/full_type.proto", + "tensorflow/core/framework/function.proto", + "tensorflow/core/framework/graph.proto", + 
"tensorflow/core/framework/graph_debug_info.proto", + "tensorflow/core/framework/node_def.proto", + "tensorflow/core/framework/op_def.proto", + "tensorflow/core/framework/resource_handle.proto", + "tensorflow/core/framework/step_stats.proto", + "tensorflow/core/framework/tensor.proto", + "tensorflow/core/framework/tensor_description.proto", + "tensorflow/core/framework/tensor_shape.proto", + "tensorflow/core/framework/types.proto", + "tensorflow/core/framework/variable.proto", + "tensorflow/core/framework/versions.proto", + "tensorflow/core/protobuf/cluster.proto", + "tensorflow/core/protobuf/config.proto", + "tensorflow/core/protobuf/debug.proto", + "tensorflow/core/protobuf/meta_graph.proto", + "tensorflow/core/protobuf/rewriter_config.proto", + "tensorflow/core/protobuf/saved_model.proto", + "tensorflow/core/protobuf/saved_object_graph.proto", + "tensorflow/core/protobuf/saver.proto", + "tensorflow/core/protobuf/struct.proto", + "tensorflow/core/protobuf/trackable_object_graph.proto", + "tensorflow/core/protobuf/verifier_config.proto", + "third_party/xla/third_party/tsl/tsl/protobuf/coordination_config.proto", + "third_party/xla/third_party/tsl/tsl/protobuf/rpc_options.proto", ] .iter() - .map(|p| format!("{}/tensorflow/{}", tensorflow_folder, p)) + .map(|p| format!("{}/{}", tensorflow_folder, p)) .collect::>(), ) .include(tensorflow_folder) diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs index 1a4314f2da..ab7f16b6c9 100644 --- a/tensorflow-sys/build.rs +++ b/tensorflow-sys/build.rs @@ -24,8 +24,8 @@ const REPOSITORY: &str = "https://github.com/tensorflow/tensorflow.git"; const FRAMEWORK_TARGET: &str = "tensorflow:libtensorflow_framework"; const TARGET: &str = "tensorflow:libtensorflow"; // `VERSION` and `TAG` are separate because the tag is not always `'v' + VERSION`. -const VERSION: &str = "2.13.0"; -const TAG: &str = "v2.13.0"; +const VERSION: &str = "2.16.1"; +const TAG: &str = "v2.16.1"; const MIN_BAZEL: &str = "3.7.2"; macro_rules! get(($name:expr) => (ok!(env::var($name)))); @@ -198,11 +198,11 @@ fn install_prebuilt() { let windows = target_os() == "windows"; let ext = if windows { ".zip" } else { ".tar.gz" }; let binary_url = format!( - "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-{}-{}-{}-{}{}", + "https://storage.googleapis.com/tensorflow/versions/{}/libtensorflow-{}-{}-{}{}", + VERSION, proc_type, os, target_arch(), - VERSION, ext ); log_var!(binary_url); diff --git a/test-all b/test-all index 66824f16c9..eb0ca73d26 100755 --- a/test-all +++ b/test-all @@ -20,7 +20,7 @@ if [[ "${version_build_script}" != "${version_run_valgrind}" || \ echo " valgrind run script." 
echo " tensorflow-sys/build.rs: ${version_build_script}" echo " run-valgrind: ${version_run_valgrind}" - echo " ./github/workflows/requirements.txt: ${version_requirements}" + echo " .github/workflows/requirements.txt: ${version_requirements}" exit 1 fi @@ -47,7 +47,10 @@ if [[ "${version_tensorflow_sys_crate}" != "${version_tensorflow_sys_readme}" ]] exit 1 fi -run python3 examples/mobilenetv3/create_model.py +# Legacy Keras required for now because Keras 3 requires exporting models as +# Keras format, which the C API can't read: +# https://github.com/tensorflow/tensorflow/issues/70514 +TF_USE_LEGACY_KERAS=1 run python3 examples/mobilenetv3/create_model.py # TODO(#391): Re-enable: (cd test_resources/library && ./build-test-op) run cargo fmt --all -- --check run cargo test -vv -j 2 From 89fa9ff2a9320625ea5ac9c916641c80121f7969 Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sat, 9 Mar 2024 20:26:48 -0800 Subject: [PATCH 02/12] Fix Clippy lints --- src/graph.rs | 17 ++++--- src/lib.rs | 63 +++++++++++++------------- src/session.rs | 2 +- tensorflow-op-codegen/src/bin/eager.rs | 2 +- tensorflow-sys/build.rs | 4 +- 5 files changed, 43 insertions(+), 45 deletions(-) diff --git a/src/graph.rs b/src/graph.rs index 2c20a2c9d0..c3943f905a 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -22,7 +22,6 @@ use std::fmt; use std::fmt::Display; use std::fmt::Formatter; use std::mem::MaybeUninit; -use std::os::raw::c_void as std_c_void; use std::ptr; use std::slice; use std::str::FromStr; @@ -1906,7 +1905,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrString( self.inner, c_attr_name.as_ptr(), - c_value.as_ptr() as *const std_c_void, + c_value.as_ptr() as *const c_void, c_value.len() as size_t, ); } @@ -1928,7 +1927,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrStringList( self.inner, c_attr_name.as_ptr(), - ptrs.as_ptr() as *const *const std_c_void, + ptrs.as_ptr(), lens.as_ptr(), ptrs.len() as c_int, ); @@ -2159,7 +2158,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrTensorShapeProto( self.inner, c_attr_name.as_ptr(), - value.as_ptr() as *const std_c_void, + value.as_ptr() as *const c_void, value.len() as size_t, status.inner(), ); @@ -2185,7 +2184,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrTensorShapeProtoList( self.inner, c_attr_name.as_ptr(), - ptrs.as_ptr() as *const *const std_c_void, + ptrs.as_ptr(), lens.as_ptr(), ptrs.len() as c_int, status.inner(), @@ -2238,7 +2237,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrTensorList( self.inner, c_attr_name.as_ptr(), - ptrs.as_ptr() as *const *mut tf::TF_Tensor, + ptrs.as_ptr(), ptrs.len() as c_int, status.inner(), ); @@ -2261,7 +2260,7 @@ impl<'a> OperationDescription<'a> { tf::TF_SetAttrValueProto( self.inner, c_attr_name.as_ptr(), - value.as_ptr() as *const std_c_void, + value.as_ptr() as *const c_void, // Allow trivial_numeric_casts because usize is not // necessarily size_t. 
value.len() as size_t, @@ -2329,7 +2328,7 @@ impl Function { let status = Status::new(); unsafe { let inner = tf::TF_FunctionImportFunctionDef( - proto.as_ptr() as *const std_c_void, + proto.as_ptr() as *const c_void, proto.len(), status.inner, ); @@ -2349,7 +2348,7 @@ impl Function { tf::TF_FunctionSetAttrValueProto( self.inner, attr_name_cstr.as_ptr(), - proto.as_ptr() as *const std_c_void, + proto.as_ptr() as *const c_void, proto.len(), status.inner, ); diff --git a/src/lib.rs b/src/lib.rs index b3a64ef3ac..dbb1f233bf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -344,87 +344,86 @@ c_enum!("Error values that can be returned.", TF_Code, Code { //////////////////////// -c_enum!("Type of a single tensor element.", TF_DataType, DataType { +c_enum!( +TF_DataType, +/// Type of a single tensor element. +#[derive(Default)] +DataType { /// 32-bit floating point. - value Float = 1, + #[default] + Float = 1, /// 64-bit floating point. - value Double = 2, + Double = 2, /// 32-bit signed integer. - value Int32 = 3, + Int32 = 3, /// 8-bit unsigned integer. - value UInt8 = 4, + UInt8 = 4, /// 16-bit signed integer. - value Int16 = 5, + Int16 = 5, /// 8-bit signed integer. - value Int8 = 6, + Int8 = 6, /// String. - value String = 7, + String = 7, /// Complex number composed of two 32-bit floats. - value Complex64 = 8, + Complex64 = 8, /// 64-bit signed integer. - value Int64 = 9, + Int64 = 9, /// Boolean. - value Bool = 10, + Bool = 10, /// Quantized 8-bit signed integer. - value QInt8 = 11, + QInt8 = 11, /// Quantized 8-bit unsigned integer. - value QUInt8 = 12, + QUInt8 = 12, /// Quantized 32-bit signed integer. - value QInt32 = 13, + QInt32 = 13, /// Float32 truncated to 16 bits. Only for cast ops. /// Note that this is not the same as Half. BFloat16 is not an IEEE-754 /// 16-bit float. See /// /// for details. - value BFloat16 = 14, + BFloat16 = 14, /// Quantized 16-bit signed integer. - value QInt16 = 15, + QInt16 = 15, /// Quantized 16-bit unsigned integer. - value QUInt16 = 16, + QUInt16 = 16, /// 16-bit unsigned integer. - value UInt16 = 17, + UInt16 = 17, /// Complex number composed of two 64-bit floats. - value Complex128 = 18, + Complex128 = 18, /// 16-bit floating point. - value Half = 19, + Half = 19, /// TensorFlow Resource (name, container, device,...) - value Resource = 20, + Resource = 20, /// A dynamic type similar to std::any::Any. - value Variant = 21, + Variant = 21, /// 32-bit unsigned integer. - value UInt32 = 22, + UInt32 = 22, /// 64-bit unsigned integer. - value UInt64 = 23, + UInt64 = 23, }); -impl Default for DataType { - fn default() -> DataType { - DataType::Float - } -} - impl DataType { // We don't use Into, because we don't want this to be public API. fn into_proto(self) -> protos::types::DataType { @@ -658,7 +657,7 @@ pub type Result = std::result::Result; //////////////////////// /// A common implementation of the sealed supertrait -/// +/// /// See https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed mod private { use crate::{BFloat16, QInt16, QInt32, QInt8, QUInt16, QUInt8}; @@ -1092,7 +1091,7 @@ where // Zero-initialize allocated memory. 
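            // (`tf::TF_TensorData` already returns a raw `*mut c_void`, so the
            // result can be passed to `libc::memset` without a cast.)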
let data = tf::TF_TensorData(inner); let byte_size = tf::TF_TensorByteSize(inner); - libc::memset(data as *mut libc::c_void, 0, byte_size); + libc::memset(data, 0, byte_size); TensorDataCRepr { inner, diff --git a/src/session.rs b/src/session.rs index 3ac69a193a..ed842517fe 100644 --- a/src/session.rs +++ b/src/session.rs @@ -164,7 +164,7 @@ impl Session { self.inner, run_options_ptr, step.input_ports.as_ptr(), - input_tensors.as_ptr() as *const *mut tf::TF_Tensor, + input_tensors.as_ptr(), input_tensors.len() as c_int, step.output_ports.as_ptr(), step.output_tensors.as_mut_ptr(), diff --git a/tensorflow-op-codegen/src/bin/eager.rs b/tensorflow-op-codegen/src/bin/eager.rs index 89640357f0..9b639e8f98 100644 --- a/tensorflow-op-codegen/src/bin/eager.rs +++ b/tensorflow-op-codegen/src/bin/eager.rs @@ -437,7 +437,7 @@ fn define_op( write!(w, "Some(f32::INFINITY)")?; } else if f == &f32::NEG_INFINITY { write!(w, "Some(f32::NEG_INFINITY)")?; - } else if f == &f32::NAN { + } else if f.is_nan() { write!(w, "Some(f32::NAN)")?; } else { write!(w, "Some({}f32)", f)?; diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs index ab7f16b6c9..d1bc1851a3 100644 --- a/tensorflow-sys/build.rs +++ b/tensorflow-sys/build.rs @@ -245,8 +245,8 @@ fn install_prebuilt() { let framework_library_file = format!("{}{}{}", dll_prefix(), FRAMEWORK_LIBRARY, dll_suffix()); let library_file = format!("{}{}{}", dll_prefix(), LIBRARY, dll_suffix()); - let framework_library_full_path = lib_dir.join(&framework_library_file); - let library_full_path = lib_dir.join(&library_file); + let framework_library_full_path = lib_dir.join(framework_library_file); + let library_full_path = lib_dir.join(library_file); let download_required = (!windows && !framework_library_full_path.exists()) || !library_full_path.exists(); From 27b7c80abdb90691b8c06e4f55c3cf026a6b4ec6 Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sun, 7 Jul 2024 14:52:36 -0700 Subject: [PATCH 03/12] Update releasing instructions --- RELEASING.md | 21 ++++++++++----------- create-virtualenv | 25 +++++++++++++++++++++++++ run-valgrind | 2 -- test-all | 9 +++------ 4 files changed, 38 insertions(+), 19 deletions(-) create mode 100755 create-virtualenv diff --git a/RELEASING.md b/RELEASING.md index da14e9b932..fd0ae87923 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -36,7 +36,7 @@ Note that any crate not mentioned here (e.g. tensorflow-proto-codegen, tensorflo 1. Bump the version for `tensorflow-internal-macros` in the root `Cargo.toml` 1. Bump the version number in `Cargo.toml` 1. Bump the version number in `README.md` -1. Run `./test-all` +1. Inside a virtualenv, run `./test-all`. (See "Running in a virtualenv" section.) 1. Double-check that addition.py is built using the version of TensorFlow being linked against. (See "Upgrading TensorFlow" section.) 1. Run `./run-valgrind` 1. Commit and push the changes. (Push before publishing to ensure that the changes being published are up to date.) @@ -56,13 +56,12 @@ Note that any crate not mentioned here (e.g. tensorflow-proto-codegen, tensorflo ## Upgrading TensorFlow 1. Update version and tag in tensorflow-sys/build.rs -1. Update version in run-valgrind -1. Run `python examples/addition/addition.py` using the version of TensorFlow being linked against. - (Use pip in a virtualenv, see https://www.tensorflow.org/install/pip#2-create-a-virtual-environment-recommended) - 1. Run `virtualenv --system-site-packages -p python3 ~/tensorflow-${TENSORFLOW_VERSION?}` - 1. 
Run `source ~/tensorflow-${TENSORFLOW_VERSION?}/bin/activate` to activate the virtualenv - 1. Run `pip install --upgrade pip` - 1. Run `pip install --upgrade tensorflow==${TENSORFLOW_VERSION?}` - 1. Run `pip install tf_keras` (required for examples/mobilenetv3) - 1. Run `python examples/addition/addition.py` - 1. Run `deactivate` to exit the virtualenv +1. Update version in .github/workflow/requirements.txt +1. Inside a virtualenv using the version of TensorFlow being linked against, run `python examples/addition/addition.py`. (See "Running in a virtualenv" section.) + +## Running in a virtualenv + +1. If you haven't set it up, run `./create-virtualenv ${TENSORFLOW_VERSION?}` +1. Run `source ~/tensorflow-${TENSORFLOW_VERSION?}/bin/activate` to activate the virtualenv +1. Do whatever you need to do in the virtual env +1. Run `deactivate` to exit the virtualenv diff --git a/create-virtualenv b/create-virtualenv new file mode 100755 index 0000000000..17ced0bb15 --- /dev/null +++ b/create-virtualenv @@ -0,0 +1,25 @@ +#!/bin/bash + +# See https://www.tensorflow.org/install/pip#2-create-a-virtual-environment-recommended + +set -euo pipefail + +if [ "$#" -lt 1 ]; then + echo "Expected first argument to be TensorFlow version" + exit 1 +fi +TENSORFLOW_VERSION="$1" + +function run { + echo "----------------------------------------------------------------------" + echo "Running: $@" + "$@" + echo +} + +run virtualenv --system-site-packages -p python3 ~/tensorflow-${TENSORFLOW_VERSION?} +run source ~/tensorflow-${TENSORFLOW_VERSION?}/bin/activate +run pip install --upgrade pip +run pip install --upgrade tensorflow==${TENSORFLOW_VERSION?} +run pip install tf_keras # required for examples/mobilenetv3 +run deactivate diff --git a/run-valgrind b/run-valgrind index 89cf19268f..30cf6d3aea 100755 --- a/run-valgrind +++ b/run-valgrind @@ -19,8 +19,6 @@ function run { echo } -tensorflow_version=2.16.1 - valgrind_log=valgrind.log truncate --size=0 "$valgrind_log" diff --git a/test-all b/test-all index eb0ca73d26..ada9c9e29f 100755 --- a/test-all +++ b/test-all @@ -12,14 +12,11 @@ function run { # Make sure the Tensorflow version in the -sys build script matches the one in # the run-valgrind script. version_build_script=`grep "const VERSION" tensorflow-sys/build.rs | sed 's|.*"\([^"]*\)";|\1|g'` -version_run_valgrind=`grep "tensorflow_version=" run-valgrind | sed "s|.*=\(.*\)|\1|g"` version_requirements=`grep "tensorflow\s*=" .github/workflows/requirements.txt | sed "s|.*== \(.*\)|\1|g"` -if [[ "${version_build_script}" != "${version_run_valgrind}" || \ - "${version_build_script}" != "${version_requirements}" ]]; then - echo "ERROR: Tensorflow version specified in build script does not match the one in the" - echo " valgrind run script." +if [[ "${version_build_script}" != "${version_requirements}" ]]; then + echo "ERROR: TensorFlow version specified in build script does not match the one in the" + echo " GitHub requirements." 
echo " tensorflow-sys/build.rs: ${version_build_script}" - echo " run-valgrind: ${version_run_valgrind}" echo " .github/workflows/requirements.txt: ${version_requirements}" exit 1 fi From abb313116d8c326f1c10438103d7833733f39454 Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sun, 21 Jul 2024 15:04:44 -0700 Subject: [PATCH 04/12] Upgrade to TensorFlow 2.17.0 --- .github/workflows/requirements.txt | 2 +- examples/addition/model.pb | Bin 178 -> 178 bytes tensorflow-sys/build.rs | 4 ++-- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt index aad4d3f1bc..0d17710148 100644 --- a/.github/workflows/requirements.txt +++ b/.github/workflows/requirements.txt @@ -1 +1 @@ -tensorflow == 2.16.1 +tensorflow == 2.17.0 diff --git a/examples/addition/model.pb b/examples/addition/model.pb index 0fd289ea749b26062782920990368e3c27a518a7..19aae3ea30b98e5d5aa5dc8864fdfcede2ac2e05 100644 GIT binary patch delta 9 QcmdnQxQTJX2Buql01}S_kpKVy delta 9 QcmdnQxQTJX2Bv4c01~DHoB#j- diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs index d1bc1851a3..ad0b9a248a 100644 --- a/tensorflow-sys/build.rs +++ b/tensorflow-sys/build.rs @@ -24,8 +24,8 @@ const REPOSITORY: &str = "https://github.com/tensorflow/tensorflow.git"; const FRAMEWORK_TARGET: &str = "tensorflow:libtensorflow_framework"; const TARGET: &str = "tensorflow:libtensorflow"; // `VERSION` and `TAG` are separate because the tag is not always `'v' + VERSION`. -const VERSION: &str = "2.16.1"; -const TAG: &str = "v2.16.1"; +const VERSION: &str = "2.17.0"; +const TAG: &str = "v2.17.0"; const MIN_BAZEL: &str = "3.7.2"; macro_rules! get(($name:expr) => (ok!(env::var($name)))); From 7d6c5abff654bac1e0d357137098f4a60aae3bff Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sun, 21 Jul 2024 16:25:21 -0700 Subject: [PATCH 05/12] Update protos and ops for TensorFlow 2.17.0 --- src/eager/op/raw_ops.rs | 8349 +++++++++++- src/ops/ops_impl.rs | 13674 +++++++++++++++++++- src/protos/config.rs | 961 +- src/protos/coordination_config.rs | 72 +- src/protos/graph_debug_info.rs | 347 +- src/protos/meta_graph.rs | 175 +- src/protos/rewriter_config.rs | 90 +- src/protos/rpc_options.rs | 21 +- src/protos/struct_pb.rs | 149 +- src/protos/types.rs | 44 +- tensorflow-op-codegen/src/protos/types.rs | 44 +- tensorflow-proto-codegen/src/main.rs | 21 +- 12 files changed, 22557 insertions(+), 1390 deletions(-) diff --git a/src/eager/op/raw_ops.rs b/src/eager/op/raw_ops.rs index 5f08cd474c..6f1c107afb 100644 --- a/src/eager/op/raw_ops.rs +++ b/src/eager/op/raw_ops.rs @@ -8988,6 +8988,11 @@ pub struct BatchFunction { container: ::std::option::Option<::std::string::String>, shared_name: ::std::option::Option<::std::string::String>, batching_queue: ::std::option::Option<::std::string::String>, + low_priority_max_batch_size: ::std::option::Option, + low_priority_batch_timeout_micros: ::std::option::Option, + low_priority_allowed_batch_sizes: ::std::option::Option<::std::vec::Vec>, + low_priority_max_enqueued_batches: ::std::option::Option, + mixed_priority_policy: ::std::option::Option<::std::string::String>, Tout: ::std::option::Option<::std::vec::Vec>, enable_large_batch_splitting: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed @@ -9005,6 +9010,13 @@ impl ::std::default::Default for BatchFunction { container: None, shared_name: None, batching_queue: None, + low_priority_max_batch_size: Some(0i64), + low_priority_batch_timeout_micros: Some(0i64), + 
low_priority_allowed_batch_sizes: Some(vec![]), + low_priority_max_enqueued_batches: Some(0i64), + mixed_priority_policy: Some(::std::string::String::from( + "low_priority_padding_with_max_batch_size", + )), Tout: None, enable_large_batch_splitting: Some(false), target_device_name: None, @@ -9092,6 +9104,51 @@ impl BatchFunction { self } + /// Sets the `low_priority_max_batch_size` attribute. + pub fn low_priority_max_batch_size>( + mut self, + value: ArgType, + ) -> Self { + self.low_priority_max_batch_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `low_priority_batch_timeout_micros` attribute. + pub fn low_priority_batch_timeout_micros>( + mut self, + value: ArgType, + ) -> Self { + self.low_priority_batch_timeout_micros = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `low_priority_allowed_batch_sizes` attribute. + pub fn low_priority_allowed_batch_sizes>>( + mut self, + value: ArgType, + ) -> Self { + self.low_priority_allowed_batch_sizes = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `low_priority_max_enqueued_batches` attribute. + pub fn low_priority_max_enqueued_batches>( + mut self, + value: ArgType, + ) -> Self { + self.low_priority_max_enqueued_batches = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mixed_priority_policy` attribute. + pub fn mixed_priority_policy>( + mut self, + value: ArgType, + ) -> Self { + self.mixed_priority_policy = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `Tout` attribute. pub fn Tout>>( mut self, @@ -9161,6 +9218,21 @@ impl BatchFunction { if let ::std::option::Option::Some(value) = &self.batching_queue { op.set_attr_string("batching_queue", value)?; } + if let ::std::option::Option::Some(value) = &self.low_priority_max_batch_size { + op.set_attr_int("low_priority_max_batch_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.low_priority_batch_timeout_micros { + op.set_attr_int("low_priority_batch_timeout_micros", *value)?; + } + if let ::std::option::Option::Some(value) = &self.low_priority_allowed_batch_sizes { + op.set_attr_int_list("low_priority_allowed_batch_sizes", value)?; + } + if let ::std::option::Option::Some(value) = &self.low_priority_max_enqueued_batches { + op.set_attr_int("low_priority_max_enqueued_batches", *value)?; + } + if let ::std::option::Option::Some(value) = &self.mixed_priority_policy { + op.set_attr_string("mixed_priority_policy", value)?; + } if let ::std::option::Option::Some(value) = &self.Tout { op.set_attr_type_list("Tout", value)?; } @@ -9396,6 +9468,8 @@ pub fn batch_ifft3_d<'a, T0: crate::eager::ToTensorHandle<'a>>( pub struct BatchMatMul { adj_x: ::std::option::Option, adj_y: ::std::option::Option, + grad_x: ::std::option::Option, + grad_y: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -9404,6 +9478,8 @@ impl ::std::default::Default for BatchMatMul { Self { adj_x: Some(false), adj_y: Some(false), + grad_x: Some(false), + grad_y: Some(false), target_device_name: None, } } @@ -9426,6 +9502,18 @@ impl BatchMatMul { self } + /// Sets the `grad_x` attribute. + pub fn grad_x>(mut self, value: ArgType) -> Self { + self.grad_x = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_y` attribute. 
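+    /// (Added alongside `grad_x` in the TensorFlow 2.17 op definition;
+    /// presumably a hint that this op instance computes a gradient, which
+    /// backends can use when selecting kernels.)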
+ pub fn grad_y>(mut self, value: ArgType) -> Self { + self.grad_y = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -9456,6 +9544,12 @@ impl BatchMatMul { if let ::std::option::Option::Some(value) = &self.adj_y { op.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + op.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + op.set_attr_bool("grad_y", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -9490,6 +9584,8 @@ pub fn batch_mat_mul< pub struct BatchMatMulV2 { adj_x: ::std::option::Option, adj_y: ::std::option::Option, + grad_x: ::std::option::Option, + grad_y: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -9498,6 +9594,8 @@ impl ::std::default::Default for BatchMatMulV2 { Self { adj_x: Some(false), adj_y: Some(false), + grad_x: Some(false), + grad_y: Some(false), target_device_name: None, } } @@ -9520,6 +9618,18 @@ impl BatchMatMulV2 { self } + /// Sets the `grad_x` attribute. + pub fn grad_x>(mut self, value: ArgType) -> Self { + self.grad_x = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_y` attribute. + pub fn grad_y>(mut self, value: ArgType) -> Self { + self.grad_y = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -9550,6 +9660,12 @@ impl BatchMatMulV2 { if let ::std::option::Option::Some(value) = &self.adj_y { op.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + op.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + op.set_attr_bool("grad_y", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -9585,6 +9701,8 @@ pub struct BatchMatMulV3 { Tout: ::std::option::Option, adj_x: ::std::option::Option, adj_y: ::std::option::Option, + grad_x: ::std::option::Option, + grad_y: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -9594,6 +9712,8 @@ impl ::std::default::Default for BatchMatMulV3 { Tout: None, adj_x: Some(false), adj_y: Some(false), + grad_x: Some(false), + grad_y: Some(false), target_device_name: None, } } @@ -9622,6 +9742,18 @@ impl BatchMatMulV3 { self } + /// Sets the `grad_x` attribute. + pub fn grad_x>(mut self, value: ArgType) -> Self { + self.grad_x = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_y` attribute. + pub fn grad_y>(mut self, value: ArgType) -> Self { + self.grad_y = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. 
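    /// (Rust wrapper specific: the device on which the op will execute. This is
    /// not a TensorFlow op attribute, hence the empty name above.)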
pub fn target_device_name>( mut self, @@ -9655,6 +9787,12 @@ impl BatchMatMulV3 { if let ::std::option::Option::Some(value) = &self.adj_y { op.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + op.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + op.set_attr_bool("grad_y", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -18531,6 +18669,7 @@ pub fn collate_tpuembedding_memory<'a, T0: crate::eager::ToTensorHandle<'a>>( pub struct CollectiveAllToAllV2 { communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -18539,6 +18678,7 @@ impl ::std::default::Default for CollectiveAllToAllV2 { Self { communication_hint: Some(::std::string::String::from("auto")), timeout_seconds: Some(0f32), + is_stateless: Some(false), target_device_name: None, } } @@ -18564,6 +18704,12 @@ impl CollectiveAllToAllV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -18613,6 +18759,9 @@ impl CollectiveAllToAllV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { op.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + op.set_attr_bool("is_stateless", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -19476,6 +19625,7 @@ pub fn collective_gather<'a, T0: crate::eager::ToTensorHandle<'a>>( pub struct CollectiveGatherV2 { communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -19484,6 +19634,7 @@ impl ::std::default::Default for CollectiveGatherV2 { Self { communication_hint: Some(::std::string::String::from("auto")), timeout_seconds: Some(0f32), + is_stateless: Some(false), target_device_name: None, } } @@ -19509,6 +19660,12 @@ impl CollectiveGatherV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. 
pub fn target_device_name>( mut self, @@ -19558,6 +19715,9 @@ impl CollectiveGatherV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { op.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + op.set_attr_bool("is_stateless", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -19964,6 +20124,7 @@ pub struct CollectiveReduceScatterV2 { final_op: ::std::option::Option<::std::string::String>, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, max_subdivs_per_device: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, @@ -19975,6 +20136,7 @@ impl ::std::default::Default for CollectiveReduceScatterV2 { final_op: None, communication_hint: Some(::std::string::String::from("auto")), timeout_seconds: Some(0f32), + is_stateless: Some(false), max_subdivs_per_device: Some(-1i64), target_device_name: None, } @@ -20019,6 +20181,12 @@ impl CollectiveReduceScatterV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `max_subdivs_per_device` attribute. pub fn max_subdivs_per_device>( mut self, @@ -20083,6 +20251,9 @@ impl CollectiveReduceScatterV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { op.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + op.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.max_subdivs_per_device { op.set_attr_int("max_subdivs_per_device", *value)?; } @@ -20135,6 +20306,7 @@ pub struct CollectiveReduceV2 { final_op: ::std::option::Option<::std::string::String>, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, max_subdivs_per_device: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, @@ -20146,6 +20318,7 @@ impl ::std::default::Default for CollectiveReduceV2 { final_op: None, communication_hint: Some(::std::string::String::from("auto")), timeout_seconds: Some(0f32), + is_stateless: Some(false), max_subdivs_per_device: Some(-1i64), target_device_name: None, } @@ -20190,6 +20363,12 @@ impl CollectiveReduceV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `max_subdivs_per_device` attribute. 
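    /// (Defaults to -1 in the op definition; a negative value presumably leaves
    /// the subdivision count to the runtime.)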
pub fn max_subdivs_per_device>( mut self, @@ -20254,6 +20433,9 @@ impl CollectiveReduceV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { op.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + op.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.max_subdivs_per_device { op.set_attr_int("max_subdivs_per_device", *value)?; } @@ -21091,6 +21273,200 @@ pub fn compute_batch_size<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input_dataset) } +/// ComputeDedupDataSize +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ComputeDedupDataSize { + config: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ComputeDedupDataSize { + fn default() -> Self { + Self { + config: None, + target_device_name: None, + } + } +} +impl ComputeDedupDataSize { + /// Creates a new `ComputeDedupDataSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute compute_dedup_data_size. + pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "ComputeDedupDataSize")?; + + // Required input arguments + + // Attributes + if let ::std::option::Option::Some(value) = &self.config { + op.set_attr_string("config", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `ComputeDedupDataSize::new().call(&ctx)`. +/// +/// See : +pub fn compute_dedup_data_size<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = ComputeDedupDataSize::new(); + op.call(ctx) +} + +/// ComputeDedupDataSizeV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ComputeDedupDataSizeV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ComputeDedupDataSizeV2 { + fn default() -> Self { + Self { + config: None, + embedding_partitions: None, + hbm_buffers_config: None, + tpu_topology: None, + target_device_name: None, + } + } +} +impl ComputeDedupDataSizeV2 { + /// Creates a new `ComputeDedupDataSizeV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. 
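+    /// (Like `hbm_buffers_config` and `tpu_topology`, a serialized
+    /// configuration string introduced by the V2 dedup-data op definitions;
+    /// the wrapper leaves all three unset by default.)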
+ pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute compute_dedup_data_size_v2. + pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "ComputeDedupDataSizeV2")?; + + // Required input arguments + + // Attributes + if let ::std::option::Option::Some(value) = &self.config { + op.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + op.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + op.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + op.set_attr_string("tpu_topology", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `ComputeDedupDataSizeV2::new().call(&ctx)`. +/// +/// See : +pub fn compute_dedup_data_size_v2<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = ComputeDedupDataSizeV2::new(); + op.call(ctx) +} + /// ComputeDedupDataTupleMask /// /// See : @@ -21167,6 +21543,124 @@ pub fn compute_dedup_data_tuple_mask<'a>( op.call(ctx) } +/// ComputeDedupDataTupleMaskV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ComputeDedupDataTupleMaskV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ComputeDedupDataTupleMaskV2 { + fn default() -> Self { + Self { + config: None, + embedding_partitions: None, + hbm_buffers_config: None, + tpu_topology: None, + target_device_name: None, + } + } +} +impl ComputeDedupDataTupleMaskV2 { + /// Creates a new `ComputeDedupDataTupleMaskV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. 
+ pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute compute_dedup_data_tuple_mask_v2. + pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "ComputeDedupDataTupleMaskV2")?; + + // Required input arguments + + // Attributes + if let ::std::option::Option::Some(value) = &self.config { + op.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + op.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + op.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + op.set_attr_string("tpu_topology", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `ComputeDedupDataTupleMaskV2::new().call(&ctx)`. +/// +/// See : +pub fn compute_dedup_data_tuple_mask_v2<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = ComputeDedupDataTupleMaskV2::new(); + op.call(ctx) +} + /// Concat /// /// See : @@ -22491,35 +22985,37 @@ pub fn control_trigger<'a>(ctx: &'a crate::eager::Context) -> crate::Result<()> op.call(ctx) } -/// Conv2D +/// Conv /// -/// See : +/// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct Conv2D { +pub struct Conv { strides: ::std::option::Option<::std::vec::Vec>, - use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, explicit_paddings: ::std::option::Option<::std::vec::Vec>, data_format: ::std::option::Option<::std::string::String>, dilations: ::std::option::Option<::std::vec::Vec>, + batch_dims: ::std::option::Option, + groups: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } -impl ::std::default::Default for Conv2D { +impl ::std::default::Default for Conv { fn default() -> Self { Self { strides: None, - use_cudnn_on_gpu: Some(true), padding: None, explicit_paddings: Some(vec![]), - data_format: Some(::std::string::String::from("NHWC")), - dilations: Some(vec![1, 1, 1, 1]), + data_format: Some(::std::string::String::from("CHANNELS_LAST")), + dilations: Some(vec![]), + batch_dims: Some(1i64), + groups: Some(1i64), target_device_name: None, } } } -impl Conv2D { - /// Creates a new `Conv2D`. +impl Conv { + /// Creates a new `Conv`. pub fn new() -> Self { Self::default() } @@ -22533,12 +23029,6 @@ impl Conv2D { self } - /// Sets the `use_cudnn_on_gpu` attribute. - pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { - self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); - self - } - /// Sets the `padding` attribute. pub fn padding>( mut self, @@ -22575,152 +23065,15 @@ impl Conv2D { self } - /// Sets the `` attribute. 
- pub fn target_device_name>( - mut self, - value: ArgType, - ) -> Self { - self.target_device_name = ::std::option::Option::Some(value.into()); - self - } - - /// Execute conv2_d. - pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( - &self, - ctx: &'a crate::eager::Context, - input: &T0, - filter: &T1, - ) -> crate::Result> { - // Define Op - let mut op = super::Op::new(ctx, "Conv2D")?; - - // Required input arguments - op.add_input(&input.to_handle(ctx)?)?; - op.add_input(&filter.to_handle(ctx)?)?; - - // Attributes - if let ::std::option::Option::Some(value) = &self.strides { - op.set_attr_int_list("strides", value)?; - } - if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { - op.set_attr_bool("use_cudnn_on_gpu", *value)?; - } - if let ::std::option::Option::Some(value) = &self.padding { - op.set_attr_string("padding", value)?; - } - if let ::std::option::Option::Some(value) = &self.explicit_paddings { - op.set_attr_int_list("explicit_paddings", value)?; - } - if let ::std::option::Option::Some(value) = &self.data_format { - op.set_attr_string("data_format", value)?; - } - if let ::std::option::Option::Some(value) = &self.dilations { - op.set_attr_int_list("dilations", value)?; - } - - // Set the device name where this Op will be executed - if let ::std::option::Option::Some(value) = &self.target_device_name { - op.set_device(value)?; - } - // Execute Op - let [h] = op.execute::<1>(ctx)?; - Ok(h) - } -} - -/// Shorthand for `Conv2D::new().call(&ctx, &input, &filter)`. -/// -/// See : -pub fn conv2_d<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( - ctx: &'a crate::eager::Context, - input: &T0, - filter: &T1, -) -> crate::Result> { - let op = Conv2D::new(); - op.call(ctx, input, filter) -} - -/// Conv2DBackpropFilter -/// -/// See : -#[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct Conv2DBackpropFilter { - strides: ::std::option::Option<::std::vec::Vec>, - use_cudnn_on_gpu: ::std::option::Option, - padding: ::std::option::Option<::std::string::String>, - explicit_paddings: ::std::option::Option<::std::vec::Vec>, - data_format: ::std::option::Option<::std::string::String>, - dilations: ::std::option::Option<::std::vec::Vec>, - /// (Rust wrapper specific) A device name where this op will be executed - target_device_name: ::std::option::Option<::std::string::String>, -} -impl ::std::default::Default for Conv2DBackpropFilter { - fn default() -> Self { - Self { - strides: None, - use_cudnn_on_gpu: Some(true), - padding: None, - explicit_paddings: Some(vec![]), - data_format: Some(::std::string::String::from("NHWC")), - dilations: Some(vec![1, 1, 1, 1]), - target_device_name: None, - } - } -} -impl Conv2DBackpropFilter { - /// Creates a new `Conv2DBackpropFilter`. - pub fn new() -> Self { - Self::default() - } - - /// Sets the `strides` attribute. - pub fn strides>>( - mut self, - value: ArgType, - ) -> Self { - self.strides = ::std::option::Option::Some(value.into()); - self - } - - /// Sets the `use_cudnn_on_gpu` attribute. - pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { - self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); - self - } - - /// Sets the `padding` attribute. - pub fn padding>( - mut self, - value: ArgType, - ) -> Self { - self.padding = ::std::option::Option::Some(value.into()); - self - } - - /// Sets the `explicit_paddings` attribute. 
- pub fn explicit_paddings>>( - mut self, - value: ArgType, - ) -> Self { - self.explicit_paddings = ::std::option::Option::Some(value.into()); - self - } - - /// Sets the `data_format` attribute. - pub fn data_format>( - mut self, - value: ArgType, - ) -> Self { - self.data_format = ::std::option::Option::Some(value.into()); + /// Sets the `batch_dims` attribute. + pub fn batch_dims>(mut self, value: ArgType) -> Self { + self.batch_dims = ::std::option::Option::Some(value.into()); self } - /// Sets the `dilations` attribute. - pub fn dilations>>( - mut self, - value: ArgType, - ) -> Self { - self.dilations = ::std::option::Option::Some(value.into()); + /// Sets the `groups` attribute. + pub fn groups>(mut self, value: ArgType) -> Self { + self.groups = ::std::option::Option::Some(value.into()); self } @@ -22733,34 +23086,24 @@ impl Conv2DBackpropFilter { self } - /// Execute conv2_dbackprop_filter. - pub fn call< - 'a, - T0: crate::eager::ToTensorHandle<'a>, - T1: crate::eager::ToTensorHandle<'a>, - T2: crate::eager::ToTensorHandle<'a>, - >( + /// Execute conv. + pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( &self, ctx: &'a crate::eager::Context, input: &T0, - filter_sizes: &T1, - out_backprop: &T2, + filter: &T1, ) -> crate::Result> { // Define Op - let mut op = super::Op::new(ctx, "Conv2DBackpropFilter")?; + let mut op = super::Op::new(ctx, "Conv")?; // Required input arguments op.add_input(&input.to_handle(ctx)?)?; - op.add_input(&filter_sizes.to_handle(ctx)?)?; - op.add_input(&out_backprop.to_handle(ctx)?)?; + op.add_input(&filter.to_handle(ctx)?)?; // Attributes if let ::std::option::Option::Some(value) = &self.strides { op.set_attr_int_list("strides", value)?; } - if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { - op.set_attr_bool("use_cudnn_on_gpu", *value)?; - } if let ::std::option::Option::Some(value) = &self.padding { op.set_attr_string("padding", value)?; } @@ -22773,6 +23116,12 @@ impl Conv2DBackpropFilter { if let ::std::option::Option::Some(value) = &self.dilations { op.set_attr_int_list("dilations", value)?; } + if let ::std::option::Option::Some(value) = &self.batch_dims { + op.set_attr_int("batch_dims", *value)?; + } + if let ::std::option::Option::Some(value) = &self.groups { + op.set_attr_int("groups", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -22784,29 +23133,23 @@ impl Conv2DBackpropFilter { } } -/// Shorthand for `Conv2DBackpropFilter::new().call(&ctx, &input, &filter_sizes, &out_backprop)`. +/// Shorthand for `Conv::new().call(&ctx, &input, &filter)`. 
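+///
+/// A minimal usage sketch, assuming `ctx` is an eager `Context` and `input` and
+/// `filter` are `Tensor<f32>` values of compatible shapes (the attribute values
+/// below are illustrative, not defaults). Note that `strides` and `padding`
+/// have no default in the op definition, so the builder form is usually what
+/// you want rather than this bare shorthand:
+///
+/// ```ignore
+/// let output = Conv::new()
+///     .strides(vec![1i64, 1, 1, 1])
+///     .padding("SAME")
+///     .call(&ctx, &input, &filter)?;
+/// ```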
/// -/// See : -pub fn conv2_dbackprop_filter< - 'a, - T0: crate::eager::ToTensorHandle<'a>, - T1: crate::eager::ToTensorHandle<'a>, - T2: crate::eager::ToTensorHandle<'a>, ->( +/// See : +pub fn conv<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( ctx: &'a crate::eager::Context, input: &T0, - filter_sizes: &T1, - out_backprop: &T2, + filter: &T1, ) -> crate::Result> { - let op = Conv2DBackpropFilter::new(); - op.call(ctx, input, filter_sizes, out_backprop) + let op = Conv::new(); + op.call(ctx, input, filter) } -/// Conv2DBackpropFilterV2 +/// Conv2D /// -/// See : +/// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct Conv2DBackpropFilterV2 { +pub struct Conv2D { strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, @@ -22816,7 +23159,7 @@ pub struct Conv2DBackpropFilterV2 { /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } -impl ::std::default::Default for Conv2DBackpropFilterV2 { +impl ::std::default::Default for Conv2D { fn default() -> Self { Self { strides: None, @@ -22829,8 +23172,8 @@ impl ::std::default::Default for Conv2DBackpropFilterV2 { } } } -impl Conv2DBackpropFilterV2 { - /// Creates a new `Conv2DBackpropFilterV2`. +impl Conv2D { + /// Creates a new `Conv2D`. pub fn new() -> Self { Self::default() } @@ -22895,26 +23238,19 @@ impl Conv2DBackpropFilterV2 { self } - /// Execute conv2_dbackprop_filter_v2. - pub fn call< - 'a, - T0: crate::eager::ToTensorHandle<'a>, - T1: crate::eager::ToTensorHandle<'a>, - T2: crate::eager::ToTensorHandle<'a>, - >( + /// Execute conv2_d. + pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( &self, ctx: &'a crate::eager::Context, input: &T0, filter: &T1, - out_backprop: &T2, ) -> crate::Result> { // Define Op - let mut op = super::Op::new(ctx, "Conv2DBackpropFilterV2")?; + let mut op = super::Op::new(ctx, "Conv2D")?; // Required input arguments op.add_input(&input.to_handle(ctx)?)?; op.add_input(&filter.to_handle(ctx)?)?; - op.add_input(&out_backprop.to_handle(ctx)?)?; // Attributes if let ::std::option::Option::Some(value) = &self.strides { @@ -22946,29 +23282,23 @@ impl Conv2DBackpropFilterV2 { } } -/// Shorthand for `Conv2DBackpropFilterV2::new().call(&ctx, &input, &filter, &out_backprop)`. +/// Shorthand for `Conv2D::new().call(&ctx, &input, &filter)`. 
/// -/// See : -pub fn conv2_dbackprop_filter_v2< - 'a, - T0: crate::eager::ToTensorHandle<'a>, - T1: crate::eager::ToTensorHandle<'a>, - T2: crate::eager::ToTensorHandle<'a>, ->( +/// See : +pub fn conv2_d<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( ctx: &'a crate::eager::Context, input: &T0, filter: &T1, - out_backprop: &T2, ) -> crate::Result> { - let op = Conv2DBackpropFilterV2::new(); - op.call(ctx, input, filter, out_backprop) + let op = Conv2D::new(); + op.call(ctx, input, filter) } -/// Conv2DBackpropInput +/// Conv2DBackpropFilter /// -/// See : +/// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct Conv2DBackpropInput { +pub struct Conv2DBackpropFilter { strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, @@ -22978,7 +23308,7 @@ pub struct Conv2DBackpropInput { /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } -impl ::std::default::Default for Conv2DBackpropInput { +impl ::std::default::Default for Conv2DBackpropFilter { fn default() -> Self { Self { strides: None, @@ -22991,8 +23321,8 @@ impl ::std::default::Default for Conv2DBackpropInput { } } } -impl Conv2DBackpropInput { - /// Creates a new `Conv2DBackpropInput`. +impl Conv2DBackpropFilter { + /// Creates a new `Conv2DBackpropFilter`. pub fn new() -> Self { Self::default() } @@ -23057,7 +23387,7 @@ impl Conv2DBackpropInput { self } - /// Execute conv2_dbackprop_input. + /// Execute conv2_dbackprop_filter. pub fn call< 'a, T0: crate::eager::ToTensorHandle<'a>, @@ -23066,16 +23396,16 @@ impl Conv2DBackpropInput { >( &self, ctx: &'a crate::eager::Context, - input_sizes: &T0, - filter: &T1, + input: &T0, + filter_sizes: &T1, out_backprop: &T2, ) -> crate::Result> { // Define Op - let mut op = super::Op::new(ctx, "Conv2DBackpropInput")?; + let mut op = super::Op::new(ctx, "Conv2DBackpropFilter")?; // Required input arguments - op.add_input(&input_sizes.to_handle(ctx)?)?; - op.add_input(&filter.to_handle(ctx)?)?; + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&filter_sizes.to_handle(ctx)?)?; op.add_input(&out_backprop.to_handle(ctx)?)?; // Attributes @@ -23108,29 +23438,29 @@ impl Conv2DBackpropInput { } } -/// Shorthand for `Conv2DBackpropInput::new().call(&ctx, &input_sizes, &filter, &out_backprop)`. +/// Shorthand for `Conv2DBackpropFilter::new().call(&ctx, &input, &filter_sizes, &out_backprop)`. 
/// -/// See : -pub fn conv2_dbackprop_input< +/// See : +pub fn conv2_dbackprop_filter< 'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>, T2: crate::eager::ToTensorHandle<'a>, >( ctx: &'a crate::eager::Context, - input_sizes: &T0, - filter: &T1, + input: &T0, + filter_sizes: &T1, out_backprop: &T2, ) -> crate::Result> { - let op = Conv2DBackpropInput::new(); - op.call(ctx, input_sizes, filter, out_backprop) + let op = Conv2DBackpropFilter::new(); + op.call(ctx, input, filter_sizes, out_backprop) } -/// Conv2DBackpropInputV2 +/// Conv2DBackpropFilterV2 /// -/// See : +/// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct Conv2DBackpropInputV2 { +pub struct Conv2DBackpropFilterV2 { strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, @@ -23140,7 +23470,7 @@ pub struct Conv2DBackpropInputV2 { /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } -impl ::std::default::Default for Conv2DBackpropInputV2 { +impl ::std::default::Default for Conv2DBackpropFilterV2 { fn default() -> Self { Self { strides: None, @@ -23153,8 +23483,8 @@ impl ::std::default::Default for Conv2DBackpropInputV2 { } } } -impl Conv2DBackpropInputV2 { - /// Creates a new `Conv2DBackpropInputV2`. +impl Conv2DBackpropFilterV2 { + /// Creates a new `Conv2DBackpropFilterV2`. pub fn new() -> Self { Self::default() } @@ -23219,7 +23549,7 @@ impl Conv2DBackpropInputV2 { self } - /// Execute conv2_dbackprop_input_v2. + /// Execute conv2_dbackprop_filter_v2. pub fn call< 'a, T0: crate::eager::ToTensorHandle<'a>, @@ -23233,7 +23563,331 @@ impl Conv2DBackpropInputV2 { out_backprop: &T2, ) -> crate::Result> { // Define Op - let mut op = super::Op::new(ctx, "Conv2DBackpropInputV2")?; + let mut op = super::Op::new(ctx, "Conv2DBackpropFilterV2")?; + + // Required input arguments + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&filter.to_handle(ctx)?)?; + op.add_input(&out_backprop.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.strides { + op.set_attr_int_list("strides", value)?; + } + if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { + op.set_attr_bool("use_cudnn_on_gpu", *value)?; + } + if let ::std::option::Option::Some(value) = &self.padding { + op.set_attr_string("padding", value)?; + } + if let ::std::option::Option::Some(value) = &self.explicit_paddings { + op.set_attr_int_list("explicit_paddings", value)?; + } + if let ::std::option::Option::Some(value) = &self.data_format { + op.set_attr_string("data_format", value)?; + } + if let ::std::option::Option::Some(value) = &self.dilations { + op.set_attr_int_list("dilations", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `Conv2DBackpropFilterV2::new().call(&ctx, &input, &filter, &out_backprop)`. 
+/// +/// See : +pub fn conv2_dbackprop_filter_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input: &T0, + filter: &T1, + out_backprop: &T2, +) -> crate::Result> { + let op = Conv2DBackpropFilterV2::new(); + op.call(ctx, input, filter, out_backprop) +} + +/// Conv2DBackpropInput +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct Conv2DBackpropInput { + strides: ::std::option::Option<::std::vec::Vec>, + use_cudnn_on_gpu: ::std::option::Option, + padding: ::std::option::Option<::std::string::String>, + explicit_paddings: ::std::option::Option<::std::vec::Vec>, + data_format: ::std::option::Option<::std::string::String>, + dilations: ::std::option::Option<::std::vec::Vec>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for Conv2DBackpropInput { + fn default() -> Self { + Self { + strides: None, + use_cudnn_on_gpu: Some(true), + padding: None, + explicit_paddings: Some(vec![]), + data_format: Some(::std::string::String::from("NHWC")), + dilations: Some(vec![1, 1, 1, 1]), + target_device_name: None, + } + } +} +impl Conv2DBackpropInput { + /// Creates a new `Conv2DBackpropInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `strides` attribute. + pub fn strides>>( + mut self, + value: ArgType, + ) -> Self { + self.strides = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_cudnn_on_gpu` attribute. + pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { + self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `padding` attribute. + pub fn padding>( + mut self, + value: ArgType, + ) -> Self { + self.padding = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `explicit_paddings` attribute. + pub fn explicit_paddings>>( + mut self, + value: ArgType, + ) -> Self { + self.explicit_paddings = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `data_format` attribute. + pub fn data_format>( + mut self, + value: ArgType, + ) -> Self { + self.data_format = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `dilations` attribute. + pub fn dilations>>( + mut self, + value: ArgType, + ) -> Self { + self.dilations = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute conv2_dbackprop_input. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input_sizes: &T0, + filter: &T1, + out_backprop: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "Conv2DBackpropInput")?; + + // Required input arguments + op.add_input(&input_sizes.to_handle(ctx)?)?; + op.add_input(&filter.to_handle(ctx)?)?; + op.add_input(&out_backprop.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.strides { + op.set_attr_int_list("strides", value)?; + } + if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { + op.set_attr_bool("use_cudnn_on_gpu", *value)?; + } + if let ::std::option::Option::Some(value) = &self.padding { + op.set_attr_string("padding", value)?; + } + if let ::std::option::Option::Some(value) = &self.explicit_paddings { + op.set_attr_int_list("explicit_paddings", value)?; + } + if let ::std::option::Option::Some(value) = &self.data_format { + op.set_attr_string("data_format", value)?; + } + if let ::std::option::Option::Some(value) = &self.dilations { + op.set_attr_int_list("dilations", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `Conv2DBackpropInput::new().call(&ctx, &input_sizes, &filter, &out_backprop)`. +/// +/// See : +pub fn conv2_dbackprop_input< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input_sizes: &T0, + filter: &T1, + out_backprop: &T2, +) -> crate::Result> { + let op = Conv2DBackpropInput::new(); + op.call(ctx, input_sizes, filter, out_backprop) +} + +/// Conv2DBackpropInputV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct Conv2DBackpropInputV2 { + strides: ::std::option::Option<::std::vec::Vec>, + use_cudnn_on_gpu: ::std::option::Option, + padding: ::std::option::Option<::std::string::String>, + explicit_paddings: ::std::option::Option<::std::vec::Vec>, + data_format: ::std::option::Option<::std::string::String>, + dilations: ::std::option::Option<::std::vec::Vec>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for Conv2DBackpropInputV2 { + fn default() -> Self { + Self { + strides: None, + use_cudnn_on_gpu: Some(true), + padding: None, + explicit_paddings: Some(vec![]), + data_format: Some(::std::string::String::from("NHWC")), + dilations: Some(vec![1, 1, 1, 1]), + target_device_name: None, + } + } +} +impl Conv2DBackpropInputV2 { + /// Creates a new `Conv2DBackpropInputV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `strides` attribute. + pub fn strides>>( + mut self, + value: ArgType, + ) -> Self { + self.strides = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_cudnn_on_gpu` attribute. + pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { + self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `padding` attribute. 
+ pub fn padding>( + mut self, + value: ArgType, + ) -> Self { + self.padding = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `explicit_paddings` attribute. + pub fn explicit_paddings>>( + mut self, + value: ArgType, + ) -> Self { + self.explicit_paddings = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `data_format` attribute. + pub fn data_format>( + mut self, + value: ArgType, + ) -> Self { + self.data_format = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `dilations` attribute. + pub fn dilations>>( + mut self, + value: ArgType, + ) -> Self { + self.dilations = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute conv2_dbackprop_input_v2. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input: &T0, + filter: &T1, + out_backprop: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "Conv2DBackpropInputV2")?; // Required input arguments op.add_input(&input.to_handle(ctx)?)?; @@ -23932,6 +24586,512 @@ pub fn conv3_dbackprop_input_v2< op.call(ctx, input_sizes, filter, out_backprop) } +/// ConvertToCooTensor +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ConvertToCooTensor { + sample_count: ::std::option::Option, + combiner: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ConvertToCooTensor { + fn default() -> Self { + Self { + sample_count: None, + combiner: None, + target_device_name: None, + } + } +} +impl ConvertToCooTensor { + /// Creates a new `ConvertToCooTensor`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `combiner` attribute. + pub fn combiner>( + mut self, + value: ArgType, + ) -> Self { + self.combiner = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute convert_to_coo_tensor. 
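`ConvertToCooTensor`, whose `call` follows below, is one of several new TPU SparseCore input-pipeline ops in 2.16. A sketch of the three-in/three-out shape of the API; the tensor shapes, the "sum" combiner, the sample count, and the output names in the comment are all assumptions, and a TPU-enabled runtime is required in practice:

use tensorflow::Tensor;
use tensorflow::eager::{raw_ops, Context, ContextOptions};

fn coo_example() -> Result<(), Box<dyn std::error::Error>> {
    let ctx = Context::new(ContextOptions::new())?;
    let indices_or_row_splits = Tensor::new(&[3]).with_values(&[0i32, 1, 3])?;
    let values = Tensor::new(&[3]).with_values(&[7i32, 8, 9])?;
    let weights = Tensor::new(&[3]).with_values(&[1.0f32, 1.0, 1.0])?;
    // Three output handles (presumably row ids, col ids, and gains).
    let [_out0, _out1, _out2] = raw_ops::ConvertToCooTensor::new()
        .sample_count(2)
        .combiner("sum")
        .call(&ctx, &indices_or_row_splits, &values, &weights)?;
    Ok(())
}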
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + indices_or_row_splits: &T0, + values: &T1, + weights: &T2, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new(ctx, "ConvertToCooTensor")?; + + // Required input arguments + op.add_input(&indices_or_row_splits.to_handle(ctx)?)?; + op.add_input(&values.to_handle(ctx)?)?; + op.add_input(&weights.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count { + op.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + op.set_attr_string("combiner", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `ConvertToCooTensor::new().call(&ctx, &indices_or_row_splits, &values, &weights)`. +/// +/// See : +pub fn convert_to_coo_tensor< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + indices_or_row_splits: &T0, + values: &T1, + weights: &T2, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = ConvertToCooTensor::new(); + op.call(ctx, indices_or_row_splits, values, weights) +} + +/// ConvertToListOfSparseCoreCooTensors +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ConvertToListOfSparseCoreCooTensors { + sample_count: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + row_offset: ::std::option::Option, + col_offset: ::std::option::Option, + col_shift: ::std::option::Option, + num_sc_shards: ::std::option::Option, + stacked_table_sample_count: ::std::option::Option, + combiner: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ConvertToListOfSparseCoreCooTensors { + fn default() -> Self { + Self { + sample_count: None, + num_sc_per_chip: None, + row_offset: None, + col_offset: None, + col_shift: None, + num_sc_shards: None, + stacked_table_sample_count: None, + combiner: None, + target_device_name: None, + } + } +} +impl ConvertToListOfSparseCoreCooTensors { + /// Creates a new `ConvertToListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `row_offset` attribute. + pub fn row_offset>(mut self, value: ArgType) -> Self { + self.row_offset = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset` attribute. + pub fn col_offset>(mut self, value: ArgType) -> Self { + self.col_offset = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_shift` attribute. 
+ pub fn col_shift>(mut self, value: ArgType) -> Self { + self.col_shift = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_shards` attribute. + pub fn num_sc_shards>(mut self, value: ArgType) -> Self { + self.num_sc_shards = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `stacked_table_sample_count` attribute. + pub fn stacked_table_sample_count>( + mut self, + value: ArgType, + ) -> Self { + self.stacked_table_sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `combiner` attribute. + pub fn combiner>( + mut self, + value: ArgType, + ) -> Self { + self.combiner = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute convert_to_list_of_sparse_core_coo_tensors. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + indices_or_row_splits: &T0, + values: &T1, + weights: &T2, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new(ctx, "ConvertToListOfSparseCoreCooTensors")?; + + // Required input arguments + op.add_input(&indices_or_row_splits.to_handle(ctx)?)?; + op.add_input(&values.to_handle(ctx)?)?; + op.add_input(&weights.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count { + op.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.row_offset { + op.set_attr_int("row_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset { + op.set_attr_int("col_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_shift { + op.set_attr_int("col_shift", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_shards { + op.set_attr_int("num_sc_shards", *value)?; + } + if let ::std::option::Option::Some(value) = &self.stacked_table_sample_count { + op.set_attr_int("stacked_table_sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + op.set_attr_string("combiner", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `ConvertToListOfSparseCoreCooTensors::new().call(&ctx, &indices_or_row_splits, &values, &weights)`. 
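As everywhere in this generated file, an attribute is forwarded to the eager op only when it was explicitly set, so unset attributes fall back to TensorFlow's registered defaults (or trigger a missing-attribute error for required ones). A chained-setter sketch for the op above; every attribute value is a placeholder:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn list_coo_example<'a>(
    ctx: &'a Context,
    indices_or_row_splits: &Tensor<i32>,
    values: &Tensor<i32>,
    weights: &Tensor<f32>,
) -> Result<[TensorHandle<'a>; 3]> {
    raw_ops::ConvertToListOfSparseCoreCooTensors::new()
        .sample_count(16)
        .num_sc_per_chip(4)
        .row_offset(0)
        .col_offset(0)
        .col_shift(0)
        .num_sc_shards(1)
        .stacked_table_sample_count(16)
        .combiner("sum")
        .call(ctx, indices_or_row_splits, values, weights)
}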
+/// +/// See : +pub fn convert_to_list_of_sparse_core_coo_tensors< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + indices_or_row_splits: &T0, + values: &T1, + weights: &T2, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = ConvertToListOfSparseCoreCooTensors::new(); + op.call(ctx, indices_or_row_splits, values, weights) +} + +/// ConvertToSparseCoreCsrWrappedCooTensor +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct ConvertToSparseCoreCsrWrappedCooTensor { + sample_count_per_sc: ::std::option::Option, + num_replica: ::std::option::Option, + max_minibatches_per_sc: ::std::option::Option, + max_ids_per_chip_per_sample: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + allow_id_dropping: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for ConvertToSparseCoreCsrWrappedCooTensor { + fn default() -> Self { + Self { + sample_count_per_sc: None, + num_replica: None, + max_minibatches_per_sc: None, + max_ids_per_chip_per_sample: None, + table_vocab_size: None, + feature_width: None, + table_name: None, + allow_id_dropping: None, + target_device_name: None, + } + } +} +impl ConvertToSparseCoreCsrWrappedCooTensor { + /// Creates a new `ConvertToSparseCoreCsrWrappedCooTensor`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_per_sc` attribute. + pub fn sample_count_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_minibatches_per_sc` attribute. + pub fn max_minibatches_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.max_minibatches_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_chip_per_sample` attribute. + pub fn max_ids_per_chip_per_sample>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_chip_per_sample = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `allow_id_dropping` attribute. + pub fn allow_id_dropping>( + mut self, + value: ArgType, + ) -> Self { + self.allow_id_dropping = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute convert_to_sparse_core_csr_wrapped_coo_tensor. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + sorted_row_ids_list: &[&T0], + sorted_col_ids_list: &[&T1], + sorted_gains_list: &[&T2], + id_counts_list: &[&T3], + splits: &T4, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + // Define Op + let mut op = super::Op::new(ctx, "ConvertToSparseCoreCsrWrappedCooTensor")?; + + // Required input arguments + let mut sorted_row_ids_list_list = Vec::new(); + + for t in sorted_row_ids_list { + sorted_row_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&sorted_row_ids_list_list)?; + let mut sorted_col_ids_list_list = Vec::new(); + + for t in sorted_col_ids_list { + sorted_col_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&sorted_col_ids_list_list)?; + let mut sorted_gains_list_list = Vec::new(); + + for t in sorted_gains_list { + sorted_gains_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&sorted_gains_list_list)?; + let mut id_counts_list_list = Vec::new(); + + for t in id_counts_list { + id_counts_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&id_counts_list_list)?; + op.add_input(&splits.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count_per_sc { + op.set_attr_int("sample_count_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + op.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + op.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + op.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.allow_id_dropping { + op.set_attr_bool("allow_id_dropping", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<7>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `ConvertToSparseCoreCsrWrappedCooTensor::new().call(&ctx, &sorted_row_ids_list, &sorted_col_ids_list, &sorted_gains_list, &id_counts_list, &splits)`. 
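Note how the slice parameters (`&[&T]`) are lowered: each list is converted handle-by-handle and attached with `add_input_list`. A sketch feeding single-element lists; shapes, attribute values, and the table name are placeholders, and the op only makes sense on a SparseCore-equipped TPU host:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn csr_wrap_example<'a>(
    ctx: &'a Context,
    rows: &Tensor<i32>,
    cols: &Tensor<i32>,
    gains: &Tensor<f32>,
    counts: &Tensor<i32>,
    splits: &Tensor<i64>,
) -> Result<[TensorHandle<'a>; 7]> {
    raw_ops::ConvertToSparseCoreCsrWrappedCooTensor::new()
        .sample_count_per_sc(4)
        .num_replica(1)
        .max_minibatches_per_sc(1)
        .max_ids_per_chip_per_sample(64)
        .table_vocab_size(1024)
        .feature_width(8)
        .table_name("table0")
        .allow_id_dropping(false)
        .call(ctx, &[rows], &[cols], &[gains], &[counts], splits)
}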
+/// +/// See : +pub fn convert_to_sparse_core_csr_wrapped_coo_tensor< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + sorted_row_ids_list: &[&T0], + sorted_col_ids_list: &[&T1], + sorted_gains_list: &[&T2], + id_counts_list: &[&T3], + splits: &T4, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + let op = ConvertToSparseCoreCsrWrappedCooTensor::new(); + op.call( + ctx, + sorted_row_ids_list, + sorted_col_ids_list, + sorted_gains_list, + id_counts_list, + splits, + ) +} + /// Copy /// /// See : @@ -24123,14 +25283,14 @@ pub fn copy_host<'a, T0: crate::eager::ToTensorHandle<'a>>( /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct CopyToMesh { - layout: ::std::option::Option<::std::string::String>, + mesh: ::std::option::Option<::std::string::String>, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for CopyToMesh { fn default() -> Self { Self { - layout: None, + mesh: None, target_device_name: None, } } @@ -24141,12 +25301,12 @@ impl CopyToMesh { Self::default() } - /// Sets the `layout` attribute. - pub fn layout>( + /// Sets the `mesh` attribute. + pub fn mesh>( mut self, value: ArgType, ) -> Self { - self.layout = ::std::option::Option::Some(value.into()); + self.mesh = ::std::option::Option::Some(value.into()); self } @@ -24172,8 +25332,8 @@ impl CopyToMesh { op.add_input(&input.to_handle(ctx)?)?; // Attributes - if let ::std::option::Option::Some(value) = &self.layout { - op.set_attr_string("layout", value)?; + if let ::std::option::Option::Some(value) = &self.mesh { + op.set_attr_string("mesh", value)?; } // Set the device name where this Op will be executed @@ -24202,14 +25362,12 @@ pub fn copy_to_mesh<'a, T0: crate::eager::ToTensorHandle<'a>>( /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct CopyToMeshGrad { - reference_layout: ::std::option::Option<::std::string::String>, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for CopyToMeshGrad { fn default() -> Self { Self { - reference_layout: None, target_device_name: None, } } @@ -24220,15 +25378,6 @@ impl CopyToMeshGrad { Self::default() } - /// Sets the `reference_layout` attribute. - pub fn reference_layout>( - mut self, - value: ArgType, - ) -> Self { - self.reference_layout = ::std::option::Option::Some(value.into()); - self - } - /// Sets the `` attribute. 
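The DTensor relay ops changed shape in 2.16: `CopyToMesh` now takes a `mesh` string instead of `layout`, and `CopyToMeshGrad` loses its `reference_layout` attribute. A sketch; the mesh string is a stand-in, not a valid DTensor mesh spec:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn copy_to_mesh_example<'a>(ctx: &'a Context, input: &Tensor<f32>) -> Result<TensorHandle<'a>> {
    raw_ops::CopyToMesh::new()
        .mesh("<mesh spec>") // placeholder; real specs enumerate dims and devices
        .call(ctx, input)
}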
pub fn target_device_name>( mut self, @@ -24253,9 +25402,6 @@ impl CopyToMeshGrad { op.add_input(&forward_input.to_handle(ctx)?)?; // Attributes - if let ::std::option::Option::Some(value) = &self.reference_layout { - op.set_attr_string("reference_layout", value)?; - } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -28867,6 +30013,71 @@ pub fn dataset_cardinality<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input_dataset) } +/// DatasetFingerprint +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct DatasetFingerprint { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for DatasetFingerprint { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl DatasetFingerprint { + /// Creates a new `DatasetFingerprint`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute dataset_fingerprint. + pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>( + &self, + ctx: &'a crate::eager::Context, + input_dataset: &T0, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "DatasetFingerprint")?; + + // Required input arguments + op.add_input(&input_dataset.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `DatasetFingerprint::new().call(&ctx, &input_dataset)`. +/// +/// See : +pub fn dataset_fingerprint<'a, T0: crate::eager::ToTensorHandle<'a>>( + ctx: &'a crate::eager::Context, + input_dataset: &T0, +) -> crate::Result> { + let op = DatasetFingerprint::new(); + op.call(ctx, input_dataset) +} + /// DatasetFromGraph /// /// See : @@ -35338,6 +36549,178 @@ pub fn dynamic_enqueue_tpuembedding_arbitrary_tensor_batch< ) } +/// DynamicEnqueueTPUEmbeddingRaggedTensorBatch +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct DynamicEnqueueTPUEmbeddingRaggedTensorBatch { + combiners: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + table_ids: ::std::option::Option<::std::vec::Vec>, + max_sequence_lengths: ::std::option::Option<::std::vec::Vec>, + num_features: ::std::option::Option<::std::vec::Vec>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for DynamicEnqueueTPUEmbeddingRaggedTensorBatch { + fn default() -> Self { + Self { + combiners: Some(vec![]), + table_ids: None, + max_sequence_lengths: Some(vec![]), + num_features: Some(vec![]), + target_device_name: None, + } + } +} +impl DynamicEnqueueTPUEmbeddingRaggedTensorBatch { + /// Creates a new `DynamicEnqueueTPUEmbeddingRaggedTensorBatch`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `combiners` attribute. + pub fn combiners>>( + mut self, + value: ArgType, + ) -> Self { + self.combiners = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_ids` attribute. 
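`DatasetFingerprint` is new in this release: it takes a single variant-typed dataset handle and returns one scalar handle carrying the dataset's fingerprint. A sketch; obtaining `dataset` from another dataset op, and the fingerprint dtype, are left as assumptions:

use tensorflow::Result;
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn fingerprint_example<'a>(
    ctx: &'a Context,
    dataset: &TensorHandle<'a>, // variant handle produced by some dataset op
) -> Result<TensorHandle<'a>> {
    raw_ops::dataset_fingerprint(ctx, dataset)
}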
+ pub fn table_ids>>( + mut self, + value: ArgType, + ) -> Self { + self.table_ids = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_sequence_lengths` attribute. + pub fn max_sequence_lengths>>( + mut self, + value: ArgType, + ) -> Self { + self.max_sequence_lengths = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_features` attribute. + pub fn num_features>>( + mut self, + value: ArgType, + ) -> Self { + self.num_features = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute dynamic_enqueue_tpuembedding_ragged_tensor_batch. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + sample_splits: &[&T0], + embedding_indices: &[&T1], + aggregation_weights: &[&T2], + mode_override: &T3, + device_ordinal: &T4, + ) -> crate::Result<()> { + // Define Op + let mut op = super::Op::new(ctx, "DynamicEnqueueTPUEmbeddingRaggedTensorBatch")?; + + // Required input arguments + let mut sample_splits_list = Vec::new(); + + for t in sample_splits { + sample_splits_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&sample_splits_list)?; + let mut embedding_indices_list = Vec::new(); + + for t in embedding_indices { + embedding_indices_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&embedding_indices_list)?; + let mut aggregation_weights_list = Vec::new(); + + for t in aggregation_weights { + aggregation_weights_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&aggregation_weights_list)?; + op.add_input(&mode_override.to_handle(ctx)?)?; + op.add_input(&device_ordinal.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.combiners { + op.set_attr_string_list("combiners", value)?; + } + if let ::std::option::Option::Some(value) = &self.table_ids { + op.set_attr_int_list("table_ids", value)?; + } + if let ::std::option::Option::Some(value) = &self.max_sequence_lengths { + op.set_attr_int_list("max_sequence_lengths", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_features { + op.set_attr_int_list("num_features", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let _ = op.execute::<0>(ctx)?; + Ok(()) + } +} + +/// Shorthand for `DynamicEnqueueTPUEmbeddingRaggedTensorBatch::new().call(&ctx, &sample_splits, &embedding_indices, &aggregation_weights, &mode_override, &device_ordinal)`. 
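Unlike most wrappers here, the enqueue op produces no tensors: `call` runs `execute::<0>` and returns `crate::Result<()>`. A heavily hedged sketch (the TPU embedding setup, table ids, combiner, and tensor contents are all placeholders):

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context};

fn enqueue_example(
    ctx: &Context,
    sample_splits: &[&Tensor<i32>],
    embedding_indices: &[&Tensor<i32>],
    aggregation_weights: &[&Tensor<f32>],
    mode_override: &Tensor<String>,
    device_ordinal: &Tensor<i32>,
) -> Result<()> {
    raw_ops::DynamicEnqueueTPUEmbeddingRaggedTensorBatch::new()
        .table_ids(vec![0])
        .combiners(vec!["sum".to_string()])
        .call(
            ctx,
            sample_splits,
            embedding_indices,
            aggregation_weights,
            mode_override,
            device_ordinal,
        )
}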
+/// +/// See : +pub fn dynamic_enqueue_tpuembedding_ragged_tensor_batch< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + sample_splits: &[&T0], + embedding_indices: &[&T1], + aggregation_weights: &[&T2], + mode_override: &T3, + device_ordinal: &T4, +) -> crate::Result<()> { + let op = DynamicEnqueueTPUEmbeddingRaggedTensorBatch::new(); + op.call( + ctx, + sample_splits, + embedding_indices, + aggregation_weights, + mode_override, + device_ordinal, + ) +} + /// DynamicPartition /// /// See : @@ -43456,6 +44839,87 @@ pub fn fft3_d<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input) } +/// FFTND +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct FFTND { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for FFTND { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl FFTND { + /// Creates a new `FFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute fftnd. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "FFTND")?; + + // Required input arguments + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&fft_length.to_handle(ctx)?)?; + op.add_input(&axes.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `FFTND::new().call(&ctx, &input, &fft_length, &axes)`. +/// +/// See : +pub fn fftnd< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, +) -> crate::Result> { + let op = FFTND::new(); + op.call(ctx, input, fft_length, axes) +} + /// FIFOQueue /// /// See : @@ -45116,6 +46580,78 @@ pub fn finalize_tpuembedding< op.call(ctx, common_config, memory_config) } +/// FinalizeTPUEmbeddingV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct FinalizeTPUEmbeddingV2 { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for FinalizeTPUEmbeddingV2 { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl FinalizeTPUEmbeddingV2 { + /// Creates a new `FinalizeTPUEmbeddingV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute finalize_tpuembedding_v2. 
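`FFTND` generalizes the fixed-rank FFT ops: the transform sizes and the axes to transform over are now runtime tensors rather than baked into the op name. A sketch; the complex input handle is assumed to come from elsewhere, and the 8x8 lengths are arbitrary:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn fftnd_example<'a>(
    ctx: &'a Context,
    signal: &TensorHandle<'a>, // complex64 input assumed
) -> Result<TensorHandle<'a>> {
    let fft_length = Tensor::new(&[2]).with_values(&[8i32, 8])?;
    let axes = Tensor::new(&[2]).with_values(&[0i32, 1])?;
    raw_ops::fftnd(ctx, signal, &fft_length, &axes)
}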
+ pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( + &self, + ctx: &'a crate::eager::Context, + common_config: &T0, + memory_config: &T1, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "FinalizeTPUEmbeddingV2")?; + + // Required input arguments + op.add_input(&common_config.to_handle(ctx)?)?; + op.add_input(&memory_config.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `FinalizeTPUEmbeddingV2::new().call(&ctx, &common_config, &memory_config)`. +/// +/// See : +pub fn finalize_tpuembedding_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + common_config: &T0, + memory_config: &T1, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = FinalizeTPUEmbeddingV2::new(); + op.call(ctx, common_config, memory_config) +} + /// Fingerprint /// /// See : @@ -48385,12 +49921,14 @@ pub fn gather<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTens /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct GatherNd { + bad_indices_policy: ::std::option::Option<::std::string::String>, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for GatherNd { fn default() -> Self { Self { + bad_indices_policy: None, target_device_name: None, } } @@ -48401,6 +49939,15 @@ impl GatherNd { Self::default() } + /// Sets the `bad_indices_policy` attribute. + pub fn bad_indices_policy>( + mut self, + value: ArgType, + ) -> Self { + self.bad_indices_policy = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -48425,6 +49972,9 @@ impl GatherNd { op.add_input(&indices.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.bad_indices_policy { + op.set_attr_string("bad_indices_policy", value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -49036,6 +50586,382 @@ pub fn get_element_at_index< op.call(ctx, dataset, index) } +/// GetMinibatchSplitsWithPhysicalReplica +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GetMinibatchSplitsWithPhysicalReplica { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_splits: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GetMinibatchSplitsWithPhysicalReplica { + fn default() -> Self { + Self { + sample_count: None, + num_replica: None, + table_vocab_size: None, + feature_width: None, + num_sc_per_chip: None, + table_name: None, + mini_batch_splits: None, + target_device_name: None, + } + } +} +impl GetMinibatchSplitsWithPhysicalReplica { + /// Creates a new `GetMinibatchSplitsWithPhysicalReplica`. 
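`GatherNd` picks up the new `bad_indices_policy` attribute. A sketch that gathers one valid and one out-of-range row; the "IGNORE" policy string (which, per upstream docs, zero-fills bad rows on CPU) is an assumption about the accepted values:

use tensorflow::Tensor;
use tensorflow::eager::{raw_ops, Context, ContextOptions};

fn gather_nd_example() -> Result<(), Box<dyn std::error::Error>> {
    let ctx = Context::new(ContextOptions::new())?;
    let params = Tensor::new(&[2, 2]).with_values(&[1.0f32, 2.0, 3.0, 4.0])?;
    let indices = Tensor::new(&[2, 1]).with_values(&[0i32, 5])?; // row 5 is out of range
    let handle = raw_ops::GatherNd::new()
        .bad_indices_policy("IGNORE")
        .call(&ctx, &params, &indices)?;
    let out: Tensor<f32> = handle.resolve()?;
    println!("{:?}", &out[..]); // expect [1, 2] followed by a zero-filled row
    Ok(())
}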
+ pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mini_batch_splits` attribute. + pub fn mini_batch_splits>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_splits = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute get_minibatch_splits_with_physical_replica. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + program_key: &T0, + row_ids: &T1, + col_ids: &T2, + gains: &T3, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + // Define Op + let mut op = super::Op::new(ctx, "GetMinibatchSplitsWithPhysicalReplica")?; + + // Required input arguments + op.add_input(&program_key.to_handle(ctx)?)?; + op.add_input(&row_ids.to_handle(ctx)?)?; + op.add_input(&col_ids.to_handle(ctx)?)?; + op.add_input(&gains.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count { + op.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + op.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + op.set_attr_string("mini_batch_splits", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<7>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `GetMinibatchSplitsWithPhysicalReplica::new().call(&ctx, &program_key, &row_ids, &col_ids, &gains)`. 
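The seven-output ops destructure the same way as the rest of the file, just with a wider array. A placeholder-valued sketch of the op above (again SparseCore/TPU-only in practice; the attribute values and the provenance of `program_key` are assumptions):

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn splits_example<'a>(
    ctx: &'a Context,
    program_key: &Tensor<String>,
    row_ids: &Tensor<i32>,
    col_ids: &Tensor<i32>,
    gains: &Tensor<f32>,
) -> Result<[TensorHandle<'a>; 7]> {
    raw_ops::GetMinibatchSplitsWithPhysicalReplica::new()
        .sample_count(16)
        .num_replica(1)
        .table_vocab_size(1024)
        .feature_width(8)
        .num_sc_per_chip(4)
        .table_name("table0")
        .mini_batch_splits("splits0")
        .call(ctx, program_key, row_ids, col_ids, gains)
}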
+/// +/// See : +pub fn get_minibatch_splits_with_physical_replica< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + program_key: &T0, + row_ids: &T1, + col_ids: &T2, + gains: &T3, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + let op = GetMinibatchSplitsWithPhysicalReplica::new(); + op.call(ctx, program_key, row_ids, col_ids, gains) +} + +/// GetMinibatchesInCsrWithPhysicalReplica +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GetMinibatchesInCsrWithPhysicalReplica { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + max_minibatches_per_sc: ::std::option::Option, + max_ids_per_chip_per_sample: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_in_csr: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GetMinibatchesInCsrWithPhysicalReplica { + fn default() -> Self { + Self { + sample_count: None, + num_replica: None, + max_minibatches_per_sc: None, + max_ids_per_chip_per_sample: None, + table_vocab_size: None, + feature_width: None, + num_sc_per_chip: None, + table_name: None, + mini_batch_in_csr: None, + target_device_name: None, + } + } +} +impl GetMinibatchesInCsrWithPhysicalReplica { + /// Creates a new `GetMinibatchesInCsrWithPhysicalReplica`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_minibatches_per_sc` attribute. + pub fn max_minibatches_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.max_minibatches_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_chip_per_sample` attribute. + pub fn max_ids_per_chip_per_sample>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_chip_per_sample = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mini_batch_in_csr` attribute. 
+ pub fn mini_batch_in_csr>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_in_csr = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute get_minibatches_in_csr_with_physical_replica. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + program_key: &T0, + row_ids: &T1, + col_ids: &T2, + gains: &T3, + splits: &T4, + id_counts: &T5, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + // Define Op + let mut op = super::Op::new(ctx, "GetMinibatchesInCsrWithPhysicalReplica")?; + + // Required input arguments + op.add_input(&program_key.to_handle(ctx)?)?; + op.add_input(&row_ids.to_handle(ctx)?)?; + op.add_input(&col_ids.to_handle(ctx)?)?; + op.add_input(&gains.to_handle(ctx)?)?; + op.add_input(&splits.to_handle(ctx)?)?; + op.add_input(&id_counts.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count { + op.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + op.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + op.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + op.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_in_csr { + op.set_attr_string("mini_batch_in_csr", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<7>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `GetMinibatchesInCsrWithPhysicalReplica::new().call(&ctx, &program_key, &row_ids, &col_ids, &gains, &splits, &id_counts)`. 
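This op is the natural consumer of the previous one: its `splits` and `id_counts` inputs are meant to come out of `GetMinibatchSplitsWithPhysicalReplica`. A sketch wiring handles through (placeholder attribute values; which positions of the upstream 7-handle array hold `splits` and `id_counts` is an assumption):

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context, TensorHandle};

fn minibatch_pipeline<'a>(
    ctx: &'a Context,
    program_key: &Tensor<String>,
    row_ids: &Tensor<i32>,
    col_ids: &Tensor<i32>,
    gains: &Tensor<f32>,
    splits: &TensorHandle<'a>,
    id_counts: &TensorHandle<'a>,
) -> Result<[TensorHandle<'a>; 7]> {
    raw_ops::GetMinibatchesInCsrWithPhysicalReplica::new()
        .sample_count(16)
        .num_replica(1)
        .max_minibatches_per_sc(1)
        .max_ids_per_chip_per_sample(64)
        .table_vocab_size(1024)
        .feature_width(8)
        .num_sc_per_chip(4)
        .table_name("table0")
        .mini_batch_in_csr("csr0")
        .call(ctx, program_key, row_ids, col_ids, gains, splits, id_counts)
}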
+/// +/// See : +pub fn get_minibatches_in_csr_with_physical_replica< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + program_key: &T0, + row_ids: &T1, + col_ids: &T2, + gains: &T3, + splits: &T4, + id_counts: &T5, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 7]> { + let op = GetMinibatchesInCsrWithPhysicalReplica::new(); + op.call(ctx, program_key, row_ids, col_ids, gains, splits, id_counts) +} + /// GetOptions /// /// See : @@ -49307,6 +51233,457 @@ pub fn get_session_tensor<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, handle) } +/// GetStatsFromListOfSparseCoreCooTensors +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GetStatsFromListOfSparseCoreCooTensors { + sample_count_list: ::std::option::Option<::std::vec::Vec>, + col_offset_list: ::std::option::Option<::std::vec::Vec>, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GetStatsFromListOfSparseCoreCooTensors { + fn default() -> Self { + Self { + sample_count_list: None, + col_offset_list: None, + num_replica: None, + table_vocab_size: None, + feature_width: None, + num_sc_per_chip: None, + table_name: None, + target_device_name: None, + } + } +} +impl GetStatsFromListOfSparseCoreCooTensors { + /// Creates a new `GetStatsFromListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_list` attribute. + pub fn sample_count_list>>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset_list` attribute. + pub fn col_offset_list>>( + mut self, + value: ArgType, + ) -> Self { + self.col_offset_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute get_stats_from_list_of_sparse_core_coo_tensors. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_ids_list: &[&T0], + col_ids_list: &[&T1], + gains_list: &[&T2], + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "GetStatsFromListOfSparseCoreCooTensors")?; + + // Required input arguments + let mut row_ids_list_list = Vec::new(); + + for t in row_ids_list { + row_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&row_ids_list_list)?; + let mut col_ids_list_list = Vec::new(); + + for t in col_ids_list { + col_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&col_ids_list_list)?; + let mut gains_list_list = Vec::new(); + + for t in gains_list { + gains_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&gains_list_list)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count_list { + op.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + op.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + op.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `GetStatsFromListOfSparseCoreCooTensors::new().call(&ctx, &row_ids_list, &col_ids_list, &gains_list)`. +/// +/// See : +pub fn get_stats_from_list_of_sparse_core_coo_tensors< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_ids_list: &[&T0], + col_ids_list: &[&T1], + gains_list: &[&T2], +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = GetStatsFromListOfSparseCoreCooTensors::new(); + op.call(ctx, row_ids_list, col_ids_list, gains_list) +} + +/// GetTpuTaskId +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GetTpuTaskId { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GetTpuTaskId { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl GetTpuTaskId { + /// Creates a new `GetTpuTaskId`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute get_tpu_task_id. 
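`GetStatsFromListOfSparseCoreCooTensors` above is the two-output member of this family, returning per-table statistics for the given COO lists. A sketch; the per-feature sample counts, the offsets, and what exactly the two handles contain are assumptions:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context};

fn stats_example(
    ctx: &Context,
    row_ids_list: &[&Tensor<i32>],
    col_ids_list: &[&Tensor<i32>],
    gains_list: &[&Tensor<f32>],
) -> Result<()> {
    let [_stat0, _stat1] = raw_ops::GetStatsFromListOfSparseCoreCooTensors::new()
        .sample_count_list(vec![16])
        .col_offset_list(vec![0])
        .num_replica(1)
        .table_vocab_size(1024)
        .feature_width(8)
        .num_sc_per_chip(4)
        .table_name("table0")
        .call(ctx, row_ids_list, col_ids_list, gains_list)?;
    Ok(())
}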
+ pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "GetTpuTaskId")?; + + // Required input arguments + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `GetTpuTaskId::new().call(&ctx)`. +/// +/// See : +pub fn get_tpu_task_id<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = GetTpuTaskId::new(); + op.call(ctx) +} + +/// GlobalIterId +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GlobalIterId { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GlobalIterId { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl GlobalIterId { + /// Creates a new `GlobalIterId`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute global_iter_id. + pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "GlobalIterId")?; + + // Required input arguments + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `GlobalIterId::new().call(&ctx)`. +/// +/// See : +pub fn global_iter_id<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = GlobalIterId::new(); + op.call(ctx) +} + +/// GlobalShuffleDataset +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct GlobalShuffleDataset { + reshuffle_each_iteration: ::std::option::Option, + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + metadata: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for GlobalShuffleDataset { + fn default() -> Self { + Self { + reshuffle_each_iteration: Some(true), + output_types: None, + output_shapes: None, + metadata: None, + target_device_name: None, + } + } +} +impl GlobalShuffleDataset { + /// Creates a new `GlobalShuffleDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `reshuffle_each_iteration` attribute. + pub fn reshuffle_each_iteration>( + mut self, + value: ArgType, + ) -> Self { + self.reshuffle_each_iteration = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_types` attribute. + pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. + pub fn output_shapes>>( + mut self, + value: ArgType, + ) -> Self { + self.output_shapes = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `metadata` attribute. 
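`GetTpuTaskId` and `GlobalIterId` above are the degenerate case: no tensor inputs, one scalar output. A sketch; the int32/int64 result dtypes, and the need for a TPU runtime for the former, are assumptions:

use tensorflow::{Result, Tensor};
use tensorflow::eager::{raw_ops, Context};

fn id_example(ctx: &Context) -> Result<()> {
    let task = raw_ops::get_tpu_task_id(ctx)?;
    let iter = raw_ops::global_iter_id(ctx)?;
    let task_id: Tensor<i32> = task.resolve()?;
    let iter_id: Tensor<i64> = iter.resolve()?;
    println!("task={} iter={}", task_id[0], iter_id[0]);
    Ok(())
}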
+ pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute global_shuffle_dataset. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input_dataset: &T0, + seed: &T1, + seed2: &T2, + seed_generator: &T3, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "GlobalShuffleDataset")?; + + // Required input arguments + op.add_input(&input_dataset.to_handle(ctx)?)?; + op.add_input(&seed.to_handle(ctx)?)?; + op.add_input(&seed2.to_handle(ctx)?)?; + op.add_input(&seed_generator.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.reshuffle_each_iteration { + op.set_attr_bool("reshuffle_each_iteration", *value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + op.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + op.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + op.set_attr_string("metadata", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `GlobalShuffleDataset::new().call(&ctx, &input_dataset, &seed, &seed2, &seed_generator)`. +/// +/// See : +pub fn global_shuffle_dataset< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input_dataset: &T0, + seed: &T1, + seed2: &T2, + seed_generator: &T3, +) -> crate::Result> { + let op = GlobalShuffleDataset::new(); + op.call(ctx, input_dataset, seed, seed2, seed_generator) +} + /// Greater /// /// See : @@ -50647,6 +53024,87 @@ pub fn ifft3_d<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input) } +/// IFFTND +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct IFFTND { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for IFFTND { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl IFFTND { + /// Creates a new `IFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute ifftnd. 
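+ ///
+ /// A minimal sketch (ignored doctest). `complex_input` is assumed to hold
+ /// complex64 frequency-domain values obtained elsewhere, e.g. from a forward
+ /// FFT; `fft_length` and `axes` are 1-D i32 tensors:
+ ///
+ /// ```ignore
+ /// let ctx = crate::eager::Context::new(crate::eager::ContextOptions::new())?;
+ /// let fft_length = crate::Tensor::new(&[1]).with_values(&[4i32])?;
+ /// let axes = crate::Tensor::new(&[1]).with_values(&[0i32])?;
+ /// // Inverse FFT over axis 0 with length 4.
+ /// let h = IFFTND::new().call(&ctx, &complex_input, &fft_length, &axes)?;
+ /// ```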
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "IFFTND")?; + + // Required input arguments + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&fft_length.to_handle(ctx)?)?; + op.add_input(&axes.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `IFFTND::new().call(&ctx, &input, &fft_length, &axes)`. +/// +/// See : +pub fn ifftnd< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, +) -> crate::Result> { + let op = IFFTND::new(); + op.call(ctx, input, fft_length, axes) +} + /// IRFFT /// /// See : @@ -50884,6 +53342,98 @@ pub fn irfft3_d<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTe op.call(ctx, input, fft_length) } +/// IRFFTND +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct IRFFTND { + Treal: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for IRFFTND { + fn default() -> Self { + Self { + Treal: Some(crate::DataType::Float), + target_device_name: None, + } + } +} +impl IRFFTND { + /// Creates a new `IRFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Treal` attribute. + pub fn Treal>(mut self, value: ArgType) -> Self { + self.Treal = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute irfftnd. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "IRFFTND")?; + + // Required input arguments + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&fft_length.to_handle(ctx)?)?; + op.add_input(&axes.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.Treal { + op.set_attr_type("Treal", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `IRFFTND::new().call(&ctx, &input, &fft_length, &axes)`. 
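+///
+/// A minimal sketch (ignored doctest; `complex_input` is assumed to be a
+/// complex64 tensor produced by a forward transform such as `rfftnd`):
+///
+/// ```ignore
+/// let fft_length = crate::Tensor::new(&[1]).with_values(&[8i32])?;
+/// let axes = crate::Tensor::new(&[1]).with_values(&[0i32])?;
+/// // Inverse real-valued FFT; the resulting handle holds f32 values.
+/// let h = irfftnd(&ctx, &complex_input, &fft_length, &axes)?;
+/// ```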
+/// +/// See : +pub fn irfftnd< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, +) -> crate::Result> { + let op = IRFFTND::new(); + op.call(ctx, input, fft_length, axes) +} + /// Identity /// /// See : @@ -52358,6 +54908,168 @@ pub fn in_top_kv2< op.call(ctx, predictions, targets, k) } +/// IndexFlatMapDataset +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct IndexFlatMapDataset { + map_func: ::std::option::Option<::std::string::String>, + index_map_func: ::std::option::Option<::std::string::String>, + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + metadata: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for IndexFlatMapDataset { + fn default() -> Self { + Self { + map_func: None, + index_map_func: None, + output_types: None, + output_shapes: None, + metadata: None, + target_device_name: None, + } + } +} +impl IndexFlatMapDataset { + /// Creates a new `IndexFlatMapDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `map_func` attribute. + pub fn map_func>( + mut self, + value: ArgType, + ) -> Self { + self.map_func = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `index_map_func` attribute. + pub fn index_map_func>( + mut self, + value: ArgType, + ) -> Self { + self.index_map_func = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_types` attribute. + pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. + pub fn output_shapes>>( + mut self, + value: ArgType, + ) -> Self { + self.output_shapes = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `metadata` attribute. + pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute index_flat_map_dataset. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input_dataset: &T0, + map_func_other_args: &T1, + index_map_func_other_args: &T2, + output_cardinality: &T3, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "IndexFlatMapDataset")?; + + // Required input arguments + op.add_input(&input_dataset.to_handle(ctx)?)?; + op.add_input(&map_func_other_args.to_handle(ctx)?)?; + op.add_input(&index_map_func_other_args.to_handle(ctx)?)?; + op.add_input(&output_cardinality.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.map_func { + op.set_attr_string("map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.index_map_func { + op.set_attr_string("index_map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + op.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + op.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + op.set_attr_string("metadata", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `IndexFlatMapDataset::new().call(&ctx, &input_dataset, &map_func_other_args, &index_map_func_other_args, &output_cardinality)`. +/// +/// See : +pub fn index_flat_map_dataset< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input_dataset: &T0, + map_func_other_args: &T1, + index_map_func_other_args: &T2, + output_cardinality: &T3, +) -> crate::Result> { + let op = IndexFlatMapDataset::new(); + op.call( + ctx, + input_dataset, + map_func_other_args, + index_map_func_other_args, + output_cardinality, + ) +} + /// InfeedDequeue /// /// See : @@ -54878,6 +57590,71 @@ pub fn iterator_get_device<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, resource) } +/// IteratorGetModelProto +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct IteratorGetModelProto { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for IteratorGetModelProto { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl IteratorGetModelProto { + /// Creates a new `IteratorGetModelProto`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute iterator_get_model_proto. 
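+ ///
+ /// A minimal sketch (ignored doctest; `iterator_handle` is assumed to be
+ /// the resource handle of a live tf.data iterator created elsewhere):
+ ///
+ /// ```ignore
+ /// // Returns a string scalar with the iterator's serialized model proto.
+ /// let proto = IteratorGetModelProto::new().call(&ctx, &iterator_handle)?;
+ /// ```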
+ pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>(
+ &self,
+ ctx: &'a crate::eager::Context,
+ iterator: &T0,
+ ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+ // Define Op
+ let mut op = super::Op::new(ctx, "IteratorGetModelProto")?;
+
+ // Required input arguments
+ op.add_input(&iterator.to_handle(ctx)?)?;
+
+ // Attributes
+
+ // Set the device name where this Op will be executed
+ if let ::std::option::Option::Some(value) = &self.target_device_name {
+ op.set_device(value)?;
+ }
+ // Execute Op
+ let [h] = op.execute::<1>(ctx)?;
+ Ok(h)
+ }
+}
+
+/// Shorthand for `IteratorGetModelProto::new().call(&ctx, &iterator)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/IteratorGetModelProto>
+pub fn iterator_get_model_proto<'a, T0: crate::eager::ToTensorHandle<'a>>(
+ ctx: &'a crate::eager::Context,
+ iterator: &T0,
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+ let op = IteratorGetModelProto::new();
+ op.call(ctx, iterator)
+}
+
 /// IteratorGetNext
 ///
 /// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/IteratorGetNext>
@@ -57465,6 +60242,99 @@ pub fn list_diff<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToT
 op.call(ctx, x, y)
 }
 
+/// ListSnapshotChunksDataset
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/ListSnapshotChunksDataset>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct ListSnapshotChunksDataset {
+ output_types: ::std::option::Option<::std::vec::Vec<crate::DataType>>,
+ output_shapes: ::std::option::Option<::std::vec::Vec<crate::Shape>>,
+ /// (Rust wrapper specific) A device name where this op will be executed
+ target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for ListSnapshotChunksDataset {
+ fn default() -> Self {
+ Self {
+ output_types: None,
+ output_shapes: None,
+ target_device_name: None,
+ }
+ }
+}
+impl ListSnapshotChunksDataset {
+ /// Creates a new `ListSnapshotChunksDataset`.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Sets the `output_types` attribute.
+ pub fn output_types<ArgType: ::std::convert::Into<::std::vec::Vec<crate::DataType>>>(
+ mut self,
+ value: ArgType,
+ ) -> Self {
+ self.output_types = ::std::option::Option::Some(value.into());
+ self
+ }
+
+ /// Sets the `output_shapes` attribute.
+ pub fn output_shapes<ArgType: ::std::convert::Into<::std::vec::Vec<crate::Shape>>>(
+ mut self,
+ value: ArgType,
+ ) -> Self {
+ self.output_shapes = ::std::option::Option::Some(value.into());
+ self
+ }
+
+ /// Sets the `target_device_name` attribute.
+ pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+ mut self,
+ value: ArgType,
+ ) -> Self {
+ self.target_device_name = ::std::option::Option::Some(value.into());
+ self
+ }
+
+ /// Execute list_snapshot_chunks_dataset.
+ pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>(
+ &self,
+ ctx: &'a crate::eager::Context,
+ snapshot_path: &T0,
+ ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+ // Define Op
+ let mut op = super::Op::new(ctx, "ListSnapshotChunksDataset")?;
+
+ // Required input arguments
+ op.add_input(&snapshot_path.to_handle(ctx)?)?;
+
+ // Attributes
+ if let ::std::option::Option::Some(value) = &self.output_types {
+ op.set_attr_type_list("output_types", value)?;
+ }
+ if let ::std::option::Option::Some(value) = &self.output_shapes {
+ op.set_attr_shape_list("output_shapes", value)?;
+ }
+
+ // Set the device name where this Op will be executed
+ if let ::std::option::Option::Some(value) = &self.target_device_name {
+ op.set_device(value)?;
+ }
+ // Execute Op
+ let [h] = op.execute::<1>(ctx)?;
+ Ok(h)
+ }
+}
+
+/// Shorthand for `ListSnapshotChunksDataset::new().call(&ctx, &snapshot_path)`.
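+///
+/// A minimal sketch (ignored doctest; the snapshot path is illustrative and
+/// `Tensor<String>` support is assumed for the string input):
+///
+/// ```ignore
+/// let path = crate::Tensor::new(&[]).with_values(&["/tmp/snapshot".to_string()])?;
+/// let dataset = list_snapshot_chunks_dataset(&ctx, &path)?;
+/// ```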
+/// +/// See : +pub fn list_snapshot_chunks_dataset<'a, T0: crate::eager::ToTensorHandle<'a>>( + ctx: &'a crate::eager::Context, + snapshot_path: &T0, +) -> crate::Result> { + let op = ListSnapshotChunksDataset::new(); + op.call(ctx, snapshot_path) +} + /// LoadAllTPUEmbeddingParameters /// /// See : @@ -62932,6 +65802,8 @@ pub fn map_unstage_no_key<'a, T0: crate::eager::ToTensorHandle<'a>>( pub struct MatMul { transpose_a: ::std::option::Option, transpose_b: ::std::option::Option, + grad_a: ::std::option::Option, + grad_b: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } @@ -62940,6 +65812,8 @@ impl ::std::default::Default for MatMul { Self { transpose_a: Some(false), transpose_b: Some(false), + grad_a: Some(false), + grad_b: Some(false), target_device_name: None, } } @@ -62962,6 +65836,18 @@ impl MatMul { self } + /// Sets the `grad_a` attribute. + pub fn grad_a>(mut self, value: ArgType) -> Self { + self.grad_a = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_b` attribute. + pub fn grad_b>(mut self, value: ArgType) -> Self { + self.grad_b = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -62992,6 +65878,12 @@ impl MatMul { if let ::std::option::Option::Some(value) = &self.transpose_b { op.set_attr_bool("transpose_b", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_a { + op.set_attr_bool("grad_a", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_b { + op.set_attr_bool("grad_b", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -87235,6 +90127,101 @@ pub fn rfft3_d<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTen op.call(ctx, input, fft_length) } +/// RFFTND +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct RFFTND { + Tcomplex: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for RFFTND { + fn default() -> Self { + Self { + Tcomplex: Some(crate::DataType::Complex64), + target_device_name: None, + } + } +} +impl RFFTND { + /// Creates a new `RFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Tcomplex` attribute. + pub fn Tcomplex>( + mut self, + value: ArgType, + ) -> Self { + self.Tcomplex = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute rfftnd. 
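+ ///
+ /// A minimal sketch (ignored doctest) computing an 8-point real FFT over
+ /// axis 0 of a 1-D f32 tensor:
+ ///
+ /// ```ignore
+ /// let ctx = crate::eager::Context::new(crate::eager::ContextOptions::new())?;
+ /// let input = crate::Tensor::new(&[8]).with_values(&[1.0f32; 8])?;
+ /// let fft_length = crate::Tensor::new(&[1]).with_values(&[8i32])?;
+ /// let axes = crate::Tensor::new(&[1]).with_values(&[0i32])?;
+ /// let h = RFFTND::new().call(&ctx, &input, &fft_length, &axes)?;
+ /// ```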
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "RFFTND")?; + + // Required input arguments + op.add_input(&input.to_handle(ctx)?)?; + op.add_input(&fft_length.to_handle(ctx)?)?; + op.add_input(&axes.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.Tcomplex { + op.set_attr_type("Tcomplex", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `RFFTND::new().call(&ctx, &input, &fft_length, &axes)`. +/// +/// See : +pub fn rfftnd< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + input: &T0, + fft_length: &T1, + axes: &T2, +) -> crate::Result> { + let op = RFFTND::new(); + op.call(ctx, input, fft_length, axes) +} + /// RGBToHSV /// /// See : @@ -93716,23 +96703,23 @@ pub fn relayout<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input) } -/// RelayoutGrad +/// RelayoutLike /// -/// See : +/// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] -pub struct RelayoutGrad { +pub struct RelayoutLike { /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } -impl ::std::default::Default for RelayoutGrad { +impl ::std::default::Default for RelayoutLike { fn default() -> Self { Self { target_device_name: None, } } } -impl RelayoutGrad { - /// Creates a new `RelayoutGrad`. +impl RelayoutLike { + /// Creates a new `RelayoutLike`. pub fn new() -> Self { Self::default() } @@ -93746,19 +96733,19 @@ impl RelayoutGrad { self } - /// Execute relayout_grad. + /// Execute relayout_like. pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( &self, ctx: &'a crate::eager::Context, input: &T0, - forward_input: &T1, + layout_input: &T1, ) -> crate::Result> { // Define Op - let mut op = super::Op::new(ctx, "RelayoutGrad")?; + let mut op = super::Op::new(ctx, "RelayoutLike")?; // Required input arguments op.add_input(&input.to_handle(ctx)?)?; - op.add_input(&forward_input.to_handle(ctx)?)?; + op.add_input(&layout_input.to_handle(ctx)?)?; // Attributes @@ -93772,20 +96759,20 @@ impl RelayoutGrad { } } -/// Shorthand for `RelayoutGrad::new().call(&ctx, &input, &forward_input)`. +/// Shorthand for `RelayoutLike::new().call(&ctx, &input, &layout_input)`. 
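+///
+/// A minimal sketch (ignored doctest; both handles are assumed to be DTensor
+/// values, since this op is only meaningful under a DTensor mesh):
+///
+/// ```ignore
+/// // Re-lays out `input` to match the layout of `layout_input`.
+/// let h = relayout_like(&ctx, &input, &layout_input)?;
+/// ```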
/// -/// See : -pub fn relayout_grad< +/// See : +pub fn relayout_like< 'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>, >( ctx: &'a crate::eager::Context, input: &T0, - forward_input: &T1, + layout_input: &T1, ) -> crate::Result> { - let op = RelayoutGrad::new(); - op.call(ctx, input, forward_input) + let op = RelayoutLike::new(); + op.call(ctx, input, layout_input) } /// Relu @@ -116475,6 +119462,219 @@ pub fn softsign_grad< op.call(ctx, gradients, features) } +/// SortListOfSparseCoreCooTensors +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct SortListOfSparseCoreCooTensors { + sample_count_list: ::std::option::Option<::std::vec::Vec>, + col_offset_list: ::std::option::Option<::std::vec::Vec>, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for SortListOfSparseCoreCooTensors { + fn default() -> Self { + Self { + sample_count_list: None, + col_offset_list: None, + num_replica: None, + table_vocab_size: None, + feature_width: None, + num_sc_per_chip: None, + max_ids_per_sparse_core: None, + max_unique_ids_per_sparse_core: None, + table_name: None, + target_device_name: None, + } + } +} +impl SortListOfSparseCoreCooTensors { + /// Creates a new `SortListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_list` attribute. + pub fn sample_count_list>>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset_list` attribute. + pub fn col_offset_list>>( + mut self, + value: ArgType, + ) -> Self { + self.col_offset_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. 
+ pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute sort_list_of_sparse_core_coo_tensors. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_ids_list: &[&T0], + col_ids_list: &[&T1], + gains_list: &[&T2], + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 4]> { + // Define Op + let mut op = super::Op::new(ctx, "SortListOfSparseCoreCooTensors")?; + + // Required input arguments + let mut row_ids_list_list = Vec::new(); + + for t in row_ids_list { + row_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&row_ids_list_list)?; + let mut col_ids_list_list = Vec::new(); + + for t in col_ids_list { + col_ids_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&col_ids_list_list)?; + let mut gains_list_list = Vec::new(); + + for t in gains_list { + gains_list_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&gains_list_list)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count_list { + op.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + op.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + op.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + op.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + op.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<4>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `SortListOfSparseCoreCooTensors::new().call(&ctx, &row_ids_list, &col_ids_list, &gains_list)`. 
+/// +/// See : +pub fn sort_list_of_sparse_core_coo_tensors< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_ids_list: &[&T0], + col_ids_list: &[&T1], + gains_list: &[&T2], +) -> crate::Result<[crate::eager::TensorHandle<'a>; 4]> { + let op = SortListOfSparseCoreCooTensors::new(); + op.call(ctx, row_ids_list, col_ids_list, gains_list) +} + /// SpaceToBatch /// /// See : @@ -121388,12 +124588,14 @@ pub fn sparse_reshape< /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentMean { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentMean { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -121404,6 +124606,12 @@ impl SparseSegmentMean { Self::default() } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -121435,6 +124643,9 @@ impl SparseSegmentMean { op.add_input(&segment_ids.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -121550,17 +124761,105 @@ pub fn sparse_segment_mean_grad< op.call(ctx, grad, indices, segment_ids, output_dim0) } +/// SparseSegmentMeanGradV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct SparseSegmentMeanGradV2 { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for SparseSegmentMeanGradV2 { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl SparseSegmentMeanGradV2 { + /// Creates a new `SparseSegmentMeanGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute sparse_segment_mean_grad_v2. 
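+ ///
+ /// A minimal sketch (ignored doctest; shapes are illustrative). `grad` has
+ /// one row per segment, `indices` and `segment_ids` describe the forward
+ /// gather, and `dense_output_dim0` is the dimension-0 size of the original
+ /// data:
+ ///
+ /// ```ignore
+ /// let grad = crate::Tensor::new(&[2, 3]).with_values(&[0.5f32; 6])?;
+ /// let indices = crate::Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+ /// let segment_ids = crate::Tensor::new(&[4]).with_values(&[0i32, 0, 1, 1])?;
+ /// let dim0 = crate::Tensor::new(&[]).with_values(&[4i32])?;
+ /// // Returns [gradient values, unique indices] as two handles.
+ /// let [values, unique] = SparseSegmentMeanGradV2::new()
+ ///     .call(&ctx, &grad, &indices, &segment_ids, &dim0)?;
+ /// ```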
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "SparseSegmentMeanGradV2")?; + + // Required input arguments + op.add_input(&grad.to_handle(ctx)?)?; + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&segment_ids.to_handle(ctx)?)?; + op.add_input(&dense_output_dim0.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `SparseSegmentMeanGradV2::new().call(&ctx, &grad, &indices, &segment_ids, &dense_output_dim0)`. +/// +/// See : +pub fn sparse_segment_mean_grad_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = SparseSegmentMeanGradV2::new(); + op.call(ctx, grad, indices, segment_ids, dense_output_dim0) +} + /// SparseSegmentMeanWithNumSegments /// /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentMeanWithNumSegments { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentMeanWithNumSegments { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -121571,6 +124870,12 @@ impl SparseSegmentMeanWithNumSegments { Self::default() } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -121605,6 +124910,9 @@ impl SparseSegmentMeanWithNumSegments { op.add_input(&num_segments.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -121641,12 +124949,14 @@ pub fn sparse_segment_mean_with_num_segments< /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentSqrtN { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentSqrtN { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -121657,6 +124967,12 @@ impl SparseSegmentSqrtN { Self::default() } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. 
pub fn target_device_name>( mut self, @@ -121688,6 +125004,9 @@ impl SparseSegmentSqrtN { op.add_input(&segment_ids.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -121803,17 +125122,105 @@ pub fn sparse_segment_sqrt_ngrad< op.call(ctx, grad, indices, segment_ids, output_dim0) } +/// SparseSegmentSqrtNGradV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct SparseSegmentSqrtNGradV2 { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for SparseSegmentSqrtNGradV2 { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl SparseSegmentSqrtNGradV2 { + /// Creates a new `SparseSegmentSqrtNGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute sparse_segment_sqrt_ngrad_v2. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "SparseSegmentSqrtNGradV2")?; + + // Required input arguments + op.add_input(&grad.to_handle(ctx)?)?; + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&segment_ids.to_handle(ctx)?)?; + op.add_input(&dense_output_dim0.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `SparseSegmentSqrtNGradV2::new().call(&ctx, &grad, &indices, &segment_ids, &dense_output_dim0)`. +/// +/// See : +pub fn sparse_segment_sqrt_ngrad_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = SparseSegmentSqrtNGradV2::new(); + op.call(ctx, grad, indices, segment_ids, dense_output_dim0) +} + /// SparseSegmentSqrtNWithNumSegments /// /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentSqrtNWithNumSegments { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentSqrtNWithNumSegments { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -121824,6 +125231,12 @@ impl SparseSegmentSqrtNWithNumSegments { Self::default() } + /// Sets the `sparse_gradient` attribute. 
+ pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -121858,6 +125271,9 @@ impl SparseSegmentSqrtNWithNumSegments { op.add_input(&num_segments.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -121894,12 +125310,14 @@ pub fn sparse_segment_sqrt_nwith_num_segments< /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentSum { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentSum { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -121910,6 +125328,12 @@ impl SparseSegmentSum { Self::default() } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. pub fn target_device_name>( mut self, @@ -121941,6 +125365,9 @@ impl SparseSegmentSum { op.add_input(&segment_ids.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -122056,17 +125483,105 @@ pub fn sparse_segment_sum_grad< op.call(ctx, grad, indices, segment_ids, output_dim0) } +/// SparseSegmentSumGradV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct SparseSegmentSumGradV2 { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for SparseSegmentSumGradV2 { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl SparseSegmentSumGradV2 { + /// Creates a new `SparseSegmentSumGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute sparse_segment_sum_grad_v2. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "SparseSegmentSumGradV2")?; + + // Required input arguments + op.add_input(&grad.to_handle(ctx)?)?; + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&segment_ids.to_handle(ctx)?)?; + op.add_input(&dense_output_dim0.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `SparseSegmentSumGradV2::new().call(&ctx, &grad, &indices, &segment_ids, &dense_output_dim0)`. +/// +/// See : +pub fn sparse_segment_sum_grad_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + grad: &T0, + indices: &T1, + segment_ids: &T2, + dense_output_dim0: &T3, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = SparseSegmentSumGradV2::new(); + op.call(ctx, grad, indices, segment_ids, dense_output_dim0) +} + /// SparseSegmentSumWithNumSegments /// /// See : #[derive(::std::fmt::Debug, ::std::clone::Clone)] pub struct SparseSegmentSumWithNumSegments { + sparse_gradient: ::std::option::Option, /// (Rust wrapper specific) A device name where this op will be executed target_device_name: ::std::option::Option<::std::string::String>, } impl ::std::default::Default for SparseSegmentSumWithNumSegments { fn default() -> Self { Self { + sparse_gradient: Some(false), target_device_name: None, } } @@ -122077,6 +125592,12 @@ impl SparseSegmentSumWithNumSegments { Self::default() } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `` attribute. 
pub fn target_device_name>( mut self, @@ -122111,6 +125632,9 @@ impl SparseSegmentSumWithNumSegments { op.add_input(&num_segments.to_handle(ctx)?)?; // Attributes + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + op.set_attr_bool("sparse_gradient", *value)?; + } // Set the device name where this Op will be executed if let ::std::option::Option::Some(value) = &self.target_device_name { @@ -128975,6 +132499,159 @@ pub fn stop_gradient<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input) } +/// StoreMinibatchStatisticsInFdo +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct StoreMinibatchStatisticsInFdo { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_splits: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for StoreMinibatchStatisticsInFdo { + fn default() -> Self { + Self { + sample_count: None, + num_replica: None, + feature_width: None, + num_sc_per_chip: None, + table_name: None, + mini_batch_splits: None, + target_device_name: None, + } + } +} +impl StoreMinibatchStatisticsInFdo { + /// Creates a new `StoreMinibatchStatisticsInFdo`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mini_batch_splits` attribute. + pub fn mini_batch_splits>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_splits = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute store_minibatch_statistics_in_fdo. 
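+ ///
+ /// A minimal sketch (ignored doctest; all attribute values and inputs are
+ /// illustrative, and a configured SparseCore TPU runtime is assumed):
+ ///
+ /// ```ignore
+ /// StoreMinibatchStatisticsInFdo::new()
+ ///     .sample_count(16)
+ ///     .num_replica(2)
+ ///     .table_name("table0")
+ ///     .call(&ctx, &program_key, &max_ids, &max_uniques)?;
+ /// ```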
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + program_key: &T0, + max_ids: &T1, + max_uniques: &T2, + ) -> crate::Result<()> { + // Define Op + let mut op = super::Op::new(ctx, "StoreMinibatchStatisticsInFdo")?; + + // Required input arguments + op.add_input(&program_key.to_handle(ctx)?)?; + op.add_input(&max_ids.to_handle(ctx)?)?; + op.add_input(&max_uniques.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.sample_count { + op.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + op.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + op.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + op.set_attr_string("mini_batch_splits", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let _ = op.execute::<0>(ctx)?; + Ok(()) + } +} + +/// Shorthand for `StoreMinibatchStatisticsInFdo::new().call(&ctx, &program_key, &max_ids, &max_uniques)`. +/// +/// See : +pub fn store_minibatch_statistics_in_fdo< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + program_key: &T0, + max_ids: &T1, + max_uniques: &T2, +) -> crate::Result<()> { + let op = StoreMinibatchStatisticsInFdo::new(); + op.call(ctx, program_key, max_ids, max_uniques) +} + /// StridedSlice /// /// See : @@ -131271,6 +134948,106 @@ pub fn tfrecord_dataset< op.call(ctx, filenames, compression_type, buffer_size) } +/// TFRecordDatasetV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct TFRecordDatasetV2 { + metadata: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for TFRecordDatasetV2 { + fn default() -> Self { + Self { + metadata: None, + target_device_name: None, + } + } +} +impl TFRecordDatasetV2 { + /// Creates a new `TFRecordDatasetV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `metadata` attribute. + pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute tfrecord_dataset_v2. 
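+ ///
+ /// A minimal sketch (ignored doctest; the path is illustrative and
+ /// `Tensor<String>` support is assumed). Compared to `tfrecord_dataset`,
+ /// the V2 op takes an additional `byte_offsets` input:
+ ///
+ /// ```ignore
+ /// let filenames = crate::Tensor::new(&[1]).with_values(&["/tmp/data.tfrecord".to_string()])?;
+ /// let compression = crate::Tensor::new(&[]).with_values(&["".to_string()])?;
+ /// let buffer_size = crate::Tensor::new(&[]).with_values(&[0i64])?;
+ /// let byte_offsets = crate::Tensor::new(&[1]).with_values(&[0i64])?;
+ /// let ds = TFRecordDatasetV2::new().call(&ctx, &filenames, &compression, &buffer_size, &byte_offsets)?;
+ /// ```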
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + filenames: &T0, + compression_type: &T1, + buffer_size: &T2, + byte_offsets: &T3, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "TFRecordDatasetV2")?; + + // Required input arguments + op.add_input(&filenames.to_handle(ctx)?)?; + op.add_input(&compression_type.to_handle(ctx)?)?; + op.add_input(&buffer_size.to_handle(ctx)?)?; + op.add_input(&byte_offsets.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.metadata { + op.set_attr_string("metadata", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `TFRecordDatasetV2::new().call(&ctx, &filenames, &compression_type, &buffer_size, &byte_offsets)`. +/// +/// See : +pub fn tfrecord_dataset_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + filenames: &T0, + compression_type: &T1, + buffer_size: &T2, + byte_offsets: &T3, +) -> crate::Result> { + let op = TFRecordDatasetV2::new(); + op.call(ctx, filenames, compression_type, buffer_size, byte_offsets) +} + /// TFRecordReader /// /// See : @@ -131479,6 +135256,71 @@ pub fn tfrecord_reader_v2<'a>( op.call(ctx) } +/// TPUAnnotateTensorsWithDynamicShape +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct TPUAnnotateTensorsWithDynamicShape { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for TPUAnnotateTensorsWithDynamicShape { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl TPUAnnotateTensorsWithDynamicShape { + /// Creates a new `TPUAnnotateTensorsWithDynamicShape`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute tpuannotate_tensors_with_dynamic_shape. + pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>( + &self, + ctx: &'a crate::eager::Context, + tensors: &T0, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "TPUAnnotateTensorsWithDynamicShape")?; + + // Required input arguments + op.add_input(&tensors.to_handle(ctx)?)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `TPUAnnotateTensorsWithDynamicShape::new().call(&ctx, &tensors)`. 
+/// +/// See : +pub fn tpuannotate_tensors_with_dynamic_shape<'a, T0: crate::eager::ToTensorHandle<'a>>( + ctx: &'a crate::eager::Context, + tensors: &T0, +) -> crate::Result> { + let op = TPUAnnotateTensorsWithDynamicShape::new(); + op.call(ctx, tensors) +} + /// TPUCompilationResult /// /// See : @@ -131723,6 +135565,84 @@ pub fn tpucompile_succeeded_assert<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, compilation_status) } +/// TPUCopyWithDynamicShape +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct TPUCopyWithDynamicShape { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for TPUCopyWithDynamicShape { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl TPUCopyWithDynamicShape { + /// Creates a new `TPUCopyWithDynamicShape`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute tpucopy_with_dynamic_shape. + pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>( + &self, + ctx: &'a crate::eager::Context, + tensors: &T0, + unpadded_sizes: &[&T1], + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "TPUCopyWithDynamicShape")?; + + // Required input arguments + op.add_input(&tensors.to_handle(ctx)?)?; + let mut unpadded_sizes_list = Vec::new(); + + for t in unpadded_sizes { + unpadded_sizes_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&unpadded_sizes_list)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `TPUCopyWithDynamicShape::new().call(&ctx, &tensors, &unpadded_sizes)`. +/// +/// See : +pub fn tpucopy_with_dynamic_shape< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + tensors: &T0, + unpadded_sizes: &[&T1], +) -> crate::Result> { + let op = TPUCopyWithDynamicShape::new(); + op.call(ctx, tensors, unpadded_sizes) +} + /// TPUEmbeddingActivations /// /// See : @@ -145716,6 +149636,77 @@ pub fn unwrap_dataset_variant<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input_handle) } +/// UpdateTaskIdAndGlobalCoreArray +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct UpdateTaskIdAndGlobalCoreArray { + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for UpdateTaskIdAndGlobalCoreArray { + fn default() -> Self { + Self { + target_device_name: None, + } + } +} +impl UpdateTaskIdAndGlobalCoreArray { + /// Creates a new `UpdateTaskIdAndGlobalCoreArray`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute update_task_id_and_global_core_array. 
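+ ///
+ /// A minimal sketch (ignored doctest; values are illustrative). Note that
+ /// the input is a slice of tensor references, mapped to an op input list:
+ ///
+ /// ```ignore
+ /// let t0 = crate::Tensor::new(&[2]).with_values(&[0i32, 1])?;
+ /// let t1 = crate::Tensor::new(&[2]).with_values(&[2i32, 3])?;
+ /// // The op has no outputs; it only updates internal TPU state.
+ /// update_task_id_and_global_core_array(&ctx, &[&t0, &t1])?;
+ /// ```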
+ pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>( + &self, + ctx: &'a crate::eager::Context, + tpu_task_id_to_shard_id: &[&T0], + ) -> crate::Result<()> { + // Define Op + let mut op = super::Op::new(ctx, "UpdateTaskIdAndGlobalCoreArray")?; + + // Required input arguments + let mut tpu_task_id_to_shard_id_list = Vec::new(); + + for t in tpu_task_id_to_shard_id { + tpu_task_id_to_shard_id_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&tpu_task_id_to_shard_id_list)?; + + // Attributes + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let _ = op.execute::<0>(ctx)?; + Ok(()) + } +} + +/// Shorthand for `UpdateTaskIdAndGlobalCoreArray::new().call(&ctx, &tpu_task_id_to_shard_id)`. +/// +/// See : +pub fn update_task_id_and_global_core_array<'a, T0: crate::eager::ToTensorHandle<'a>>( + ctx: &'a crate::eager::Context, + tpu_task_id_to_shard_id: &[&T0], +) -> crate::Result<()> { + let op = UpdateTaskIdAndGlobalCoreArray::new(); + op.call(ctx, tpu_task_id_to_shard_id) +} + /// UpperBound /// /// See : @@ -145809,6 +149800,7 @@ pub fn upper_bound< pub struct VarHandleOp { container: ::std::option::Option<::std::string::String>, shared_name: ::std::option::Option<::std::string::String>, + debug_name: ::std::option::Option<::std::string::String>, dtype: ::std::option::Option, shape: ::std::option::Option, allowed_devices: ::std::option::Option<::std::vec::Vec<::std::string::String>>, @@ -145820,6 +149812,7 @@ impl ::std::default::Default for VarHandleOp { Self { container: None, shared_name: None, + debug_name: None, dtype: None, shape: None, allowed_devices: Some(vec![]), @@ -145851,6 +149844,15 @@ impl VarHandleOp { self } + /// Sets the `debug_name` attribute. + pub fn debug_name>( + mut self, + value: ArgType, + ) -> Self { + self.debug_name = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `dtype` attribute. pub fn dtype>(mut self, value: ArgType) -> Self { self.dtype = ::std::option::Option::Some(value.into()); @@ -145900,6 +149902,9 @@ impl VarHandleOp { if let ::std::option::Option::Some(value) = &self.shared_name { op.set_attr_string("shared_name", value)?; } + if let ::std::option::Option::Some(value) = &self.debug_name { + op.set_attr_string("debug_name", value)?; + } if let ::std::option::Option::Some(value) = &self.dtype { op.set_attr_type("dtype", *value)?; } @@ -146298,6 +150303,132 @@ pub fn variable_v2<'a>( op.call(ctx) } +/// WeightedFlatMapDataset +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct WeightedFlatMapDataset { + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + metadata: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for WeightedFlatMapDataset { + fn default() -> Self { + Self { + output_types: None, + output_shapes: None, + metadata: None, + target_device_name: None, + } + } +} +impl WeightedFlatMapDataset { + /// Creates a new `WeightedFlatMapDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `output_types` attribute. + pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. 
+    pub fn output_shapes<ArgType: ::std::convert::Into<::std::vec::Vec<crate::Shape>>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.output_shapes = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `metadata` attribute.
+    pub fn metadata<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.metadata = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute weighted_flat_map_dataset.
+    pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>, T1: crate::eager::ToTensorHandle<'a>>(
+        &self,
+        ctx: &'a crate::eager::Context,
+        input_datasets: &[&T0],
+        weights: &[&T1],
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "WeightedFlatMapDataset")?;
+
+        // Required input arguments
+        let mut input_datasets_list = Vec::new();
+
+        for t in input_datasets {
+            input_datasets_list.push(t.to_handle(ctx)?);
+        }
+
+        op.add_input_list(&input_datasets_list)?;
+        let mut weights_list = Vec::new();
+
+        for t in weights {
+            weights_list.push(t.to_handle(ctx)?);
+        }
+
+        op.add_input_list(&weights_list)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.output_types {
+            op.set_attr_type_list("output_types", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.output_shapes {
+            op.set_attr_shape_list("output_shapes", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.metadata {
+            op.set_attr_string("metadata", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `WeightedFlatMapDataset::new().call(&ctx, &input_datasets, &weights)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/WeightedFlatMapDataset>
+pub fn weighted_flat_map_dataset<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    input_datasets: &[&T0],
+    weights: &[&T1],
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+    let op = WeightedFlatMapDataset::new();
+    op.call(ctx, input_datasets, weights)
+}
+
 /// Where
 ///
 /// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/Where>
@@ -148264,6 +152395,138 @@ pub fn xla_recv_tpuembedding_activations<'a, T0: crate::eager::ToTensorHandle<'a
     op.call(ctx, deduplication_data)
 }
 
+/// XlaRecvTPUEmbeddingActivationsV2
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaRecvTPUEmbeddingActivationsV2>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaRecvTPUEmbeddingActivationsV2 {
+    num_tables: ::std::option::Option<i64>,
+    config: ::std::option::Option<::std::string::String>,
+    embedding_partitions: ::std::option::Option<::std::string::String>,
+    hbm_buffers_config: ::std::option::Option<::std::string::String>,
+    tpu_topology: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaRecvTPUEmbeddingActivationsV2 {
+    fn default() -> Self {
+        Self {
+            num_tables: None,
+            config: None,
+            embedding_partitions: None,
+            hbm_buffers_config: None,
+            tpu_topology: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaRecvTPUEmbeddingActivationsV2 {
+    /// Creates a new `XlaRecvTPUEmbeddingActivationsV2`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `num_tables` attribute.
+    pub fn num_tables<ArgType: ::std::convert::Into<i64>>(mut self, value: ArgType) -> Self {
+        self.num_tables = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `config` attribute.
+    pub fn config<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.config = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `embedding_partitions` attribute.
+    pub fn embedding_partitions<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.embedding_partitions = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `hbm_buffers_config` attribute.
+    pub fn hbm_buffers_config<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.hbm_buffers_config = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `tpu_topology` attribute.
+    pub fn tpu_topology<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.tpu_topology = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_recv_tpuembedding_activations_v2.
+    pub fn call<'a, T0: crate::eager::ToTensorHandle<'a>>(
+        &self,
+        ctx: &'a crate::eager::Context,
+        deduplication_data: &T0,
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaRecvTPUEmbeddingActivationsV2")?;
+
+        // Required input arguments
+        op.add_input(&deduplication_data.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.num_tables {
+            op.set_attr_int("num_tables", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.config {
+            op.set_attr_string("config", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.embedding_partitions {
+            op.set_attr_string("embedding_partitions", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.hbm_buffers_config {
+            op.set_attr_string("hbm_buffers_config", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.tpu_topology {
+            op.set_attr_string("tpu_topology", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `XlaRecvTPUEmbeddingActivationsV2::new().call(&ctx, &deduplication_data)`.
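+///
+/// A minimal sketch (untested; assumes a configured TPU embedding setup, an
+/// eager context `ctx`, and a `config_proto` `String` holding a serialized
+/// `TPUEmbeddingConfiguration`):
+///
+/// ```ignore
+/// let dedup_data = XlaRecvTPUEmbeddingDeduplicationDataV2::new()
+///     .config(config_proto.clone())
+///     .call(&ctx)?;
+/// let activations = XlaRecvTPUEmbeddingActivationsV2::new()
+///     .num_tables(1)
+///     .config(config_proto)
+///     .call(&ctx, &dedup_data)?;
+/// ```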
+/// +/// See : +pub fn xla_recv_tpuembedding_activations_v2<'a, T0: crate::eager::ToTensorHandle<'a>>( + ctx: &'a crate::eager::Context, + deduplication_data: &T0, +) -> crate::Result> { + let op = XlaRecvTPUEmbeddingActivationsV2::new(); + op.call(ctx, deduplication_data) +} + /// XlaRecvTPUEmbeddingDeduplicationData /// /// See : @@ -148340,6 +152603,124 @@ pub fn xla_recv_tpuembedding_deduplication_data<'a>( op.call(ctx) } +/// XlaRecvTPUEmbeddingDeduplicationDataV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaRecvTPUEmbeddingDeduplicationDataV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaRecvTPUEmbeddingDeduplicationDataV2 { + fn default() -> Self { + Self { + config: None, + embedding_partitions: None, + hbm_buffers_config: None, + tpu_topology: None, + target_device_name: None, + } + } +} +impl XlaRecvTPUEmbeddingDeduplicationDataV2 { + /// Creates a new `XlaRecvTPUEmbeddingDeduplicationDataV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_recv_tpuembedding_deduplication_data_v2. + pub fn call<'a>( + &self, + ctx: &'a crate::eager::Context, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "XlaRecvTPUEmbeddingDeduplicationDataV2")?; + + // Required input arguments + + // Attributes + if let ::std::option::Option::Some(value) = &self.config { + op.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + op.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + op.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + op.set_attr_string("tpu_topology", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `XlaRecvTPUEmbeddingDeduplicationDataV2::new().call(&ctx)`. 
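+///
+/// A minimal sketch (untested; assumes an eager context `ctx` and that
+/// `config_proto` is a `String` holding a serialized
+/// `TPUEmbeddingConfiguration`); the returned handle is typically fed to the
+/// matching activations/gradients ops:
+///
+/// ```ignore
+/// let dedup_data = XlaRecvTPUEmbeddingDeduplicationDataV2::new()
+///     .config(config_proto)
+///     .call(&ctx)?;
+/// ```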
+/// +/// See : +pub fn xla_recv_tpuembedding_deduplication_data_v2<'a>( + ctx: &'a crate::eager::Context, +) -> crate::Result> { + let op = XlaRecvTPUEmbeddingDeduplicationDataV2::new(); + op.call(ctx) +} + /// XlaSendTPUEmbeddingGradients /// /// See : @@ -148447,6 +152828,155 @@ pub fn xla_send_tpuembedding_gradients< op.call(ctx, gradients, learning_rates, deduplication_data) } +/// XlaSendTPUEmbeddingGradientsV2 +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSendTPUEmbeddingGradientsV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSendTPUEmbeddingGradientsV2 { + fn default() -> Self { + Self { + config: None, + embedding_partitions: None, + hbm_buffers_config: None, + tpu_topology: None, + target_device_name: None, + } + } +} +impl XlaSendTPUEmbeddingGradientsV2 { + /// Creates a new `XlaSendTPUEmbeddingGradientsV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_send_tpuembedding_gradients_v2. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + gradients: &[&T0], + learning_rates: &[&T1], + deduplication_data: &T2, + ) -> crate::Result<()> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSendTPUEmbeddingGradientsV2")?; + + // Required input arguments + let mut gradients_list = Vec::new(); + + for t in gradients { + gradients_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&gradients_list)?; + let mut learning_rates_list = Vec::new(); + + for t in learning_rates { + learning_rates_list.push(t.to_handle(ctx)?); + } + + op.add_input_list(&learning_rates_list)?; + op.add_input(&deduplication_data.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.config { + op.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + op.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + op.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + op.set_attr_string("tpu_topology", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let _ = op.execute::<0>(ctx)?; + Ok(()) + } +} + +/// Shorthand for `XlaSendTPUEmbeddingGradientsV2::new().call(&ctx, &gradients, &learning_rates, &deduplication_data)`. +/// +/// See : +pub fn xla_send_tpuembedding_gradients_v2< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + gradients: &[&T0], + learning_rates: &[&T1], + deduplication_data: &T2, +) -> crate::Result<()> { + let op = XlaSendTPUEmbeddingGradientsV2::new(); + op.call(ctx, gradients, learning_rates, deduplication_data) +} + /// XlaSendToHost /// /// See : @@ -148526,6 +153056,3271 @@ pub fn xla_send_to_host<'a, T0: crate::eager::ToTensorHandle<'a>>( op.call(ctx, input) } +/// XlaSparseCoreAdagrad +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseCoreAdagrad { + feature_width: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseCoreAdagrad { + fn default() -> Self { + Self { + feature_width: None, + target_device_name: None, + } + } +} +impl XlaSparseCoreAdagrad { + /// Creates a new `XlaSparseCoreAdagrad`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_core_adagrad. 
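+    ///
+    /// A minimal sketch (untested; a SparseCore-capable TPU backend is assumed
+    /// and every shape/dtype below is illustrative):
+    ///
+    /// ```ignore
+    /// let ctx = Context::new(ContextOptions::new())?;
+    /// let indices = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+    /// let gradient = Tensor::new(&[4, 8]).with_values(&[0.0f32; 32])?;
+    /// let learning_rate = Tensor::new(&[]).with_values(&[0.01f32])?;
+    /// let accumulator = Tensor::new(&[16, 8]).with_values(&[0.1f32; 128])?;
+    /// let embedding_table = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+    /// let [new_table, new_accumulator] = XlaSparseCoreAdagrad::new()
+    ///     .feature_width(8)
+    ///     .call(&ctx, &indices, &gradient, &learning_rate, &accumulator, &embedding_table)?;
+    /// ```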
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + accumulator: &T3, + embedding_table: &T4, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseCoreAdagrad")?; + + // Required input arguments + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&gradient.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseCoreAdagrad::new().call(&ctx, &indices, &gradient, &learning_rate, &accumulator, &embedding_table)`. +/// +/// See : +pub fn xla_sparse_core_adagrad< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + accumulator: &T3, + embedding_table: &T4, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = XlaSparseCoreAdagrad::new(); + op.call( + ctx, + indices, + gradient, + learning_rate, + accumulator, + embedding_table, + ) +} + +/// XlaSparseCoreAdagradMomentum +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseCoreAdagradMomentum { + feature_width: ::std::option::Option, + use_nesterov: ::std::option::Option, + beta_2: ::std::option::Option, + exponent: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseCoreAdagradMomentum { + fn default() -> Self { + Self { + feature_width: None, + use_nesterov: None, + beta_2: None, + exponent: None, + target_device_name: None, + } + } +} +impl XlaSparseCoreAdagradMomentum { + /// Creates a new `XlaSparseCoreAdagradMomentum`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_nesterov` attribute. + pub fn use_nesterov>(mut self, value: ArgType) -> Self { + self.use_nesterov = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta_2` attribute. + pub fn beta_2>(mut self, value: ArgType) -> Self { + self.beta_2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `exponent` attribute. + pub fn exponent>(mut self, value: ArgType) -> Self { + self.exponent = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. 
+ pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_core_adagrad_momentum. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + beta_1: &T3, + epsilon: &T4, + accumulator: &T5, + momentum: &T6, + embedding_table: &T7, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseCoreAdagradMomentum")?; + + // Required input arguments + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&gradient.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&beta_1.to_handle(ctx)?)?; + op.add_input(&epsilon.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&momentum.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_nesterov { + op.set_attr_bool("use_nesterov", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta_2 { + op.set_attr_float("beta_2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.exponent { + op.set_attr_float("exponent", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseCoreAdagradMomentum::new().call(&ctx, &indices, &gradient, &learning_rate, &beta_1, &epsilon, &accumulator, &momentum, &embedding_table)`. 
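+///
+/// A minimal sketch (untested; scalar hyperparameters are passed as tensors,
+/// an eager context `ctx` is assumed, and all shapes/dtypes are illustrative):
+///
+/// ```ignore
+/// let scalar = |v: f32| Tensor::new(&[]).with_values(&[v]);
+/// let indices = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+/// let gradient = Tensor::new(&[4, 8]).with_values(&[0.0f32; 32])?;
+/// let accumulator = Tensor::new(&[16, 8]).with_values(&[0.1f32; 128])?;
+/// let momentum = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+/// let embedding_table = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+/// let [new_table, new_accumulator, new_momentum] = xla_sparse_core_adagrad_momentum(
+///     &ctx, &indices, &gradient, &scalar(0.01)?, &scalar(0.9)?, &scalar(1e-7)?,
+///     &accumulator, &momentum, &embedding_table,
+/// )?;
+/// ```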
+/// +/// See : +pub fn xla_sparse_core_adagrad_momentum< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + beta_1: &T3, + epsilon: &T4, + accumulator: &T5, + momentum: &T6, + embedding_table: &T7, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = XlaSparseCoreAdagradMomentum::new(); + op.call( + ctx, + indices, + gradient, + learning_rate, + beta_1, + epsilon, + accumulator, + momentum, + embedding_table, + ) +} + +/// XlaSparseCoreAdam +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseCoreAdam { + feature_width: ::std::option::Option, + use_sum_inside_sqrt: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseCoreAdam { + fn default() -> Self { + Self { + feature_width: None, + use_sum_inside_sqrt: None, + target_device_name: None, + } + } +} +impl XlaSparseCoreAdam { + /// Creates a new `XlaSparseCoreAdam`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_sum_inside_sqrt` attribute. + pub fn use_sum_inside_sqrt>( + mut self, + value: ArgType, + ) -> Self { + self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_core_adam. 
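+    ///
+    /// A minimal sketch (untested; `ctx`, `embedding_table`, `indices`,
+    /// `gradient`, `momentum`, and `velocity` are assumed prepared as in the
+    /// Adagrad sketch above, and the scalar hyperparameters are illustrative):
+    ///
+    /// ```ignore
+    /// let scalar = |v: f32| Tensor::new(&[]).with_values(&[v]);
+    /// let [new_table, new_momentum, new_velocity] = XlaSparseCoreAdam::new()
+    ///     .feature_width(8)
+    ///     .use_sum_inside_sqrt(true)
+    ///     .call(
+    ///         &ctx, &embedding_table, &indices, &gradient, &scalar(0.001)?,
+    ///         &momentum, &velocity, &scalar(0.9)?, &scalar(0.999)?, &scalar(1e-7)?,
+    ///     )?;
+    /// ```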
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + embedding_table: &T0, + indices: &T1, + gradient: &T2, + learning_rate: &T3, + momentum: &T4, + velocity: &T5, + beta_1: &T6, + beta_2: &T7, + epsilon: &T8, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseCoreAdam")?; + + // Required input arguments + op.add_input(&embedding_table.to_handle(ctx)?)?; + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&gradient.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&momentum.to_handle(ctx)?)?; + op.add_input(&velocity.to_handle(ctx)?)?; + op.add_input(&beta_1.to_handle(ctx)?)?; + op.add_input(&beta_2.to_handle(ctx)?)?; + op.add_input(&epsilon.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt { + op.set_attr_bool("use_sum_inside_sqrt", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseCoreAdam::new().call(&ctx, &embedding_table, &indices, &gradient, &learning_rate, &momentum, &velocity, &beta_1, &beta_2, &epsilon)`. +/// +/// See : +pub fn xla_sparse_core_adam< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + embedding_table: &T0, + indices: &T1, + gradient: &T2, + learning_rate: &T3, + momentum: &T4, + velocity: &T5, + beta_1: &T6, + beta_2: &T7, + epsilon: &T8, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = XlaSparseCoreAdam::new(); + op.call( + ctx, + embedding_table, + indices, + gradient, + learning_rate, + momentum, + velocity, + beta_1, + beta_2, + epsilon, + ) +} + +/// XlaSparseCoreFtrl +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseCoreFtrl { + feature_width: ::std::option::Option, + multiply_linear_by_learning_rate: ::std::option::Option, + l1_regularization_strength: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseCoreFtrl { + fn default() -> Self { + Self { + feature_width: None, + multiply_linear_by_learning_rate: None, + l1_regularization_strength: None, + target_device_name: None, + } + } +} +impl XlaSparseCoreFtrl { + /// Creates a new `XlaSparseCoreFtrl`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. 
+ pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `multiply_linear_by_learning_rate` attribute. + pub fn multiply_linear_by_learning_rate>( + mut self, + value: ArgType, + ) -> Self { + self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l1_regularization_strength` attribute. + pub fn l1_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l1_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_core_ftrl. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + embedding_table: &T0, + accumulator: &T1, + linear: &T2, + learning_rate: &T3, + indices: &T4, + gradient: &T5, + beta: &T6, + learning_rate_power: &T7, + l2_regularization_strength: &T8, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseCoreFtrl")?; + + // Required input arguments + op.add_input(&embedding_table.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&linear.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&gradient.to_handle(ctx)?)?; + op.add_input(&beta.to_handle(ctx)?)?; + op.add_input(&learning_rate_power.to_handle(ctx)?)?; + op.add_input(&l2_regularization_strength.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + op.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + op.set_attr_float("l1_regularization_strength", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseCoreFtrl::new().call(&ctx, &embedding_table, &accumulator, &linear, &learning_rate, &indices, &gradient, &beta, &learning_rate_power, &l2_regularization_strength)`. 
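+///
+/// A minimal sketch (untested; the FTRL slot variables and scalar
+/// hyperparameters below are illustrative, with `ctx` and the dense tensors
+/// assumed prepared as in the earlier SparseCore sketches):
+///
+/// ```ignore
+/// let scalar = |v: f32| Tensor::new(&[]).with_values(&[v]);
+/// let [new_table, new_accumulator, new_linear] = xla_sparse_core_ftrl(
+///     &ctx, &embedding_table, &accumulator, &linear, &scalar(0.05)?,
+///     &indices, &gradient, &scalar(0.0)?, &scalar(-0.5)?, &scalar(0.0)?,
+/// )?;
+/// ```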
+/// +/// See : +pub fn xla_sparse_core_ftrl< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + embedding_table: &T0, + accumulator: &T1, + linear: &T2, + learning_rate: &T3, + indices: &T4, + gradient: &T5, + beta: &T6, + learning_rate_power: &T7, + l2_regularization_strength: &T8, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = XlaSparseCoreFtrl::new(); + op.call( + ctx, + embedding_table, + accumulator, + linear, + learning_rate, + indices, + gradient, + beta, + learning_rate_power, + l2_regularization_strength, + ) +} + +/// XlaSparseCoreSgd +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseCoreSgd { + feature_width: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseCoreSgd { + fn default() -> Self { + Self { + feature_width: None, + target_device_name: None, + } + } +} +impl XlaSparseCoreSgd { + /// Creates a new `XlaSparseCoreSgd`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_core_sgd. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + embedding_table: &T3, + ) -> crate::Result> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseCoreSgd")?; + + // Required input arguments + op.add_input(&indices.to_handle(ctx)?)?; + op.add_input(&gradient.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.feature_width { + op.set_attr_int("feature_width", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let [h] = op.execute::<1>(ctx)?; + Ok(h) + } +} + +/// Shorthand for `XlaSparseCoreSgd::new().call(&ctx, &indices, &gradient, &learning_rate, &embedding_table)`. 
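+///
+/// A minimal end-to-end sketch (untested; requires a TPU-enabled TensorFlow
+/// build, and all shapes/dtypes here are illustrative; written inside a
+/// function returning `tensorflow::Result`):
+///
+/// ```ignore
+/// use tensorflow::eager::{Context, ContextOptions};
+/// use tensorflow::Tensor;
+///
+/// let ctx = Context::new(ContextOptions::new())?;
+/// let indices = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+/// let gradient = Tensor::new(&[4, 8]).with_values(&[0.0f32; 32])?;
+/// let learning_rate = Tensor::new(&[]).with_values(&[0.01f32])?;
+/// let embedding_table = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+/// let updated_table =
+///     xla_sparse_core_sgd(&ctx, &indices, &gradient, &learning_rate, &embedding_table)?;
+/// ```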
+/// +/// See : +pub fn xla_sparse_core_sgd< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + indices: &T0, + gradient: &T1, + learning_rate: &T2, + embedding_table: &T3, +) -> crate::Result> { + let op = XlaSparseCoreSgd::new(); + op.call(ctx, indices, gradient, learning_rate, embedding_table) +} + +/// XlaSparseDenseMatmul +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseDenseMatmul { + max_ids_per_partition: ::std::option::Option, + max_unique_ids_per_partition: ::std::option::Option, + input_size: ::std::option::Option, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseDenseMatmul { + fn default() -> Self { + Self { + max_ids_per_partition: None, + max_unique_ids_per_partition: None, + input_size: None, + target_device_name: None, + } + } +} +impl XlaSparseDenseMatmul { + /// Creates a new `XlaSparseDenseMatmul`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `max_ids_per_partition` attribute. + pub fn max_ids_per_partition>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_partition = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_partition` attribute. + pub fn max_unique_ids_per_partition>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_partition = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `input_size` attribute. + pub fn input_size>(mut self, value: ArgType) -> Self { + self.input_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_dense_matmul. 
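+    ///
+    /// A minimal sketch (untested; `ctx` and the COO-style
+    /// `row_ids`/`col_ids`/`values`/`offsets` tensors are assumed prepared
+    /// elsewhere, and the attribute values are illustrative):
+    ///
+    /// ```ignore
+    /// let handles = XlaSparseDenseMatmul::new()
+    ///     .max_ids_per_partition(256)
+    ///     .max_unique_ids_per_partition(256)
+    ///     .input_size(8)
+    ///     .call(&ctx, &row_ids, &col_ids, &values, &offsets, &embedding_table)?;
+    /// ```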
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_ids: &T0, + col_ids: &T1, + values: &T2, + offsets: &T3, + embedding_table: &T4, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 5]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseDenseMatmul")?; + + // Required input arguments + op.add_input(&row_ids.to_handle(ctx)?)?; + op.add_input(&col_ids.to_handle(ctx)?)?; + op.add_input(&values.to_handle(ctx)?)?; + op.add_input(&offsets.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.max_ids_per_partition { + op.set_attr_int("max_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_partition { + op.set_attr_int("max_unique_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.input_size { + op.set_attr_int("input_size", *value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<5>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseDenseMatmul::new().call(&ctx, &row_ids, &col_ids, &values, &offsets, &embedding_table)`. +/// +/// See : +pub fn xla_sparse_dense_matmul< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_ids: &T0, + col_ids: &T1, + values: &T2, + offsets: &T3, + embedding_table: &T4, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 5]> { + let op = XlaSparseDenseMatmul::new(); + op.call(ctx, row_ids, col_ids, values, offsets, embedding_table) +} + +/// XlaSparseDenseMatmulGradWithAdagradAndCsrInput +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseDenseMatmulGradWithAdagradAndCsrInput { + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdagradAndCsrInput { + fn default() -> Self { + Self { + clip_weight_min: Some(f32::NEG_INFINITY), + clip_weight_max: Some(f32::INFINITY), + table_name: None, + target_device_name: None, + } + } +} +impl XlaSparseDenseMatmulGradWithAdagradAndCsrInput { + /// Creates a new `XlaSparseDenseMatmulGradWithAdagradAndCsrInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. 
+ pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_dense_matmul_grad_with_adagrad_and_csr_input. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + num_minibatches_per_physical_sparse_core: &T8, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithAdagradAndCsrInput")?; + + // Required input arguments + op.add_input(&row_pointers.to_handle(ctx)?)?; + op.add_input(&sorted_sample_ids.to_handle(ctx)?)?; + op.add_input(&sorted_token_ids.to_handle(ctx)?)?; + op.add_input(&sorted_gains.to_handle(ctx)?)?; + op.add_input(&activation_gradients.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + op.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + op.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradAndCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &num_minibatches_per_physical_sparse_core)`. 
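+///
+/// A minimal sketch (untested; an eager context `ctx` is assumed, the
+/// CSR-formatted inputs normally come from the SparseCore preprocessing ops,
+/// and the placeholder values below are not a meaningful embedding lookup):
+///
+/// ```ignore
+/// let row_pointers = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+/// let sorted_sample_ids = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+/// let sorted_token_ids = Tensor::new(&[4]).with_values(&[0i32, 1, 2, 3])?;
+/// let sorted_gains = Tensor::new(&[4]).with_values(&[1.0f32; 4])?;
+/// let activation_gradients = Tensor::new(&[4, 8]).with_values(&[0.0f32; 32])?;
+/// let learning_rate = Tensor::new(&[]).with_values(&[0.01f32])?;
+/// let embedding_table = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+/// let accumulator = Tensor::new(&[16, 8]).with_values(&[0.1f32; 128])?;
+/// let num_minibatches = Tensor::new(&[]).with_values(&[1i32])?;
+/// let [new_table, new_accumulator] = xla_sparse_dense_matmul_grad_with_adagrad_and_csr_input(
+///     &ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains,
+///     &activation_gradients, &learning_rate, &embedding_table, &accumulator,
+///     &num_minibatches,
+/// )?;
+/// ```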
+/// +/// See : +pub fn xla_sparse_dense_matmul_grad_with_adagrad_and_csr_input< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + num_minibatches_per_physical_sparse_core: &T8, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = XlaSparseDenseMatmulGradWithAdagradAndCsrInput::new(); + op.call( + ctx, + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + accumulator, + num_minibatches_per_physical_sparse_core, + ) +} + +/// XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize { + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize { + fn default() -> Self { + Self { + clip_weight_min: Some(f32::NEG_INFINITY), + clip_weight_max: Some(f32::INFINITY), + max_ids_per_sparse_core: None, + max_unique_ids_per_sparse_core: None, + table_name: None, + target_device_name: None, + } + } +} +impl XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_dense_matmul_grad_with_adagrad_and_static_buffer_size. 
+ pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + num_minibatches_per_physical_sparse_core: &T8, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + // Define Op + let mut op = super::Op::new( + ctx, + "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize", + )?; + + // Required input arguments + op.add_input(&row_pointers.to_handle(ctx)?)?; + op.add_input(&sorted_sample_ids.to_handle(ctx)?)?; + op.add_input(&sorted_token_ids.to_handle(ctx)?)?; + op.add_input(&sorted_gains.to_handle(ctx)?)?; + op.add_input(&activation_gradients.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + op.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + op.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + op.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + op.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<2>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &num_minibatches_per_physical_sparse_core)`. 
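+///
+/// A minimal sketch (untested); the tensor inputs mirror the `...CsrInput`
+/// variant above, and this sketch assumes the static buffer bounds must be
+/// set explicitly:
+///
+/// ```ignore
+/// let [new_table, new_accumulator] =
+///     XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize::new()
+///         .max_ids_per_sparse_core(256)
+///         .max_unique_ids_per_sparse_core(256)
+///         .call(
+///             &ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids,
+///             &sorted_gains, &activation_gradients, &learning_rate,
+///             &embedding_table, &accumulator, &num_minibatches,
+///         )?;
+/// ```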
+/// +/// See : +pub fn xla_sparse_dense_matmul_grad_with_adagrad_and_static_buffer_size< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + num_minibatches_per_physical_sparse_core: &T8, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 2]> { + let op = XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize::new(); + op.call( + ctx, + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + accumulator, + num_minibatches_per_physical_sparse_core, + ) +} + +/// XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput { + use_nesterov: ::std::option::Option, + exponent: ::std::option::Option, + beta1: ::std::option::Option, + beta2: ::std::option::Option, + epsilon: ::std::option::Option, + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput { + fn default() -> Self { + Self { + use_nesterov: None, + exponent: None, + beta1: None, + beta2: None, + epsilon: None, + clip_weight_min: Some(f32::NEG_INFINITY), + clip_weight_max: Some(f32::INFINITY), + table_name: None, + target_device_name: None, + } + } +} +impl XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput { + /// Creates a new `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `use_nesterov` attribute. + pub fn use_nesterov>(mut self, value: ArgType) -> Self { + self.use_nesterov = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `exponent` attribute. + pub fn exponent>(mut self, value: ArgType) -> Self { + self.exponent = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta1` attribute. + pub fn beta1>(mut self, value: ArgType) -> Self { + self.beta1 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta2` attribute. + pub fn beta2>(mut self, value: ArgType) -> Self { + self.beta2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `epsilon` attribute. + pub fn epsilon>(mut self, value: ArgType) -> Self { + self.epsilon = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. 
+ pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `` attribute. + pub fn target_device_name>( + mut self, + value: ArgType, + ) -> Self { + self.target_device_name = ::std::option::Option::Some(value.into()); + self + } + + /// Execute xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_csr_input. + pub fn call< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + T9: crate::eager::ToTensorHandle<'a>, + >( + &self, + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + momenta: &T8, + num_minibatches_per_physical_sparse_core: &T9, + ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + // Define Op + let mut op = super::Op::new( + ctx, + "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput", + )?; + + // Required input arguments + op.add_input(&row_pointers.to_handle(ctx)?)?; + op.add_input(&sorted_sample_ids.to_handle(ctx)?)?; + op.add_input(&sorted_token_ids.to_handle(ctx)?)?; + op.add_input(&sorted_gains.to_handle(ctx)?)?; + op.add_input(&activation_gradients.to_handle(ctx)?)?; + op.add_input(&learning_rate.to_handle(ctx)?)?; + op.add_input(&embedding_table.to_handle(ctx)?)?; + op.add_input(&accumulator.to_handle(ctx)?)?; + op.add_input(&momenta.to_handle(ctx)?)?; + op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?; + + // Attributes + if let ::std::option::Option::Some(value) = &self.use_nesterov { + op.set_attr_bool("use_nesterov", *value)?; + } + if let ::std::option::Option::Some(value) = &self.exponent { + op.set_attr_float("exponent", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta1 { + op.set_attr_float("beta1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta2 { + op.set_attr_float("beta2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.epsilon { + op.set_attr_float("epsilon", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + op.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + op.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + op.set_attr_string("table_name", value)?; + } + + // Set the device name where this Op will be executed + if let ::std::option::Option::Some(value) = &self.target_device_name { + op.set_device(value)?; + } + // Execute Op + let handles = op.execute::<3>(ctx)?; + Ok(handles) + } +} + +/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &momenta, &num_minibatches_per_physical_sparse_core)`. 
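+///
+/// A minimal sketch (untested; inputs mirror the Adagrad CSR variant above
+/// with an additional `momenta` slot tensor, and the optimizer attributes are
+/// illustrative):
+///
+/// ```ignore
+/// let momenta = Tensor::new(&[16, 8]).with_values(&[0.0f32; 128])?;
+/// let [new_table, new_accumulator, new_momenta] =
+///     XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput::new()
+///         .use_nesterov(false)
+///         .beta1(0.9)
+///         .beta2(0.999)
+///         .epsilon(1e-7)
+///         .exponent(2.0)
+///         .call(
+///             &ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids,
+///             &sorted_gains, &activation_gradients, &learning_rate,
+///             &embedding_table, &accumulator, &momenta, &num_minibatches,
+///         )?;
+/// ```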
+/// +/// See : +pub fn xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_csr_input< + 'a, + T0: crate::eager::ToTensorHandle<'a>, + T1: crate::eager::ToTensorHandle<'a>, + T2: crate::eager::ToTensorHandle<'a>, + T3: crate::eager::ToTensorHandle<'a>, + T4: crate::eager::ToTensorHandle<'a>, + T5: crate::eager::ToTensorHandle<'a>, + T6: crate::eager::ToTensorHandle<'a>, + T7: crate::eager::ToTensorHandle<'a>, + T8: crate::eager::ToTensorHandle<'a>, + T9: crate::eager::ToTensorHandle<'a>, +>( + ctx: &'a crate::eager::Context, + row_pointers: &T0, + sorted_sample_ids: &T1, + sorted_token_ids: &T2, + sorted_gains: &T3, + activation_gradients: &T4, + learning_rate: &T5, + embedding_table: &T6, + accumulator: &T7, + momenta: &T8, + num_minibatches_per_physical_sparse_core: &T9, +) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> { + let op = XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput::new(); + op.call( + ctx, + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + accumulator, + momenta, + num_minibatches_per_physical_sparse_core, + ) +} + +/// XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize +/// +/// See : +#[derive(::std::fmt::Debug, ::std::clone::Clone)] +pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize { + use_nesterov: ::std::option::Option, + exponent: ::std::option::Option, + beta1: ::std::option::Option, + beta2: ::std::option::Option, + epsilon: ::std::option::Option, + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + /// (Rust wrapper specific) A device name where this op will be executed + target_device_name: ::std::option::Option<::std::string::String>, +} +impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize { + fn default() -> Self { + Self { + use_nesterov: None, + exponent: None, + beta1: None, + beta2: None, + epsilon: None, + clip_weight_min: Some(f32::NEG_INFINITY), + clip_weight_max: Some(f32::INFINITY), + max_ids_per_sparse_core: None, + max_unique_ids_per_sparse_core: None, + table_name: None, + target_device_name: None, + } + } +} +impl XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `use_nesterov` attribute. + pub fn use_nesterov>(mut self, value: ArgType) -> Self { + self.use_nesterov = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `exponent` attribute. + pub fn exponent>(mut self, value: ArgType) -> Self { + self.exponent = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta1` attribute. + pub fn beta1>(mut self, value: ArgType) -> Self { + self.beta1 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta2` attribute. + pub fn beta2>(mut self, value: ArgType) -> Self { + self.beta2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `epsilon` attribute. + pub fn epsilon>(mut self, value: ArgType) -> Self { + self.epsilon = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_min` attribute. 
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_static_buffer_size.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+        T8: crate::eager::ToTensorHandle<'a>,
+        T9: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        accumulator: &T7,
+        momenta: &T8,
+        num_minibatches_per_physical_sparse_core: &T9,
+    ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+        // Define Op
+        let mut op = super::Op::new(
+            ctx,
+            "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize",
+        )?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&accumulator.to_handle(ctx)?)?;
+        op.add_input(&momenta.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.use_nesterov {
+            op.set_attr_bool("use_nesterov", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.exponent {
+            op.set_attr_float("exponent", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta1 {
+            op.set_attr_float("beta1", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta2 {
+            op.set_attr_float("beta2", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.epsilon {
+            op.set_attr_float("epsilon", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+            op.set_attr_int("max_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+            op.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let handles = op.execute::<3>(ctx)?;
+        Ok(handles)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &momenta, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize>
+pub fn xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_static_buffer_size<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+    T8: crate::eager::ToTensorHandle<'a>,
+    T9: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    accumulator: &T7,
+    momenta: &T8,
+    num_minibatches_per_physical_sparse_core: &T9,
+) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+    let op = XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        momenta,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithAdamAndCsrInput
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithAdamAndCsrInput>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdamAndCsrInput {
+    use_sum_inside_sqrt: ::std::option::Option<bool>,
+    beta1: ::std::option::Option<f32>,
+    beta2: ::std::option::Option<f32>,
+    epsilon: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdamAndCsrInput {
+    fn default() -> Self {
+        Self {
+            use_sum_inside_sqrt: None,
+            beta1: None,
+            beta2: None,
+            epsilon: None,
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdamAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdamAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `use_sum_inside_sqrt` attribute.
+    pub fn use_sum_inside_sqrt<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta1` attribute.
+    pub fn beta1<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta1 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta2` attribute.
+    pub fn beta2<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta2 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `epsilon` attribute.
+    pub fn epsilon<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.epsilon = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_adam_and_csr_input.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+        T8: crate::eager::ToTensorHandle<'a>,
+        T9: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        momenta: &T7,
+        velocity: &T8,
+        num_minibatches_per_physical_sparse_core: &T9,
+    ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithAdamAndCsrInput")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&momenta.to_handle(ctx)?)?;
+        op.add_input(&velocity.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt {
+            op.set_attr_bool("use_sum_inside_sqrt", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta1 {
+            op.set_attr_float("beta1", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta2 {
+            op.set_attr_float("beta2", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.epsilon {
+            op.set_attr_float("epsilon", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let handles = op.execute::<3>(ctx)?;
+        Ok(handles)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdamAndCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &momenta, &velocity, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithAdamAndCsrInput>
+pub fn xla_sparse_dense_matmul_grad_with_adam_and_csr_input<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+    T8: crate::eager::ToTensorHandle<'a>,
+    T9: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    momenta: &T7,
+    velocity: &T8,
+    num_minibatches_per_physical_sparse_core: &T9,
+) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+    let op = XlaSparseDenseMatmulGradWithAdamAndCsrInput::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        momenta,
+        velocity,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize {
+    use_sum_inside_sqrt: ::std::option::Option<bool>,
+    beta1: ::std::option::Option<f32>,
+    beta2: ::std::option::Option<f32>,
+    epsilon: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize {
+    fn default() -> Self {
+        Self {
+            use_sum_inside_sqrt: None,
+            beta1: None,
+            beta2: None,
+            epsilon: None,
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            max_ids_per_sparse_core: None,
+            max_unique_ids_per_sparse_core: None,
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `use_sum_inside_sqrt` attribute.
+    pub fn use_sum_inside_sqrt<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta1` attribute.
+    pub fn beta1<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta1 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta2` attribute.
+    pub fn beta2<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta2 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `epsilon` attribute.
+    pub fn epsilon<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.epsilon = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_adam_and_static_buffer_size.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+        T8: crate::eager::ToTensorHandle<'a>,
+        T9: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        momenta: &T7,
+        velocity: &T8,
+        num_minibatches_per_physical_sparse_core: &T9,
+    ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&momenta.to_handle(ctx)?)?;
+        op.add_input(&velocity.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt {
+            op.set_attr_bool("use_sum_inside_sqrt", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta1 {
+            op.set_attr_float("beta1", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta2 {
+            op.set_attr_float("beta2", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.epsilon {
+            op.set_attr_float("epsilon", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+            op.set_attr_int("max_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+            op.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let handles = op.execute::<3>(ctx)?;
+        Ok(handles)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &momenta, &velocity, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize>
+pub fn xla_sparse_dense_matmul_grad_with_adam_and_static_buffer_size<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+    T8: crate::eager::ToTensorHandle<'a>,
+    T9: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    momenta: &T7,
+    velocity: &T8,
+    num_minibatches_per_physical_sparse_core: &T9,
+) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+    let op = XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        momenta,
+        velocity,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithFtrlAndCsrInput
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithFtrlAndCsrInput>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithFtrlAndCsrInput {
+    multiply_linear_by_learning_rate: ::std::option::Option<bool>,
+    beta: ::std::option::Option<f32>,
+    learning_rate_power: ::std::option::Option<f32>,
+    l1_regularization_strength: ::std::option::Option<f32>,
+    l2_regularization_strength: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithFtrlAndCsrInput {
+    fn default() -> Self {
+        Self {
+            multiply_linear_by_learning_rate: None,
+            beta: None,
+            learning_rate_power: None,
+            l1_regularization_strength: None,
+            l2_regularization_strength: None,
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithFtrlAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithFtrlAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `multiply_linear_by_learning_rate` attribute.
+    pub fn multiply_linear_by_learning_rate<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta` attribute.
+    pub fn beta<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `learning_rate_power` attribute.
+    pub fn learning_rate_power<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.learning_rate_power = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `l1_regularization_strength` attribute.
+    pub fn l1_regularization_strength<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.l1_regularization_strength = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `l2_regularization_strength` attribute.
+    pub fn l2_regularization_strength<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.l2_regularization_strength = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_ftrl_and_csr_input.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+        T8: crate::eager::ToTensorHandle<'a>,
+        T9: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        accumulator: &T7,
+        linear: &T8,
+        num_minibatches_per_physical_sparse_core: &T9,
+    ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithFtrlAndCsrInput")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&accumulator.to_handle(ctx)?)?;
+        op.add_input(&linear.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate {
+            op.set_attr_bool("multiply_linear_by_learning_rate", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta {
+            op.set_attr_float("beta", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.learning_rate_power {
+            op.set_attr_float("learning_rate_power", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.l1_regularization_strength {
+            op.set_attr_float("l1_regularization_strength", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.l2_regularization_strength {
+            op.set_attr_float("l2_regularization_strength", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let handles = op.execute::<3>(ctx)?;
+        Ok(handles)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithFtrlAndCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &linear, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithFtrlAndCsrInput>
+pub fn xla_sparse_dense_matmul_grad_with_ftrl_and_csr_input<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+    T8: crate::eager::ToTensorHandle<'a>,
+    T9: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    accumulator: &T7,
+    linear: &T8,
+    num_minibatches_per_physical_sparse_core: &T9,
+) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+    let op = XlaSparseDenseMatmulGradWithFtrlAndCsrInput::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        linear,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize {
+    multiply_linear_by_learning_rate: ::std::option::Option<bool>,
+    beta: ::std::option::Option<f32>,
+    learning_rate_power: ::std::option::Option<f32>,
+    l1_regularization_strength: ::std::option::Option<f32>,
+    l2_regularization_strength: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize {
+    fn default() -> Self {
+        Self {
+            multiply_linear_by_learning_rate: None,
+            beta: None,
+            learning_rate_power: None,
+            l1_regularization_strength: None,
+            l2_regularization_strength: None,
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            max_ids_per_sparse_core: None,
+            max_unique_ids_per_sparse_core: None,
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `multiply_linear_by_learning_rate` attribute.
+    pub fn multiply_linear_by_learning_rate<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta` attribute.
+    pub fn beta<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `learning_rate_power` attribute.
+    pub fn learning_rate_power<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.learning_rate_power = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `l1_regularization_strength` attribute.
+    pub fn l1_regularization_strength<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.l1_regularization_strength = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `l2_regularization_strength` attribute.
+    pub fn l2_regularization_strength<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.l2_regularization_strength = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_ftrl_and_static_buffer_size.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+        T8: crate::eager::ToTensorHandle<'a>,
+        T9: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        accumulator: &T7,
+        linear: &T8,
+        num_minibatches_per_physical_sparse_core: &T9,
+    ) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&accumulator.to_handle(ctx)?)?;
+        op.add_input(&linear.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate {
+            op.set_attr_bool("multiply_linear_by_learning_rate", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.beta {
+            op.set_attr_float("beta", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.learning_rate_power {
+            op.set_attr_float("learning_rate_power", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.l1_regularization_strength {
+            op.set_attr_float("l1_regularization_strength", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.l2_regularization_strength {
+            op.set_attr_float("l2_regularization_strength", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+            op.set_attr_int("max_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+            op.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let handles = op.execute::<3>(ctx)?;
+        Ok(handles)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &accumulator, &linear, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize>
+pub fn xla_sparse_dense_matmul_grad_with_ftrl_and_static_buffer_size<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+    T8: crate::eager::ToTensorHandle<'a>,
+    T9: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    accumulator: &T7,
+    linear: &T8,
+    num_minibatches_per_physical_sparse_core: &T9,
+) -> crate::Result<[crate::eager::TensorHandle<'a>; 3]> {
+    let op = XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        linear,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithSgdAndCsrInput
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithSgdAndCsrInput>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithSgdAndCsrInput {
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithSgdAndCsrInput {
+    fn default() -> Self {
+        Self {
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithSgdAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithSgdAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_sgd_and_csr_input.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        num_minibatches_per_physical_sparse_core: &T7,
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithSgdAndCsrInput")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithSgdAndCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithSgdAndCsrInput>
+pub fn xla_sparse_dense_matmul_grad_with_sgd_and_csr_input<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    num_minibatches_per_physical_sparse_core: &T7,
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+    let op = XlaSparseDenseMatmulGradWithSgdAndCsrInput::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize {
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize {
+    fn default() -> Self {
+        Self {
+            clip_weight_min: Some(f32::NEG_INFINITY),
+            clip_weight_max: Some(f32::INFINITY),
+            max_ids_per_sparse_core: None,
+            max_unique_ids_per_sparse_core: None,
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_grad_with_sgd_and_static_buffer_size.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+        T6: crate::eager::ToTensorHandle<'a>,
+        T7: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        activation_gradients: &T4,
+        learning_rate: &T5,
+        embedding_table: &T6,
+        num_minibatches_per_physical_sparse_core: &T7,
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&activation_gradients.to_handle(ctx)?)?;
+        op.add_input(&learning_rate.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+            op.set_attr_float("clip_weight_min", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+            op.set_attr_float("clip_weight_max", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+            op.set_attr_int("max_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+            op.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &activation_gradients, &learning_rate, &embedding_table, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize>
+pub fn xla_sparse_dense_matmul_grad_with_sgd_and_static_buffer_size<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+    T6: crate::eager::ToTensorHandle<'a>,
+    T7: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    activation_gradients: &T4,
+    learning_rate: &T5,
+    embedding_table: &T6,
+    num_minibatches_per_physical_sparse_core: &T7,
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+    let op = XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulWithCsrInput
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulWithCsrInput>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulWithCsrInput {
+    input_size: ::std::option::Option<i64>,
+    quantization_config_low: ::std::option::Option<f32>,
+    quantization_config_high: ::std::option::Option<f32>,
+    quantization_config_num_buckets: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulWithCsrInput {
+    fn default() -> Self {
+        Self {
+            input_size: None,
+            quantization_config_low: None,
+            quantization_config_high: None,
+            quantization_config_num_buckets: None,
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulWithCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulWithCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `input_size` attribute.
+    pub fn input_size<ArgType: ::std::convert::Into<i64>>(mut self, value: ArgType) -> Self {
+        self.input_size = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_low` attribute.
+    pub fn quantization_config_low<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_low = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_high` attribute.
+    pub fn quantization_config_high<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_high = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_num_buckets` attribute.
+    pub fn quantization_config_num_buckets<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_num_buckets = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_with_csr_input.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        embedding_table: &T4,
+        num_minibatches_per_physical_sparse_core: &T5,
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulWithCsrInput")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.input_size {
+            op.set_attr_int("input_size", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_low {
+            op.set_attr_float("quantization_config_low", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_high {
+            op.set_attr_float("quantization_config_high", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets {
+            op.set_attr_int("quantization_config_num_buckets", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulWithCsrInput::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &embedding_table, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulWithCsrInput>
+pub fn xla_sparse_dense_matmul_with_csr_input<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    embedding_table: &T4,
+    num_minibatches_per_physical_sparse_core: &T5,
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+    let op = XlaSparseDenseMatmulWithCsrInput::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        embedding_table,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
+/// XlaSparseDenseMatmulWithStaticBufferSize
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulWithStaticBufferSize>
+#[derive(::std::fmt::Debug, ::std::clone::Clone)]
+pub struct XlaSparseDenseMatmulWithStaticBufferSize {
+    input_size: ::std::option::Option<i64>,
+    quantization_config_low: ::std::option::Option<f32>,
+    quantization_config_high: ::std::option::Option<f32>,
+    quantization_config_num_buckets: ::std::option::Option<i64>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    /// (Rust wrapper specific) A device name where this op will be executed
+    target_device_name: ::std::option::Option<::std::string::String>,
+}
+impl ::std::default::Default for XlaSparseDenseMatmulWithStaticBufferSize {
+    fn default() -> Self {
+        Self {
+            input_size: None,
+            quantization_config_low: None,
+            quantization_config_high: None,
+            quantization_config_num_buckets: None,
+            max_ids_per_sparse_core: None,
+            max_unique_ids_per_sparse_core: None,
+            table_name: None,
+            target_device_name: None,
+        }
+    }
+}
+impl XlaSparseDenseMatmulWithStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulWithStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `input_size` attribute.
+    pub fn input_size<ArgType: ::std::convert::Into<i64>>(mut self, value: ArgType) -> Self {
+        self.input_size = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_low` attribute.
+    pub fn quantization_config_low<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_low = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_high` attribute.
+    pub fn quantization_config_high<ArgType: ::std::convert::Into<f32>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_high = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `quantization_config_num_buckets` attribute.
+    pub fn quantization_config_num_buckets<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.quantization_config_num_buckets = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `target_device_name` attribute.
+    pub fn target_device_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.target_device_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Execute xla_sparse_dense_matmul_with_static_buffer_size.
+    pub fn call<
+        'a,
+        T0: crate::eager::ToTensorHandle<'a>,
+        T1: crate::eager::ToTensorHandle<'a>,
+        T2: crate::eager::ToTensorHandle<'a>,
+        T3: crate::eager::ToTensorHandle<'a>,
+        T4: crate::eager::ToTensorHandle<'a>,
+        T5: crate::eager::ToTensorHandle<'a>,
+    >(
+        &self,
+        ctx: &'a crate::eager::Context,
+        row_pointers: &T0,
+        sorted_sample_ids: &T1,
+        sorted_token_ids: &T2,
+        sorted_gains: &T3,
+        embedding_table: &T4,
+        num_minibatches_per_physical_sparse_core: &T5,
+    ) -> crate::Result<crate::eager::TensorHandle<'a>> {
+        // Define Op
+        let mut op = super::Op::new(ctx, "XlaSparseDenseMatmulWithStaticBufferSize")?;
+
+        // Required input arguments
+        op.add_input(&row_pointers.to_handle(ctx)?)?;
+        op.add_input(&sorted_sample_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_token_ids.to_handle(ctx)?)?;
+        op.add_input(&sorted_gains.to_handle(ctx)?)?;
+        op.add_input(&embedding_table.to_handle(ctx)?)?;
+        op.add_input(&num_minibatches_per_physical_sparse_core.to_handle(ctx)?)?;
+
+        // Attributes
+        if let ::std::option::Option::Some(value) = &self.input_size {
+            op.set_attr_int("input_size", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_low {
+            op.set_attr_float("quantization_config_low", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_high {
+            op.set_attr_float("quantization_config_high", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets {
+            op.set_attr_int("quantization_config_num_buckets", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+            op.set_attr_int("max_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+            op.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.table_name {
+            op.set_attr_string("table_name", value)?;
+        }
+
+        // Set the device name where this Op will be executed
+        if let ::std::option::Option::Some(value) = &self.target_device_name {
+            op.set_device(value)?;
+        }
+        // Execute Op
+        let [h] = op.execute::<1>(ctx)?;
+        Ok(h)
+    }
+}
+
+/// Shorthand for `XlaSparseDenseMatmulWithStaticBufferSize::new().call(&ctx, &row_pointers, &sorted_sample_ids, &sorted_token_ids, &sorted_gains, &embedding_table, &num_minibatches_per_physical_sparse_core)`.
+///
+/// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSparseDenseMatmulWithStaticBufferSize>
+pub fn xla_sparse_dense_matmul_with_static_buffer_size<
+    'a,
+    T0: crate::eager::ToTensorHandle<'a>,
+    T1: crate::eager::ToTensorHandle<'a>,
+    T2: crate::eager::ToTensorHandle<'a>,
+    T3: crate::eager::ToTensorHandle<'a>,
+    T4: crate::eager::ToTensorHandle<'a>,
+    T5: crate::eager::ToTensorHandle<'a>,
+>(
+    ctx: &'a crate::eager::Context,
+    row_pointers: &T0,
+    sorted_sample_ids: &T1,
+    sorted_token_ids: &T2,
+    sorted_gains: &T3,
+    embedding_table: &T4,
+    num_minibatches_per_physical_sparse_core: &T5,
+) -> crate::Result<crate::eager::TensorHandle<'a>> {
+    let op = XlaSparseDenseMatmulWithStaticBufferSize::new();
+    op.call(
+        ctx,
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        embedding_table,
+        num_minibatches_per_physical_sparse_core,
+    )
+}
+
 /// XlaSplitND
 ///
 /// See : <https://www.tensorflow.org/api_docs/python/tf/raw_ops/XlaSplitND>
diff --git a/src/ops/ops_impl.rs b/src/ops/ops_impl.rs
index 9ab98b81c0..874ca3d07b 100644
--- a/src/ops/ops_impl.rs
+++ b/src/ops/ops_impl.rs
@@ -13822,6 +13822,11 @@ pub struct BatchFunction {
     container: ::std::option::Option<::std::string::String>,
     shared_name: ::std::option::Option<::std::string::String>,
     batching_queue: ::std::option::Option<::std::string::String>,
+    low_priority_max_batch_size: ::std::option::Option<i64>,
+    low_priority_batch_timeout_micros: ::std::option::Option<i64>,
+    low_priority_allowed_batch_sizes: ::std::option::Option<::std::vec::Vec<i64>>,
+    low_priority_max_enqueued_batches: ::std::option::Option<i64>,
+    mixed_priority_policy: ::std::option::Option<::std::string::String>,
     Tin: ::std::option::Option<::std::vec::Vec<crate::DataType>>,
     Tcaptured: ::std::option::Option<::std::vec::Vec<crate::DataType>>,
     Tout: ::std::option::Option<::std::vec::Vec<crate::DataType>>,
@@ -13916,6 +13921,51 @@ impl BatchFunction {
         self
     }
 
+    /// Sets the `low_priority_max_batch_size` attribute.
+    pub fn low_priority_max_batch_size<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.low_priority_max_batch_size = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `low_priority_batch_timeout_micros` attribute.
+    pub fn low_priority_batch_timeout_micros<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.low_priority_batch_timeout_micros = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `low_priority_allowed_batch_sizes` attribute.
+    pub fn low_priority_allowed_batch_sizes<ArgType: ::std::convert::Into<::std::vec::Vec<i64>>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.low_priority_allowed_batch_sizes = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `low_priority_max_enqueued_batches` attribute.
+    pub fn low_priority_max_enqueued_batches<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.low_priority_max_enqueued_batches = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `mixed_priority_policy` attribute.
+    pub fn mixed_priority_policy<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.mixed_priority_policy = ::std::option::Option::Some(value.into());
+        self
+    }
+
     /// Sets the `Tin` attribute.
     pub fn Tin<ArgType: ::std::convert::Into<::std::vec::Vec<crate::DataType>>>(
         mut self,
@@ -14009,6 +14059,21 @@ impl BatchFunction {
         if let ::std::option::Option::Some(value) = &self.batching_queue {
             nd.set_attr_string("batching_queue", value)?;
         }
+        if let ::std::option::Option::Some(value) = &self.low_priority_max_batch_size {
+            nd.set_attr_int("low_priority_max_batch_size", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_batch_timeout_micros {
+            nd.set_attr_int("low_priority_batch_timeout_micros", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_allowed_batch_sizes {
+            nd.set_attr_int_list("low_priority_allowed_batch_sizes", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_max_enqueued_batches {
+            nd.set_attr_int("low_priority_max_enqueued_batches", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.mixed_priority_policy {
+            nd.set_attr_string("mixed_priority_policy", value)?;
+        }
         if let ::std::option::Option::Some(value) = &self.Tin {
             nd.set_attr_type_list("Tin", value)?;
         }
@@ -14062,6 +14127,21 @@ impl BatchFunction {
         if let ::std::option::Option::Some(value) = &self.batching_queue {
             builder.set_attr_string("batching_queue", value)?;
         }
+        if let ::std::option::Option::Some(value) = &self.low_priority_max_batch_size {
+            builder.set_attr_int("low_priority_max_batch_size", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_batch_timeout_micros {
+            builder.set_attr_int("low_priority_batch_timeout_micros", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_allowed_batch_sizes {
+            builder.set_attr_int_list("low_priority_allowed_batch_sizes", value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.low_priority_max_enqueued_batches {
+            builder.set_attr_int("low_priority_max_enqueued_batches", *value)?;
+        }
+        if let ::std::option::Option::Some(value) = &self.mixed_priority_policy {
+            builder.set_attr_string("mixed_priority_policy", value)?;
+        }
         if let ::std::option::Option::Some(value) = &self.Tin {
             builder.set_attr_type_list("Tin", value)?;
         }
@@ -14389,6 +14469,8 @@ pub struct BatchMatMul {
     T: ::std::option::Option<crate::DataType>,
     adj_x: ::std::option::Option<bool>,
     adj_y: ::std::option::Option<bool>,
+    grad_x: ::std::option::Option<bool>,
+    grad_y: ::std::option::Option<bool>,
     control_inputs: ::std::vec::Vec<crate::Operation>,
 }
 /// An instance of 'BatchMatMul' Operation with its Outputs and Inputs exposed as methods.
@@ -14422,6 +14504,18 @@ impl BatchMatMul {
         self
     }
 
+    /// Sets the `grad_x` attribute.
+    pub fn grad_x<ArgType: ::std::convert::Into<bool>>(mut self, value: ArgType) -> Self {
+        self.grad_x = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `grad_y` attribute.
+    pub fn grad_y<ArgType: ::std::convert::Into<bool>>(mut self, value: ArgType) -> Self {
+        self.grad_y = ::std::option::Option::Some(value.into());
+        self
+    }
+
     /// Adds a control input.
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -14461,6 +14555,12 @@ impl BatchMatMul { if let ::std::option::Option::Some(value) = &self.adj_y { nd.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + nd.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + nd.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -14484,6 +14584,12 @@ impl BatchMatMul { if let ::std::option::Option::Some(value) = &self.adj_y { builder.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + builder.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + builder.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(BatchMatMulInst { op }) @@ -14535,6 +14641,8 @@ pub struct BatchMatMulV2 { T: ::std::option::Option, adj_x: ::std::option::Option, adj_y: ::std::option::Option, + grad_x: ::std::option::Option, + grad_y: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'BatchMatMulV2' Operation with it's Outputs and Inputs exposed as methods. @@ -14568,6 +14676,18 @@ impl BatchMatMulV2 { self } + /// Sets the `grad_x` attribute. + pub fn grad_x>(mut self, value: ArgType) -> Self { + self.grad_x = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_y` attribute. + pub fn grad_y>(mut self, value: ArgType) -> Self { + self.grad_y = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -14607,6 +14727,12 @@ impl BatchMatMulV2 { if let ::std::option::Option::Some(value) = &self.adj_y { nd.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + nd.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + nd.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -14630,6 +14756,12 @@ impl BatchMatMulV2 { if let ::std::option::Option::Some(value) = &self.adj_y { builder.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + builder.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + builder.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(BatchMatMulV2Inst { op }) @@ -14683,6 +14815,8 @@ pub struct BatchMatMulV3 { Tout: ::std::option::Option, adj_x: ::std::option::Option, adj_y: ::std::option::Option, + grad_x: ::std::option::Option, + grad_y: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'BatchMatMulV3' Operation with it's Outputs and Inputs exposed as methods. @@ -14728,6 +14862,18 @@ impl BatchMatMulV3 { self } + /// Sets the `grad_x` attribute. + pub fn grad_x>(mut self, value: ArgType) -> Self { + self.grad_x = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_y` attribute. + pub fn grad_y>(mut self, value: ArgType) -> Self { + self.grad_y = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. 
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -14773,6 +14919,12 @@ impl BatchMatMulV3 { if let ::std::option::Option::Some(value) = &self.adj_y { nd.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + nd.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + nd.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -14802,6 +14954,12 @@ impl BatchMatMulV3 { if let ::std::option::Option::Some(value) = &self.adj_y { builder.set_attr_bool("adj_y", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_x { + builder.set_attr_bool("grad_x", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_y { + builder.set_attr_bool("grad_y", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(BatchMatMulV3Inst { op }) @@ -25446,8 +25604,8 @@ impl BoostedTreesUpdateEnsembleV2Inst { } /// Returns a Vector of gains for 'gains' Input of this BoostedTreesUpdateEnsembleV2 operation. pub fn gains(&self) -> crate::Result> { - let dynamic_offset = (2 * self.op.get_attr_int("num_features")? - + self.op.get_attr_int("num_groups")? + let dynamic_offset = (self.op.get_attr_int("num_groups")? + + 2 * self.op.get_attr_int("num_features")? + 4) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_features")? as i32 { @@ -25474,8 +25632,8 @@ impl BoostedTreesUpdateEnsembleV2Inst { } /// Returns a Vector of left_node_contribs for 'left_node_contribs' Input of this BoostedTreesUpdateEnsembleV2 operation. pub fn left_node_contribs(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("num_groups")? - + 4 * self.op.get_attr_int("num_features")? + let dynamic_offset = (4 * self.op.get_attr_int("num_features")? + + self.op.get_attr_int("num_groups")? + 6) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_features")? as i32 { @@ -25488,8 +25646,8 @@ impl BoostedTreesUpdateEnsembleV2Inst { } /// Returns a Vector of right_node_contribs for 'right_node_contribs' Input of this BoostedTreesUpdateEnsembleV2 operation. pub fn right_node_contribs(&self) -> crate::Result> { - let dynamic_offset = (5 * self.op.get_attr_int("num_features")? - + self.op.get_attr_int("num_groups")? + let dynamic_offset = (self.op.get_attr_int("num_groups")? + + 5 * self.op.get_attr_int("num_features")? + 7) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_features")? as i32 { @@ -25516,8 +25674,8 @@ impl BoostedTreesUpdateEnsembleV2Inst { } /// Returns the 'max_depth' Input of this 'BoostedTreesUpdateEnsembleV2' operation. pub fn max_depth(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("num_groups")? - + 7 * self.op.get_attr_int("num_features")? + let dynamic_offset = (7 * self.op.get_attr_int("num_features")? + + self.op.get_attr_int("num_groups")? + 9) as i32; Ok(crate::Input { operation: &self.op, @@ -25536,8 +25694,8 @@ impl BoostedTreesUpdateEnsembleV2Inst { } /// Returns the 'pruning_mode' Input of this 'BoostedTreesUpdateEnsembleV2' operation. pub fn pruning_mode(&self) -> crate::Result { - let dynamic_offset = (7 * self.op.get_attr_int("num_features")? - + self.op.get_attr_int("num_groups")? + let dynamic_offset = (self.op.get_attr_int("num_groups")? + + 7 * self.op.get_attr_int("num_features")? 
+ 11) as i32; Ok(crate::Input { operation: &self.op, @@ -29813,6 +29971,7 @@ pub struct CollectiveAllToAllV2 { T: ::std::option::Option, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, Nordering_token: ::std::option::Option, control_inputs: ::std::vec::Vec, } @@ -29850,6 +30009,12 @@ impl CollectiveAllToAllV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `Nordering_token` attribute. pub fn Nordering_token>(mut self, value: ArgType) -> Self { self.Nordering_token = ::std::option::Option::Some(value.into()); @@ -29914,6 +30079,9 @@ impl CollectiveAllToAllV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { nd.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + nd.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.Nordering_token { nd.set_attr_int("Nordering_token", *value)?; } @@ -29946,6 +30114,9 @@ impl CollectiveAllToAllV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { builder.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + builder.set_attr_bool("is_stateless", *value)?; + } builder.set_attr_int("Nordering_token", ordering_token.clone().len() as i64)?; ::std::result::Result::Ok(()) })?; @@ -31234,6 +31405,7 @@ pub struct CollectiveGatherV2 { T: ::std::option::Option, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, Nordering_token: ::std::option::Option, control_inputs: ::std::vec::Vec, } @@ -31271,6 +31443,12 @@ impl CollectiveGatherV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `Nordering_token` attribute. 
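The `BoostedTreesUpdateEnsembleV2` hunks above only reorder commutative terms in the regenerated accessors; every offset value is unchanged. The arithmetic itself: each variadic input list starts after the fixed leading inputs plus every earlier per-group and per-feature list. A worked example with illustrative values:

```rust
// Offset of the `gains` input list, reading the formula in the hunk above.
fn gains_offset(num_features: i64, num_groups: i64) -> i32 {
    // 4 fixed leading inputs, one per-group list, and two per-feature
    // lists precede `gains` in the op's flattened input vector.
    (num_groups + 2 * num_features + 4) as i32
}
// gains_offset(3, 2) == 12, so `gains` occupies inputs 12, 13, 14.
```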
pub fn Nordering_token>(mut self, value: ArgType) -> Self { self.Nordering_token = ::std::option::Option::Some(value.into()); @@ -31335,6 +31513,9 @@ impl CollectiveGatherV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { nd.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + nd.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.Nordering_token { nd.set_attr_int("Nordering_token", *value)?; } @@ -31367,6 +31548,9 @@ impl CollectiveGatherV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { builder.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + builder.set_attr_bool("is_stateless", *value)?; + } builder.set_attr_int("Nordering_token", ordering_token.clone().len() as i64)?; ::std::result::Result::Ok(()) })?; @@ -31963,6 +32147,7 @@ pub struct CollectiveReduceScatterV2 { final_op: ::std::option::Option<::std::string::String>, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, Nordering_token: ::std::option::Option, max_subdivs_per_device: ::std::option::Option, control_inputs: ::std::vec::Vec, @@ -32019,6 +32204,12 @@ impl CollectiveReduceScatterV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `Nordering_token` attribute. pub fn Nordering_token>(mut self, value: ArgType) -> Self { self.Nordering_token = ::std::option::Option::Some(value.into()); @@ -32098,6 +32289,9 @@ impl CollectiveReduceScatterV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { nd.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + nd.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.Nordering_token { nd.set_attr_int("Nordering_token", *value)?; } @@ -32139,6 +32333,9 @@ impl CollectiveReduceScatterV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { builder.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + builder.set_attr_bool("is_stateless", *value)?; + } builder.set_attr_int("Nordering_token", ordering_token.clone().len() as i64)?; if let ::std::option::Option::Some(value) = &self.max_subdivs_per_device { builder.set_attr_int("max_subdivs_per_device", *value)?; @@ -32234,6 +32431,7 @@ pub struct CollectiveReduceV2 { final_op: ::std::option::Option<::std::string::String>, communication_hint: ::std::option::Option<::std::string::String>, timeout_seconds: ::std::option::Option, + is_stateless: ::std::option::Option, Nordering_token: ::std::option::Option, max_subdivs_per_device: ::std::option::Option, control_inputs: ::std::vec::Vec, @@ -32290,6 +32488,12 @@ impl CollectiveReduceV2 { self } + /// Sets the `is_stateless` attribute. + pub fn is_stateless>(mut self, value: ArgType) -> Self { + self.is_stateless = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `Nordering_token` attribute. 
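All four collective V2 ops gain the same optional boolean `is_stateless`. Reading the upstream op docs (an interpretation, not something stated in this patch), setting it tells the runtime that executions do not form a stateful sequence, so uniqueness must come from the instance keys. Builder-side it is just another optional attribute; the `merge_op` setter below is assumed from the op's unchanged attribute list:

```rust
use tensorflow::ops::CollectiveReduceV2;

// Configuration sketch only; the op's tensor inputs (input, group_size,
// group_key, instance_key, ordering_token) are wired up elsewhere.
fn stateless_reduce() -> CollectiveReduceV2 {
    CollectiveReduceV2::new()
        .merge_op("Add")
        .final_op("Id")
        .communication_hint("auto")
        .timeout_seconds(30.0_f32)
        .is_stateless(true)
}
```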
pub fn Nordering_token>(mut self, value: ArgType) -> Self { self.Nordering_token = ::std::option::Option::Some(value.into()); @@ -32369,6 +32573,9 @@ impl CollectiveReduceV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { nd.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + nd.set_attr_bool("is_stateless", *value)?; + } if let ::std::option::Option::Some(value) = &self.Nordering_token { nd.set_attr_int("Nordering_token", *value)?; } @@ -32410,6 +32617,9 @@ impl CollectiveReduceV2 { if let ::std::option::Option::Some(value) = &self.timeout_seconds { builder.set_attr_float("timeout_seconds", *value)?; } + if let ::std::option::Option::Some(value) = &self.is_stateless { + builder.set_attr_bool("is_stateless", *value)?; + } builder.set_attr_int("Nordering_token", ordering_token.clone().len() as i64)?; if let ::std::option::Option::Some(value) = &self.max_subdivs_per_device { builder.set_attr_int("max_subdivs_per_device", *value)?; @@ -33732,6 +33942,220 @@ pub fn compute_batch_size>( ComputeBatchSize::new().build(input_dataset, scope) } +/// Builder for the `ComputeDedupDataSize` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ComputeDedupDataSize { + config: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ComputeDedupDataSize' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ComputeDedupDataSizeInst { + /// An instance of a fully built ComputeDedupDataSize Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ComputeDedupDataSize { + /// Creates a new `ComputeDedupDataSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ComputeDedupDataSize` operation. + pub fn build(&self, scope: &mut crate::Scope) -> crate::Result { + self.build_impl(scope) + } + fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result { + scope.new_operation("ComputeDedupDataSize", |nd| { + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ComputeDedupDataSize' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ComputeDedupDataSize", |builder| { + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ComputeDedupDataSizeInst { op }) + } +} +impl ComputeDedupDataSizeInst { + /// Returns the 'num_elements' Output of this 'ComputeDedupDataSize' operation. + pub fn num_elements(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: ComputeDedupDataSizeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ComputeDedupDataSize::new().build(scope)`. 
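`ComputeDedupDataSize` has no tensor inputs: it reads a serialized TPU embedding configuration from its `config` attribute and emits a single scalar output. A usage sketch, with the config string assumed to come from elsewhere:

```rust
use tensorflow::ops::ComputeDedupDataSize;
use tensorflow::{Output, Result, Scope};

// `serialized_config` stands in for a serialized TPU embedding
// configuration proto, which this patch does not construct.
fn dedup_size(serialized_config: &str, scope: &mut Scope) -> Result<Output> {
    let inst = ComputeDedupDataSize::new()
        .config(serialized_config)
        .build_instance(scope)?;
    // The op's only output (index 0): the dedup tuple's element count.
    Ok(inst.num_elements())
}
```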
+pub fn compute_dedup_data_size(scope: &mut crate::Scope) -> crate::Result { + ComputeDedupDataSize::new().build(scope) +} + +/// Builder for the `ComputeDedupDataSizeV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ComputeDedupDataSizeV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ComputeDedupDataSizeV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ComputeDedupDataSizeV2Inst { + /// An instance of a fully built ComputeDedupDataSizeV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ComputeDedupDataSizeV2 { + /// Creates a new `ComputeDedupDataSizeV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ComputeDedupDataSizeV2` operation. + pub fn build(&self, scope: &mut crate::Scope) -> crate::Result { + self.build_impl(scope) + } + fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result { + scope.new_operation("ComputeDedupDataSizeV2", |nd| { + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + nd.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + nd.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + nd.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ComputeDedupDataSizeV2' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ComputeDedupDataSizeV2", |builder| { + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + builder.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + builder.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + builder.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ComputeDedupDataSizeV2Inst { op }) + } +} +impl ComputeDedupDataSizeV2Inst { + /// Returns the 'num_elements' Output of this 'ComputeDedupDataSizeV2' operation. + pub fn num_elements(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: ComputeDedupDataSizeV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ComputeDedupDataSizeV2::new().build(scope)`. +pub fn compute_dedup_data_size_v2(scope: &mut crate::Scope) -> crate::Result { + ComputeDedupDataSizeV2::new().build(scope) +} + /// Builder for the `ComputeDedupDataTupleMask` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct ComputeDedupDataTupleMask { @@ -33815,6 +34239,139 @@ pub fn compute_dedup_data_tuple_mask(scope: &mut crate::Scope) -> crate::Result< ComputeDedupDataTupleMask::new().build(scope) } +/// Builder for the `ComputeDedupDataTupleMaskV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ComputeDedupDataTupleMaskV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ComputeDedupDataTupleMaskV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ComputeDedupDataTupleMaskV2Inst { + /// An instance of a fully built ComputeDedupDataTupleMaskV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ComputeDedupDataTupleMaskV2 { + /// Creates a new `ComputeDedupDataTupleMaskV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ComputeDedupDataTupleMaskV2` operation. 
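`ComputeDedupDataSizeV2` (and the `ComputeDedupDataTupleMaskV2` builder that follows) extend the V1 ops with three more string attributes describing the embedding partitioning, HBM buffer layout, and TPU topology. Usage has the same shape as V1; the parameter names here are placeholders for serialized protos, not values this patch defines:

```rust
use tensorflow::ops::ComputeDedupDataSizeV2;
use tensorflow::{Output, Result, Scope};

fn dedup_size_v2(
    config: &str,       // serialized embedding configuration (assumed)
    partitions: &str,   // serialized embedding partitions (assumed)
    hbm_buffers: &str,  // serialized HBM buffer layout (assumed)
    topology: &str,     // serialized TPU topology (assumed)
    scope: &mut Scope,
) -> Result<Output> {
    let inst = ComputeDedupDataSizeV2::new()
        .config(config)
        .embedding_partitions(partitions)
        .hbm_buffers_config(hbm_buffers)
        .tpu_topology(topology)
        .build_instance(scope)?;
    Ok(inst.num_elements())
}
```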
+ pub fn build(&self, scope: &mut crate::Scope) -> crate::Result { + self.build_impl(scope) + } + fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result { + scope.new_operation("ComputeDedupDataTupleMaskV2", |nd| { + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + nd.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + nd.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + nd.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ComputeDedupDataTupleMaskV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ComputeDedupDataTupleMaskV2", |builder| { + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + builder.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + builder.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + builder.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ComputeDedupDataTupleMaskV2Inst { op }) + } +} +impl ComputeDedupDataTupleMaskV2Inst { + /// Returns the 'output_shape' Output of this 'ComputeDedupDataTupleMaskV2' operation. + pub fn output_shape(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: ComputeDedupDataTupleMaskV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ComputeDedupDataTupleMaskV2::new().build(scope)`. +pub fn compute_dedup_data_tuple_mask_v2( + scope: &mut crate::Scope, +) -> crate::Result { + ComputeDedupDataTupleMaskV2::new().build(scope) +} + /// Builder for the `Concat` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Concat { @@ -33951,6 +34508,7 @@ pub fn concat, O1: ::std::convert::Into< #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct ConcatOffset { N: ::std::option::Option, + shape_type: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'ConcatOffset' Operation with it's Outputs and Inputs exposed as methods. @@ -33972,6 +34530,15 @@ impl ConcatOffset { self } + /// Sets the `shape_type` attribute. + pub fn shape_type>( + mut self, + value: ArgType, + ) -> Self { + self.shape_type = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. 
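The new `shape_type` attribute on `ConcatOffset` selects the integer width of the shape tensors (previously fixed at int32). A sketch assuming `concat_dim` and the `shapes` list are existing int64 outputs, and that `build_instance` takes the list input as a `Vec<Output>` as the hunk below suggests:

```rust
use tensorflow::ops::{ConcatOffset, ConcatOffsetInst};
use tensorflow::{DataType, Output, Result, Scope};

fn int64_concat_offsets(
    concat_dim: Output,
    shapes: Vec<Output>,
    scope: &mut Scope,
) -> Result<ConcatOffsetInst> {
    ConcatOffset::new()
        .shape_type(DataType::Int64)
        .build_instance(concat_dim, shapes, scope)
}
```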
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -34005,6 +34572,9 @@ impl ConcatOffset { if let ::std::option::Option::Some(value) = &self.N { nd.set_attr_int("N", *value)?; } + if let ::std::option::Option::Some(value) = &self.shape_type { + nd.set_attr_type("shape_type", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -34020,6 +34590,9 @@ impl ConcatOffset { builder.add_input(concat_dim); builder.add_input_list(&shape); builder.set_attr_int("N", shape.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.shape_type { + builder.set_attr_type("shape_type", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(ConcatOffsetInst { op }) @@ -35609,27 +36182,28 @@ pub fn control_trigger(scope: &mut crate::Scope) -> crate::Result, strides: ::std::option::Option<::std::vec::Vec>, - use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, explicit_paddings: ::std::option::Option<::std::vec::Vec>, data_format: ::std::option::Option<::std::string::String>, dilations: ::std::option::Option<::std::vec::Vec>, + batch_dims: ::std::option::Option, + groups: ::std::option::Option, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv2D' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv2DInst { - /// An instance of a fully built Conv2D Operation in a Tensorflow graph. +pub struct ConvInst { + /// An instance of a fully built Conv Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv2D { - /// Creates a new `Conv2D`. +impl Conv { + /// Creates a new `Conv`. pub fn new() -> Self { Self::default() } @@ -35649,12 +36223,6 @@ impl Conv2D { self } - /// Sets the `use_cudnn_on_gpu` attribute. - pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { - self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); - self - } - /// Sets the `padding` attribute. pub fn padding>( mut self, @@ -35691,13 +36259,25 @@ impl Conv2D { self } + /// Sets the `batch_dims` attribute. + pub fn batch_dims>(mut self, value: ArgType) -> Self { + self.batch_dims = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `groups` attribute. + pub fn groups>(mut self, value: ArgType) -> Self { + self.groups = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); self } - /// Builds the `Conv2D` operation. + /// Builds the `Conv` operation. 
pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, @@ -35715,7 +36295,7 @@ impl Conv2D { filter: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv2D", |nd| { + scope.new_operation("Conv", |nd| { nd.add_input(input); nd.add_input(filter); for op in &self.control_inputs { @@ -35727,9 +36307,6 @@ impl Conv2D { if let ::std::option::Option::Some(value) = &self.strides { nd.set_attr_int_list("strides", value)?; } - if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { - nd.set_attr_bool("use_cudnn_on_gpu", *value)?; - } if let ::std::option::Option::Some(value) = &self.padding { nd.set_attr_string("padding", value)?; } @@ -35742,18 +36319,24 @@ impl Conv2D { if let ::std::option::Option::Some(value) = &self.dilations { nd.set_attr_int_list("dilations", value)?; } + if let ::std::option::Option::Some(value) = &self.batch_dims { + nd.set_attr_int("batch_dims", *value)?; + } + if let ::std::option::Option::Some(value) = &self.groups { + nd.set_attr_int("groups", *value)?; + } ::std::result::Result::Ok(()) }) } - /// Builds a new instance of 'Conv2D' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, filter: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv2D", |builder| { + ) -> crate::Result { + let op = scope.new_operation("Conv", |builder| { builder.add_input(input); builder.add_input(filter); if let ::std::option::Option::Some(value) = &self.T { @@ -35762,9 +36345,6 @@ impl Conv2D { if let ::std::option::Option::Some(value) = &self.strides { builder.set_attr_int_list("strides", value)?; } - if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { - builder.set_attr_bool("use_cudnn_on_gpu", *value)?; - } if let ::std::option::Option::Some(value) = &self.padding { builder.set_attr_string("padding", value)?; } @@ -35777,27 +36357,33 @@ impl Conv2D { if let ::std::option::Option::Some(value) = &self.dilations { builder.set_attr_int_list("dilations", value)?; } + if let ::std::option::Option::Some(value) = &self.batch_dims { + builder.set_attr_int("batch_dims", *value)?; + } + if let ::std::option::Option::Some(value) = &self.groups { + builder.set_attr_int("groups", *value)?; + } ::std::result::Result::Ok(()) })?; - Ok(Conv2DInst { op }) + Ok(ConvInst { op }) } } -impl Conv2DInst { - /// Returns the 'output' Output of this 'Conv2D' operation. +impl ConvInst { + /// Returns the 'output' Output of this 'Conv' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv2D' operation. + /// Returns the 'input' Input of this 'Conv' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv2D' operation. + /// Returns the 'filter' Input of this 'Conv' operation. pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, @@ -35805,23 +36391,23 @@ impl Conv2DInst { } } } -impl From for crate::Operation { - fn from(inst: Conv2DInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: ConvInst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv2D::new().build(input, filter, scope)`. -pub fn conv2_d, O1: ::std::convert::Into>( +/// Shorthand for `Conv::new().build(input, filter, scope)`. 
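`Conv` is TF 2.16's rank-generic convolution: relative to `Conv2D` it drops `use_cudnn_on_gpu` and adds `batch_dims` and `groups`, and its `data_format` strings name the channel position rather than NHWC/NCHW. A grouped-convolution sketch under those assumptions:

```rust
use tensorflow::ops::Conv;
use tensorflow::{Operation, Output, Result, Scope};

// `input` is assumed channels-last, with the filter's input-channel
// dimension equal to channels / groups, as grouped convolution requires.
fn grouped_conv2d(input: Output, filter: Output, scope: &mut Scope) -> Result<Operation> {
    Conv::new()
        .strides(vec![1_i64, 1, 1, 1])
        .padding("SAME")
        .data_format("CHANNELS_LAST")
        .groups(4)
        .build(input, filter, scope)
}
```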
+pub fn conv, O1: ::std::convert::Into>( input: O0, filter: O1, scope: &mut crate::Scope, ) -> crate::Result { - Conv2D::new().build(input, filter, scope) + Conv::new().build(input, filter, scope) } -/// Builder for the `Conv2DBackpropFilter` operation. +/// Builder for the `Conv2D` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv2DBackpropFilter { +pub struct Conv2D { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, @@ -35831,15 +36417,15 @@ pub struct Conv2DBackpropFilter { dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv2DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv2D' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv2DBackpropFilterInst { - /// An instance of a fully built Conv2DBackpropFilter Operation in a Tensorflow graph. +pub struct Conv2DInst { + /// An instance of a fully built Conv2D Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv2DBackpropFilter { - /// Creates a new `Conv2DBackpropFilter`. +impl Conv2D { + /// Creates a new `Conv2D`. pub fn new() -> Self { Self::default() } @@ -35907,36 +36493,27 @@ impl Conv2DBackpropFilter { self } - /// Builds the `Conv2DBackpropFilter` operation. + /// Builds the `Conv2D` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, - O2: ::std::convert::Into, >( &self, input: O0, - filter_sizes: O1, - out_backprop: O2, + filter: O1, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl( - input.into(), - filter_sizes.into(), - out_backprop.into(), - scope, - ) + self.build_impl(input.into(), filter.into(), scope) } fn build_impl( &self, input: crate::Output, - filter_sizes: crate::Output, - out_backprop: crate::Output, + filter: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv2DBackpropFilter", |nd| { + scope.new_operation("Conv2D", |nd| { nd.add_input(input); - nd.add_input(filter_sizes); - nd.add_input(out_backprop); + nd.add_input(filter); for op in &self.control_inputs { nd.add_control_input(op); } @@ -35965,18 +36542,16 @@ impl Conv2DBackpropFilter { }) } - /// Builds a new instance of 'Conv2DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv2D' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, - filter_sizes: crate::Output, - out_backprop: crate::Output, + filter: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv2DBackpropFilter", |builder| { + ) -> crate::Result { + let op = scope.new_operation("Conv2D", |builder| { builder.add_input(input); - builder.add_input(filter_sizes); - builder.add_input(out_backprop); + builder.add_input(filter); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } @@ -36000,61 +36575,49 @@ impl Conv2DBackpropFilter { } ::std::result::Result::Ok(()) })?; - Ok(Conv2DBackpropFilterInst { op }) + Ok(Conv2DInst { op }) } } -impl Conv2DBackpropFilterInst { - /// Returns the 'output' Output of this 'Conv2DBackpropFilter' operation. +impl Conv2DInst { + /// Returns the 'output' Output of this 'Conv2D' operation. 
pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv2DBackpropFilter' operation. + /// Returns the 'input' Input of this 'Conv2D' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter_sizes' Input of this 'Conv2DBackpropFilter' operation. - pub fn filter_sizes(&self) -> crate::Input { + /// Returns the 'filter' Input of this 'Conv2D' operation. + pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'out_backprop' Input of this 'Conv2DBackpropFilter' operation. - pub fn out_backprop(&self) -> crate::Input { - crate::Input { - operation: &self.op, - index: 2, - } - } } -impl From for crate::Operation { - fn from(inst: Conv2DBackpropFilterInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv2DInst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv2DBackpropFilter::new().build(input, filter_sizes, out_backprop, scope)`. -pub fn conv2_dbackprop_filter< - O0: ::std::convert::Into, - O1: ::std::convert::Into, - O2: ::std::convert::Into, ->( +/// Shorthand for `Conv2D::new().build(input, filter, scope)`. +pub fn conv2_d, O1: ::std::convert::Into>( input: O0, - filter_sizes: O1, - out_backprop: O2, + filter: O1, scope: &mut crate::Scope, ) -> crate::Result { - Conv2DBackpropFilter::new().build(input, filter_sizes, out_backprop, scope) + Conv2D::new().build(input, filter, scope) } -/// Builder for the `Conv2DBackpropFilterV2` operation. +/// Builder for the `Conv2DBackpropFilter` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv2DBackpropFilterV2 { +pub struct Conv2DBackpropFilter { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, @@ -36064,15 +36627,15 @@ pub struct Conv2DBackpropFilterV2 { dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv2DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv2DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv2DBackpropFilterV2Inst { - /// An instance of a fully built Conv2DBackpropFilterV2 Operation in a Tensorflow graph. +pub struct Conv2DBackpropFilterInst { + /// An instance of a fully built Conv2DBackpropFilter Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv2DBackpropFilterV2 { - /// Creates a new `Conv2DBackpropFilterV2`. +impl Conv2DBackpropFilter { + /// Creates a new `Conv2DBackpropFilter`. pub fn new() -> Self { Self::default() } @@ -36140,7 +36703,7 @@ impl Conv2DBackpropFilterV2 { self } - /// Builds the `Conv2DBackpropFilterV2` operation. + /// Builds the `Conv2DBackpropFilter` operation. 
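The `Conv2D` family hunks from here on are pure re-alignment of the regenerated file: the ops shifted because `Conv` was inserted ahead of them, so the diff pairs each builder with its predecessor's old position, but every attribute set is unchanged. For reference, the EXPLICIT padding mode that `explicit_paddings` enables looks like this in use (values illustrative):

```rust
use tensorflow::ops::Conv2D;
use tensorflow::{Operation, Output, Result, Scope};

// `explicit_paddings` holds one (before, after) pair per NHWC dimension,
// so 8 values for a 4-D input.
fn padded_conv2d(input: Output, filter: Output, scope: &mut Scope) -> Result<Operation> {
    Conv2D::new()
        .strides(vec![1_i64, 1, 1, 1])
        .padding("EXPLICIT")
        .explicit_paddings(vec![0_i64, 0, 2, 2, 2, 2, 0, 0])
        .build(input, filter, scope)
}
```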
pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, @@ -36148,22 +36711,27 @@ impl Conv2DBackpropFilterV2 { >( &self, input: O0, - filter: O1, + filter_sizes: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) + self.build_impl( + input.into(), + filter_sizes.into(), + out_backprop.into(), + scope, + ) } fn build_impl( &self, input: crate::Output, - filter: crate::Output, + filter_sizes: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv2DBackpropFilterV2", |nd| { + scope.new_operation("Conv2DBackpropFilter", |nd| { nd.add_input(input); - nd.add_input(filter); + nd.add_input(filter_sizes); nd.add_input(out_backprop); for op in &self.control_inputs { nd.add_control_input(op); @@ -36193,17 +36761,17 @@ impl Conv2DBackpropFilterV2 { }) } - /// Builds a new instance of 'Conv2DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv2DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, - filter: crate::Output, + filter_sizes: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv2DBackpropFilterV2", |builder| { + ) -> crate::Result { + let op = scope.new_operation("Conv2DBackpropFilter", |builder| { builder.add_input(input); - builder.add_input(filter); + builder.add_input(filter_sizes); builder.add_input(out_backprop); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; @@ -36228,32 +36796,32 @@ impl Conv2DBackpropFilterV2 { } ::std::result::Result::Ok(()) })?; - Ok(Conv2DBackpropFilterV2Inst { op }) + Ok(Conv2DBackpropFilterInst { op }) } } -impl Conv2DBackpropFilterV2Inst { - /// Returns the 'output' Output of this 'Conv2DBackpropFilterV2' operation. +impl Conv2DBackpropFilterInst { + /// Returns the 'output' Output of this 'Conv2DBackpropFilter' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv2DBackpropFilterV2' operation. + /// Returns the 'input' Input of this 'Conv2DBackpropFilter' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv2DBackpropFilterV2' operation. - pub fn filter(&self) -> crate::Input { + /// Returns the 'filter_sizes' Input of this 'Conv2DBackpropFilter' operation. + pub fn filter_sizes(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'out_backprop' Input of this 'Conv2DBackpropFilterV2' operation. + /// Returns the 'out_backprop' Input of this 'Conv2DBackpropFilter' operation. pub fn out_backprop(&self) -> crate::Input { crate::Input { operation: &self.op, @@ -36261,28 +36829,28 @@ impl Conv2DBackpropFilterV2Inst { } } } -impl From for crate::Operation { - fn from(inst: Conv2DBackpropFilterV2Inst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv2DBackpropFilterInst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv2DBackpropFilterV2::new().build(input, filter, out_backprop, scope)`. -pub fn conv2_dbackprop_filter_v2< +/// Shorthand for `Conv2DBackpropFilter::new().build(input, filter_sizes, out_backprop, scope)`. 
+pub fn conv2_dbackprop_filter< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, >( input: O0, - filter: O1, + filter_sizes: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - Conv2DBackpropFilterV2::new().build(input, filter, out_backprop, scope) + Conv2DBackpropFilter::new().build(input, filter_sizes, out_backprop, scope) } -/// Builder for the `Conv2DBackpropInput` operation. +/// Builder for the `Conv2DBackpropFilterV2` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv2DBackpropInput { +pub struct Conv2DBackpropFilterV2 { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, @@ -36292,15 +36860,15 @@ pub struct Conv2DBackpropInput { dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv2DBackpropInput' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv2DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv2DBackpropInputInst { - /// An instance of a fully built Conv2DBackpropInput Operation in a Tensorflow graph. +pub struct Conv2DBackpropFilterV2Inst { + /// An instance of a fully built Conv2DBackpropFilterV2 Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv2DBackpropInput { - /// Creates a new `Conv2DBackpropInput`. +impl Conv2DBackpropFilterV2 { + /// Creates a new `Conv2DBackpropFilterV2`. pub fn new() -> Self { Self::default() } @@ -36368,34 +36936,29 @@ impl Conv2DBackpropInput { self } - /// Builds the `Conv2DBackpropInput` operation. + /// Builds the `Conv2DBackpropFilterV2` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, >( &self, - input_sizes: O0, + input: O0, filter: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl( - input_sizes.into(), - filter.into(), - out_backprop.into(), - scope, - ) + self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) } fn build_impl( &self, - input_sizes: crate::Output, + input: crate::Output, filter: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv2DBackpropInput", |nd| { - nd.add_input(input_sizes); + scope.new_operation("Conv2DBackpropFilterV2", |nd| { + nd.add_input(input); nd.add_input(filter); nd.add_input(out_backprop); for op in &self.control_inputs { @@ -36426,16 +36989,16 @@ impl Conv2DBackpropInput { }) } - /// Builds a new instance of 'Conv2DBackpropInput' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv2DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. 
pub fn build_instance( &self, - input_sizes: crate::Output, + input: crate::Output, filter: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv2DBackpropInput", |builder| { - builder.add_input(input_sizes); + ) -> crate::Result { + let op = scope.new_operation("Conv2DBackpropFilterV2", |builder| { + builder.add_input(input); builder.add_input(filter); builder.add_input(out_backprop); if let ::std::option::Option::Some(value) = &self.T { @@ -36461,32 +37024,32 @@ impl Conv2DBackpropInput { } ::std::result::Result::Ok(()) })?; - Ok(Conv2DBackpropInputInst { op }) + Ok(Conv2DBackpropFilterV2Inst { op }) } } -impl Conv2DBackpropInputInst { - /// Returns the 'output' Output of this 'Conv2DBackpropInput' operation. +impl Conv2DBackpropFilterV2Inst { + /// Returns the 'output' Output of this 'Conv2DBackpropFilterV2' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input_sizes' Input of this 'Conv2DBackpropInput' operation. - pub fn input_sizes(&self) -> crate::Input { + /// Returns the 'input' Input of this 'Conv2DBackpropFilterV2' operation. + pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv2DBackpropInput' operation. + /// Returns the 'filter' Input of this 'Conv2DBackpropFilterV2' operation. pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'out_backprop' Input of this 'Conv2DBackpropInput' operation. + /// Returns the 'out_backprop' Input of this 'Conv2DBackpropFilterV2' operation. pub fn out_backprop(&self) -> crate::Input { crate::Input { operation: &self.op, @@ -36494,28 +37057,28 @@ impl Conv2DBackpropInputInst { } } } -impl From for crate::Operation { - fn from(inst: Conv2DBackpropInputInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv2DBackpropFilterV2Inst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv2DBackpropInput::new().build(input_sizes, filter, out_backprop, scope)`. -pub fn conv2_dbackprop_input< +/// Shorthand for `Conv2DBackpropFilterV2::new().build(input, filter, out_backprop, scope)`. +pub fn conv2_dbackprop_filter_v2< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, >( - input_sizes: O0, + input: O0, filter: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - Conv2DBackpropInput::new().build(input_sizes, filter, out_backprop, scope) + Conv2DBackpropFilterV2::new().build(input, filter, out_backprop, scope) } -/// Builder for the `Conv2DBackpropInputV2` operation. +/// Builder for the `Conv2DBackpropInput` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv2DBackpropInputV2 { +pub struct Conv2DBackpropInput { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, use_cudnn_on_gpu: ::std::option::Option, @@ -36525,15 +37088,15 @@ pub struct Conv2DBackpropInputV2 { dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv2DBackpropInputV2' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv2DBackpropInput' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv2DBackpropInputV2Inst { - /// An instance of a fully built Conv2DBackpropInputV2 Operation in a Tensorflow graph. 
+pub struct Conv2DBackpropInputInst { + /// An instance of a fully built Conv2DBackpropInput Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv2DBackpropInputV2 { - /// Creates a new `Conv2DBackpropInputV2`. +impl Conv2DBackpropInput { + /// Creates a new `Conv2DBackpropInput`. pub fn new() -> Self { Self::default() } @@ -36601,29 +37164,34 @@ impl Conv2DBackpropInputV2 { self } - /// Builds the `Conv2DBackpropInputV2` operation. + /// Builds the `Conv2DBackpropInput` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, >( &self, - input: O0, + input_sizes: O0, filter: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) + self.build_impl( + input_sizes.into(), + filter.into(), + out_backprop.into(), + scope, + ) } fn build_impl( &self, - input: crate::Output, + input_sizes: crate::Output, filter: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv2DBackpropInputV2", |nd| { - nd.add_input(input); + scope.new_operation("Conv2DBackpropInput", |nd| { + nd.add_input(input_sizes); nd.add_input(filter); nd.add_input(out_backprop); for op in &self.control_inputs { @@ -36654,16 +37222,16 @@ impl Conv2DBackpropInputV2 { }) } - /// Builds a new instance of 'Conv2DBackpropInputV2' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv2DBackpropInput' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, - input: crate::Output, + input_sizes: crate::Output, filter: crate::Output, out_backprop: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv2DBackpropInputV2", |builder| { - builder.add_input(input); + ) -> crate::Result { + let op = scope.new_operation("Conv2DBackpropInput", |builder| { + builder.add_input(input_sizes); builder.add_input(filter); builder.add_input(out_backprop); if let ::std::option::Option::Some(value) = &self.T { @@ -36689,32 +37257,32 @@ impl Conv2DBackpropInputV2 { } ::std::result::Result::Ok(()) })?; - Ok(Conv2DBackpropInputV2Inst { op }) + Ok(Conv2DBackpropInputInst { op }) } } -impl Conv2DBackpropInputV2Inst { - /// Returns the 'output' Output of this 'Conv2DBackpropInputV2' operation. +impl Conv2DBackpropInputInst { + /// Returns the 'output' Output of this 'Conv2DBackpropInput' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv2DBackpropInputV2' operation. - pub fn input(&self) -> crate::Input { + /// Returns the 'input_sizes' Input of this 'Conv2DBackpropInput' operation. + pub fn input_sizes(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv2DBackpropInputV2' operation. + /// Returns the 'filter' Input of this 'Conv2DBackpropInput' operation. pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'out_backprop' Input of this 'Conv2DBackpropInputV2' operation. + /// Returns the 'out_backprop' Input of this 'Conv2DBackpropInput' operation. 
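`Conv2DBackpropInput` differs from the V2 variant in its first input: it takes `input_sizes`, a 1-D int32 tensor holding the forward input's shape, rather than the forward input tensor itself. A sketch of recovering dL/dinput, with stride and padding values purely illustrative:

```rust
use tensorflow::ops::Conv2DBackpropInput;
use tensorflow::{Operation, Output, Result, Scope};

// `input_sizes` is assumed to be [batch, height, width, channels] matching
// the forward pass; `dz` is the upstream gradient of the Conv2D output.
fn conv2d_input_grad(
    input_sizes: Output,
    filter: Output,
    dz: Output,
    scope: &mut Scope,
) -> Result<Operation> {
    Conv2DBackpropInput::new()
        .strides(vec![1_i64, 2, 2, 1])
        .padding("SAME")
        .build(input_sizes, filter, dz, scope)
}
```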
pub fn out_backprop(&self) -> crate::Input { crate::Input { operation: &self.op, @@ -36722,44 +37290,46 @@ impl Conv2DBackpropInputV2Inst { } } } -impl From for crate::Operation { - fn from(inst: Conv2DBackpropInputV2Inst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv2DBackpropInputInst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv2DBackpropInputV2::new().build(input, filter, out_backprop, scope)`. -pub fn conv2_dbackprop_input_v2< +/// Shorthand for `Conv2DBackpropInput::new().build(input_sizes, filter, out_backprop, scope)`. +pub fn conv2_dbackprop_input< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, >( - input: O0, + input_sizes: O0, filter: O1, out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - Conv2DBackpropInputV2::new().build(input, filter, out_backprop, scope) + Conv2DBackpropInput::new().build(input_sizes, filter, out_backprop, scope) } -/// Builder for the `Conv3D` operation. +/// Builder for the `Conv2DBackpropInputV2` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv3D { +pub struct Conv2DBackpropInputV2 { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, + use_cudnn_on_gpu: ::std::option::Option, padding: ::std::option::Option<::std::string::String>, + explicit_paddings: ::std::option::Option<::std::vec::Vec>, data_format: ::std::option::Option<::std::string::String>, dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv3D' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv2DBackpropInputV2' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv3DInst { - /// An instance of a fully built Conv3D Operation in a Tensorflow graph. +pub struct Conv2DBackpropInputV2Inst { + /// An instance of a fully built Conv2DBackpropInputV2 Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv3D { - /// Creates a new `Conv3D`. +impl Conv2DBackpropInputV2 { + /// Creates a new `Conv2DBackpropInputV2`. pub fn new() -> Self { Self::default() } @@ -36779,6 +37349,12 @@ impl Conv3D { self } + /// Sets the `use_cudnn_on_gpu` attribute. + pub fn use_cudnn_on_gpu>(mut self, value: ArgType) -> Self { + self.use_cudnn_on_gpu = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `padding` attribute. pub fn padding>( mut self, @@ -36788,6 +37364,15 @@ impl Conv3D { self } + /// Sets the `explicit_paddings` attribute. + pub fn explicit_paddings>>( + mut self, + value: ArgType, + ) -> Self { + self.explicit_paddings = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `data_format` attribute. pub fn data_format>( mut self, @@ -36812,27 +37397,31 @@ impl Conv3D { self } - /// Builds the `Conv3D` operation. + /// Builds the `Conv2DBackpropInputV2` operation. 
pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, + O2: ::std::convert::Into, >( &self, input: O0, filter: O1, + out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(input.into(), filter.into(), scope) + self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) } fn build_impl( &self, input: crate::Output, filter: crate::Output, + out_backprop: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv3D", |nd| { + scope.new_operation("Conv2DBackpropInputV2", |nd| { nd.add_input(input); nd.add_input(filter); + nd.add_input(out_backprop); for op in &self.control_inputs { nd.add_control_input(op); } @@ -36842,9 +37431,15 @@ impl Conv3D { if let ::std::option::Option::Some(value) = &self.strides { nd.set_attr_int_list("strides", value)?; } + if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { + nd.set_attr_bool("use_cudnn_on_gpu", *value)?; + } if let ::std::option::Option::Some(value) = &self.padding { nd.set_attr_string("padding", value)?; } + if let ::std::option::Option::Some(value) = &self.explicit_paddings { + nd.set_attr_int_list("explicit_paddings", value)?; + } if let ::std::option::Option::Some(value) = &self.data_format { nd.set_attr_string("data_format", value)?; } @@ -36855,25 +37450,33 @@ impl Conv3D { }) } - /// Builds a new instance of 'Conv3D' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv2DBackpropInputV2' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, filter: crate::Output, + out_backprop: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv3D", |builder| { + ) -> crate::Result { + let op = scope.new_operation("Conv2DBackpropInputV2", |builder| { builder.add_input(input); builder.add_input(filter); + builder.add_input(out_backprop); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } if let ::std::option::Option::Some(value) = &self.strides { builder.set_attr_int_list("strides", value)?; } + if let ::std::option::Option::Some(value) = &self.use_cudnn_on_gpu { + builder.set_attr_bool("use_cudnn_on_gpu", *value)?; + } if let ::std::option::Option::Some(value) = &self.padding { builder.set_attr_string("padding", value)?; } + if let ::std::option::Option::Some(value) = &self.explicit_paddings { + builder.set_attr_int_list("explicit_paddings", value)?; + } if let ::std::option::Option::Some(value) = &self.data_format { builder.set_attr_string("data_format", value)?; } @@ -36882,64 +37485,77 @@ impl Conv3D { } ::std::result::Result::Ok(()) })?; - Ok(Conv3DInst { op }) + Ok(Conv2DBackpropInputV2Inst { op }) } } -impl Conv3DInst { - /// Returns the 'output' Output of this 'Conv3D' operation. +impl Conv2DBackpropInputV2Inst { + /// Returns the 'output' Output of this 'Conv2DBackpropInputV2' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv3D' operation. + /// Returns the 'input' Input of this 'Conv2DBackpropInputV2' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv3D' operation. + /// Returns the 'filter' Input of this 'Conv2DBackpropInputV2' operation. 
pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } + /// Returns the 'out_backprop' Input of this 'Conv2DBackpropInputV2' operation. + pub fn out_backprop(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } } -impl From for crate::Operation { - fn from(inst: Conv3DInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv2DBackpropInputV2Inst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv3D::new().build(input, filter, scope)`. -pub fn conv3_d, O1: ::std::convert::Into>( +/// Shorthand for `Conv2DBackpropInputV2::new().build(input, filter, out_backprop, scope)`. +pub fn conv2_dbackprop_input_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( input: O0, filter: O1, + out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - Conv3D::new().build(input, filter, scope) + Conv2DBackpropInputV2::new().build(input, filter, out_backprop, scope) } -/// Builder for the `Conv3DBackpropFilter` operation. +/// Builder for the `Conv3D` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv3DBackpropFilter { +pub struct Conv3D { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, padding: ::std::option::Option<::std::string::String>, + data_format: ::std::option::Option<::std::string::String>, dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv3DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv3D' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv3DBackpropFilterInst { - /// An instance of a fully built Conv3DBackpropFilter Operation in a Tensorflow graph. +pub struct Conv3DInst { + /// An instance of a fully built Conv3D Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv3DBackpropFilter { - /// Creates a new `Conv3DBackpropFilter`. +impl Conv3D { + /// Creates a new `Conv3D`. pub fn new() -> Self { Self::default() } @@ -36968,6 +37584,15 @@ impl Conv3DBackpropFilter { self } + /// Sets the `data_format` attribute. + pub fn data_format>( + mut self, + value: ArgType, + ) -> Self { + self.data_format = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `dilations` attribute. pub fn dilations>>( mut self, @@ -36983,31 +37608,27 @@ impl Conv3DBackpropFilter { self } - /// Builds the `Conv3DBackpropFilter` operation. + /// Builds the `Conv3D` operation. 
pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, - O2: ::std::convert::Into, >( &self, input: O0, filter: O1, - out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) + self.build_impl(input.into(), filter.into(), scope) } fn build_impl( &self, input: crate::Output, filter: crate::Output, - out_backprop: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("Conv3DBackpropFilter", |nd| { + scope.new_operation("Conv3D", |nd| { nd.add_input(input); nd.add_input(filter); - nd.add_input(out_backprop); for op in &self.control_inputs { nd.add_control_input(op); } @@ -37020,6 +37641,9 @@ impl Conv3DBackpropFilter { if let ::std::option::Option::Some(value) = &self.padding { nd.set_attr_string("padding", value)?; } + if let ::std::option::Option::Some(value) = &self.data_format { + nd.set_attr_string("data_format", value)?; + } if let ::std::option::Option::Some(value) = &self.dilations { nd.set_attr_int_list("dilations", value)?; } @@ -37027,18 +37651,16 @@ impl Conv3DBackpropFilter { }) } - /// Builds a new instance of 'Conv3DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'Conv3D' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, filter: crate::Output, - out_backprop: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("Conv3DBackpropFilter", |builder| { + ) -> crate::Result { + let op = scope.new_operation("Conv3D", |builder| { builder.add_input(input); builder.add_input(filter); - builder.add_input(out_backprop); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } @@ -37048,82 +37670,256 @@ impl Conv3DBackpropFilter { if let ::std::option::Option::Some(value) = &self.padding { builder.set_attr_string("padding", value)?; } + if let ::std::option::Option::Some(value) = &self.data_format { + builder.set_attr_string("data_format", value)?; + } if let ::std::option::Option::Some(value) = &self.dilations { builder.set_attr_int_list("dilations", value)?; } ::std::result::Result::Ok(()) })?; - Ok(Conv3DBackpropFilterInst { op }) + Ok(Conv3DInst { op }) } } -impl Conv3DBackpropFilterInst { - /// Returns the 'output' Output of this 'Conv3DBackpropFilter' operation. +impl Conv3DInst { + /// Returns the 'output' Output of this 'Conv3D' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'Conv3DBackpropFilter' operation. + /// Returns the 'input' Input of this 'Conv3D' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'filter' Input of this 'Conv3DBackpropFilter' operation. + /// Returns the 'filter' Input of this 'Conv3D' operation. pub fn filter(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'out_backprop' Input of this 'Conv3DBackpropFilter' operation. - pub fn out_backprop(&self) -> crate::Input { - crate::Input { - operation: &self.op, - index: 2, - } - } } -impl From for crate::Operation { - fn from(inst: Conv3DBackpropFilterInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: Conv3DInst) -> crate::Operation { inst.op } } -/// Shorthand for `Conv3DBackpropFilter::new().build(input, filter, out_backprop, scope)`. 
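`Conv3D` picks up a `data_format` attribute in this regeneration. A hedged usage sketch, with placeholder inputs and assumed attribute values:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let input = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let filter = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let conv = ops::Conv3D::new()
            .strides(vec![1, 1, 1, 1, 1])
            .padding("VALID")
            .data_format("NDHWC") // newly settable; NDHWC is TF's usual default for Conv3D
            .build(input, filter, &mut scope)?;
        let _ = conv;
        Ok(())
    }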
-pub fn conv3_dbackprop_filter< - O0: ::std::convert::Into, - O1: ::std::convert::Into, - O2: ::std::convert::Into, ->( +/// Shorthand for `Conv3D::new().build(input, filter, scope)`. +pub fn conv3_d, O1: ::std::convert::Into>( input: O0, filter: O1, - out_backprop: O2, scope: &mut crate::Scope, ) -> crate::Result { - Conv3DBackpropFilter::new().build(input, filter, out_backprop, scope) + Conv3D::new().build(input, filter, scope) } -/// Builder for the `Conv3DBackpropFilterV2` operation. +/// Builder for the `Conv3DBackpropFilter` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct Conv3DBackpropFilterV2 { +pub struct Conv3DBackpropFilter { T: ::std::option::Option, strides: ::std::option::Option<::std::vec::Vec>, padding: ::std::option::Option<::std::string::String>, - data_format: ::std::option::Option<::std::string::String>, dilations: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'Conv3DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'Conv3DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct Conv3DBackpropFilterV2Inst { - /// An instance of a fully built Conv3DBackpropFilterV2 Operation in a Tensorflow graph. +pub struct Conv3DBackpropFilterInst { + /// An instance of a fully built Conv3DBackpropFilter Operation in a Tensorflow graph. pub op: crate::Operation, } -impl Conv3DBackpropFilterV2 { - /// Creates a new `Conv3DBackpropFilterV2`. +impl Conv3DBackpropFilter { + /// Creates a new `Conv3DBackpropFilter`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>(mut self, value: ArgType) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `strides` attribute. + pub fn strides>>( + mut self, + value: ArgType, + ) -> Self { + self.strides = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `padding` attribute. + pub fn padding>( + mut self, + value: ArgType, + ) -> Self { + self.padding = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `dilations` attribute. + pub fn dilations>>( + mut self, + value: ArgType, + ) -> Self { + self.dilations = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `Conv3DBackpropFilter` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + input: O0, + filter: O1, + out_backprop: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input.into(), filter.into(), out_backprop.into(), scope) + } + fn build_impl( + &self, + input: crate::Output, + filter: crate::Output, + out_backprop: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("Conv3DBackpropFilter", |nd| { + nd.add_input(input); + nd.add_input(filter); + nd.add_input(out_backprop); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.strides { + nd.set_attr_int_list("strides", value)?; + } + if let ::std::option::Option::Some(value) = &self.padding { + nd.set_attr_string("padding", value)?; + } + if let ::std::option::Option::Some(value) = &self.dilations { + nd.set_attr_int_list("dilations", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'Conv3DBackpropFilter' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input: crate::Output, + filter: crate::Output, + out_backprop: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("Conv3DBackpropFilter", |builder| { + builder.add_input(input); + builder.add_input(filter); + builder.add_input(out_backprop); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.strides { + builder.set_attr_int_list("strides", value)?; + } + if let ::std::option::Option::Some(value) = &self.padding { + builder.set_attr_string("padding", value)?; + } + if let ::std::option::Option::Some(value) = &self.dilations { + builder.set_attr_int_list("dilations", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(Conv3DBackpropFilterInst { op }) + } +} +impl Conv3DBackpropFilterInst { + /// Returns the 'output' Output of this 'Conv3DBackpropFilter' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input' Input of this 'Conv3DBackpropFilter' operation. + pub fn input(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'filter' Input of this 'Conv3DBackpropFilter' operation. + pub fn filter(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'out_backprop' Input of this 'Conv3DBackpropFilter' operation. + pub fn out_backprop(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: Conv3DBackpropFilterInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `Conv3DBackpropFilter::new().build(input, filter, out_backprop, scope)`. +pub fn conv3_dbackprop_filter< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + input: O0, + filter: O1, + out_backprop: O2, + scope: &mut crate::Scope, +) -> crate::Result { + Conv3DBackpropFilter::new().build(input, filter, out_backprop, scope) +} + +/// Builder for the `Conv3DBackpropFilterV2` operation. 
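The shorthand free functions mirror the builders one-to-one, with all attributes left at their defaults. A sketch of the `conv3_dbackprop_filter` path, with placeholders standing in for the real tensors:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let input = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let filter = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let out_backprop = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        // Equivalent to Conv3DBackpropFilter::new().build(input, filter,
        // out_backprop, scope).
        let grad_filter = ops::conv3_dbackprop_filter(input, filter, out_backprop, &mut scope)?;
        let _ = grad_filter;
        Ok(())
    }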
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct Conv3DBackpropFilterV2 { + T: ::std::option::Option, + strides: ::std::option::Option<::std::vec::Vec>, + padding: ::std::option::Option<::std::string::String>, + data_format: ::std::option::Option<::std::string::String>, + dilations: ::std::option::Option<::std::vec::Vec>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'Conv3DBackpropFilterV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct Conv3DBackpropFilterV2Inst { + /// An instance of a fully built Conv3DBackpropFilterV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl Conv3DBackpropFilterV2 { + /// Creates a new `Conv3DBackpropFilterV2`. pub fn new() -> Self { Self::default() } @@ -37712,6 +38508,799 @@ pub fn conv3_dbackprop_input_v2< Conv3DBackpropInputV2::new().build(input_sizes, filter, out_backprop, scope) } +/// Builder for the `ConvertToCooTensor` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ConvertToCooTensor { + sample_count: ::std::option::Option, + combiner: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ConvertToCooTensor' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ConvertToCooTensorInst { + /// An instance of a fully built ConvertToCooTensor Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ConvertToCooTensor { + /// Creates a new `ConvertToCooTensor`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `combiner` attribute. + pub fn combiner>( + mut self, + value: ArgType, + ) -> Self { + self.combiner = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ConvertToCooTensor` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + indices_or_row_splits: O0, + values: O1, + weights: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + indices_or_row_splits.into(), + values.into(), + weights.into(), + scope, + ) + } + fn build_impl( + &self, + indices_or_row_splits: crate::Output, + values: crate::Output, + weights: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("ConvertToCooTensor", |nd| { + nd.add_input(indices_or_row_splits); + nd.add_input(values); + nd.add_input(weights); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count { + nd.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + nd.set_attr_string("combiner", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ConvertToCooTensor' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + indices_or_row_splits: crate::Output, + values: crate::Output, + weights: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ConvertToCooTensor", |builder| { + builder.add_input(indices_or_row_splits); + builder.add_input(values); + builder.add_input(weights); + if let ::std::option::Option::Some(value) = &self.sample_count { + builder.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + builder.set_attr_string("combiner", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ConvertToCooTensorInst { op }) + } +} +impl ConvertToCooTensorInst { + /// Returns the 'row_ids' Output of this 'ConvertToCooTensor' operation. + pub fn row_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'col_ids' Output of this 'ConvertToCooTensor' operation. + pub fn col_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'gains' Output of this 'ConvertToCooTensor' operation. + pub fn gains(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'indices_or_row_splits' Input of this 'ConvertToCooTensor' operation. + pub fn indices_or_row_splits(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'values' Input of this 'ConvertToCooTensor' operation. + pub fn values(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'weights' Input of this 'ConvertToCooTensor' operation. + pub fn weights(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: ConvertToCooTensorInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ConvertToCooTensor::new().build(indices_or_row_splits, values, weights, scope)`. +pub fn convert_to_coo_tensor< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + indices_or_row_splits: O0, + values: O1, + weights: O2, + scope: &mut crate::Scope, +) -> crate::Result { + ConvertToCooTensor::new().build(indices_or_row_splits, values, weights, scope) +} + +/// Builder for the `ConvertToListOfSparseCoreCooTensors` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ConvertToListOfSparseCoreCooTensors { + sample_count: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + row_offset: ::std::option::Option, + col_offset: ::std::option::Option, + col_shift: ::std::option::Option, + num_sc_shards: ::std::option::Option, + stacked_table_sample_count: ::std::option::Option, + combiner: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ConvertToListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ConvertToListOfSparseCoreCooTensorsInst { + /// An instance of a fully built ConvertToListOfSparseCoreCooTensors Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ConvertToListOfSparseCoreCooTensors { + /// Creates a new `ConvertToListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. 
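Before continuing with `ConvertToListOfSparseCoreCooTensors`, a quick usage sketch for the multi-output `ConvertToCooTensorInst` shown above. The dtypes and attribute values are assumptions, not taken from this patch:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let indices = ops::Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
        let values = ops::Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
        let weights = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let inst = ops::ConvertToCooTensor::new()
            .sample_count(16)  // assumed value
            .combiner("sum")   // "sum", "mean", "sqrtn" are TF's usual combiner strings
            .build_instance(indices.into(), values.into(), weights.into(), &mut scope)?;
        // The *Inst wrapper exposes each named output at its fixed index.
        let (_row_ids, _col_ids, _gains) = (inst.row_ids(), inst.col_ids(), inst.gains());
        Ok(())
    }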
+ pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `row_offset` attribute. + pub fn row_offset>(mut self, value: ArgType) -> Self { + self.row_offset = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset` attribute. + pub fn col_offset>(mut self, value: ArgType) -> Self { + self.col_offset = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_shift` attribute. + pub fn col_shift>(mut self, value: ArgType) -> Self { + self.col_shift = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_shards` attribute. + pub fn num_sc_shards>(mut self, value: ArgType) -> Self { + self.num_sc_shards = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `stacked_table_sample_count` attribute. + pub fn stacked_table_sample_count>( + mut self, + value: ArgType, + ) -> Self { + self.stacked_table_sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `combiner` attribute. + pub fn combiner>( + mut self, + value: ArgType, + ) -> Self { + self.combiner = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ConvertToListOfSparseCoreCooTensors` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + indices_or_row_splits: O0, + values: O1, + weights: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + indices_or_row_splits.into(), + values.into(), + weights.into(), + scope, + ) + } + fn build_impl( + &self, + indices_or_row_splits: crate::Output, + values: crate::Output, + weights: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("ConvertToListOfSparseCoreCooTensors", |nd| { + nd.add_input(indices_or_row_splits); + nd.add_input(values); + nd.add_input(weights); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count { + nd.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.row_offset { + nd.set_attr_int("row_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset { + nd.set_attr_int("col_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_shift { + nd.set_attr_int("col_shift", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_shards { + nd.set_attr_int("num_sc_shards", *value)?; + } + if let ::std::option::Option::Some(value) = &self.stacked_table_sample_count { + nd.set_attr_int("stacked_table_sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + nd.set_attr_string("combiner", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ConvertToListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + indices_or_row_splits: crate::Output, + values: crate::Output, + weights: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ConvertToListOfSparseCoreCooTensors", |builder| { + builder.add_input(indices_or_row_splits); + builder.add_input(values); + builder.add_input(weights); + if let ::std::option::Option::Some(value) = &self.sample_count { + builder.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.row_offset { + builder.set_attr_int("row_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset { + builder.set_attr_int("col_offset", *value)?; + } + if let ::std::option::Option::Some(value) = &self.col_shift { + builder.set_attr_int("col_shift", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_shards { + builder.set_attr_int("num_sc_shards", *value)?; + } + if let ::std::option::Option::Some(value) = &self.stacked_table_sample_count { + builder.set_attr_int("stacked_table_sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiner { + builder.set_attr_string("combiner", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ConvertToListOfSparseCoreCooTensorsInst { op }) + } +} +impl ConvertToListOfSparseCoreCooTensorsInst { + /// Returns a Vector of row_ids_list for 'row_ids_list' Output of this ConvertToListOfSparseCoreCooTensors operation. + pub fn row_ids_list(&self) -> crate::Result> { + let mut Outputs = vec![]; + for i in 0..self.op.get_attr_int("num_sc_per_chip")? as i32 { + Outputs.push(crate::Output { + operation: self.op.clone(), + index: i, + }); + } + Ok(Outputs) + } + /// Returns a Vector of col_ids_list for 'col_ids_list' Output of this ConvertToListOfSparseCoreCooTensors operation. + pub fn col_ids_list(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("num_sc_per_chip")? + 1) as i32; + let mut Outputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_sc_per_chip")? as i32 { + Outputs.push(crate::Output { + operation: self.op.clone(), + index: i, + }); + } + Ok(Outputs) + } + /// Returns a Vector of gains_list for 'gains_list' Output of this ConvertToListOfSparseCoreCooTensors operation. + pub fn gains_list(&self) -> crate::Result> { + let dynamic_offset = (2 * self.op.get_attr_int("num_sc_per_chip")? + 2) as i32; + let mut Outputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_sc_per_chip")? as i32 { + Outputs.push(crate::Output { + operation: self.op.clone(), + index: i, + }); + } + Ok(Outputs) + } + /// Returns the 'indices_or_row_splits' Input of this 'ConvertToListOfSparseCoreCooTensors' operation. + pub fn indices_or_row_splits(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'values' Input of this 'ConvertToListOfSparseCoreCooTensors' operation. + pub fn values(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'weights' Input of this 'ConvertToListOfSparseCoreCooTensors' operation. 
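The `dynamic_offset` arithmetic in the list accessors above is easier to audit with a concrete value. A worked example, assuming num_sc_per_chip = 4:

    // row_ids_list  -> output indices 0..4    (i.e. 0, 1, 2, 3)
    // col_ids_list  -> offset = 4 + 1 = 5,    indices 5..9    (5, 6, 7, 8)
    // gains_list    -> offset = 2*4 + 2 = 10, indices 10..14  (10, 11, 12, 13)
    //
    // Note that indices 4 and 9 are skipped: the generated offsets assume one
    // extra slot after each preceding list rather than a contiguous packing.
    // Callers indexing outputs by hand should double-check against the op
    // registry instead of assuming back-to-back lists.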
+ pub fn weights(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: ConvertToListOfSparseCoreCooTensorsInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ConvertToListOfSparseCoreCooTensors::new().build(indices_or_row_splits, values, weights, scope)`. +pub fn convert_to_list_of_sparse_core_coo_tensors< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + indices_or_row_splits: O0, + values: O1, + weights: O2, + scope: &mut crate::Scope, +) -> crate::Result { + ConvertToListOfSparseCoreCooTensors::new().build(indices_or_row_splits, values, weights, scope) +} + +/// Builder for the `ConvertToSparseCoreCsrWrappedCooTensor` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ConvertToSparseCoreCsrWrappedCooTensor { + sample_count_per_sc: ::std::option::Option, + num_replica: ::std::option::Option, + max_minibatches_per_sc: ::std::option::Option, + max_ids_per_chip_per_sample: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + allow_id_dropping: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ConvertToSparseCoreCsrWrappedCooTensor' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ConvertToSparseCoreCsrWrappedCooTensorInst { + /// An instance of a fully built ConvertToSparseCoreCsrWrappedCooTensor Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ConvertToSparseCoreCsrWrappedCooTensor { + /// Creates a new `ConvertToSparseCoreCsrWrappedCooTensor`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_per_sc` attribute. + pub fn sample_count_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_minibatches_per_sc` attribute. + pub fn max_minibatches_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.max_minibatches_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_chip_per_sample` attribute. + pub fn max_ids_per_chip_per_sample>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_chip_per_sample = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `allow_id_dropping` attribute. 
+ pub fn allow_id_dropping>( + mut self, + value: ArgType, + ) -> Self { + self.allow_id_dropping = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ConvertToSparseCoreCsrWrappedCooTensor` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + >( + &self, + sorted_row_ids_list: O0, + sorted_col_ids_list: O1, + sorted_gains_list: O2, + id_counts_list: O3, + splits: O4, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + sorted_row_ids_list.into(), + sorted_col_ids_list.into(), + sorted_gains_list.into(), + id_counts_list.into(), + splits.into(), + scope, + ) + } + fn build_impl( + &self, + sorted_row_ids_list: crate::Output, + sorted_col_ids_list: crate::Output, + sorted_gains_list: crate::Output, + id_counts_list: crate::Output, + splits: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("ConvertToSparseCoreCsrWrappedCooTensor", |nd| { + nd.add_input(sorted_row_ids_list); + nd.add_input(sorted_col_ids_list); + nd.add_input(sorted_gains_list); + nd.add_input(id_counts_list); + nd.add_input(splits); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count_per_sc { + nd.set_attr_int("sample_count_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + nd.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + nd.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + nd.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.allow_id_dropping { + nd.set_attr_bool("allow_id_dropping", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ConvertToSparseCoreCsrWrappedCooTensor' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + sorted_row_ids_list: Vec, + sorted_col_ids_list: Vec, + sorted_gains_list: Vec, + id_counts_list: Vec, + splits: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ConvertToSparseCoreCsrWrappedCooTensor", |builder| { + builder.add_input_list(&sorted_row_ids_list); + builder.add_input_list(&sorted_col_ids_list); + builder.add_input_list(&sorted_gains_list); + builder.add_input_list(&id_counts_list); + builder.add_input(splits); + if let ::std::option::Option::Some(value) = &self.sample_count_per_sc { + builder.set_attr_int("sample_count_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + builder.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + builder.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + builder.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + builder.set_attr_int("num_sc_per_chip", sorted_row_ids_list.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.allow_id_dropping { + builder.set_attr_bool("allow_id_dropping", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ConvertToSparseCoreCsrWrappedCooTensorInst { op }) + } +} +impl ConvertToSparseCoreCsrWrappedCooTensorInst { + /// Returns the 'row_pointers' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn row_pointers(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn sorted_sample_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'sorted_token_ids' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn sorted_token_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'sorted_gains' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn sorted_gains(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 3, + } + } + /// Returns the 'row_pointers_unpadded_size' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn row_pointers_unpadded_size(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 4, + } + } + /// Returns the 'ids_unpadded_size' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn ids_unpadded_size(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 5, + } + } + /// Returns the 'num_minibatches_per_sc' Output of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn num_minibatches_per_sc(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 6, + } + } + /// Returns a Vector of sorted_row_ids_list for 'sorted_row_ids_list' Input of this ConvertToSparseCoreCsrWrappedCooTensor operation. 
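Two things are worth noting in the `build_instance` above: list-valued inputs go through `add_input_list`, and `num_sc_per_chip` is pinned from the length of `sorted_row_ids_list` (the `.clone()` before `.len()` is redundant in Rust, though harmless). A hedged sketch of such a call, with all tensors and dtypes standing in as placeholder assumptions:

    use tensorflow::{ops, DataType, Output, Scope, Status};

    fn ph(dt: DataType, scope: &mut Scope) -> Result<Output, Status> {
        // Convenience helper: a placeholder converted to its first output.
        Ok(ops::Placeholder::new().dtype(dt).build(scope)?.into())
    }

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let rows = vec![ph(DataType::Int32, &mut scope)?, ph(DataType::Int32, &mut scope)?];
        let cols = vec![ph(DataType::Int32, &mut scope)?, ph(DataType::Int32, &mut scope)?];
        let gains = vec![ph(DataType::Float, &mut scope)?, ph(DataType::Float, &mut scope)?];
        let counts = vec![ph(DataType::Int32, &mut scope)?, ph(DataType::Int32, &mut scope)?];
        let splits = ph(DataType::Int64, &mut scope)?;
        // num_sc_per_chip is inferred as 2 from rows.len(); the table name is
        // a made-up value.
        let inst = ops::ConvertToSparseCoreCsrWrappedCooTensor::new()
            .table_name("table0")
            .build_instance(rows, cols, gains, counts, splits, &mut scope)?;
        let _ = inst.row_pointers();
        Ok(())
    }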
+ pub fn sorted_row_ids_list(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("num_sc_per_chip")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of sorted_col_ids_list for 'sorted_col_ids_list' Input of this ConvertToSparseCoreCsrWrappedCooTensor operation. + pub fn sorted_col_ids_list(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("num_sc_per_chip")? + 1) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_sc_per_chip")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of sorted_gains_list for 'sorted_gains_list' Input of this ConvertToSparseCoreCsrWrappedCooTensor operation. + pub fn sorted_gains_list(&self) -> crate::Result> { + let dynamic_offset = (2 * self.op.get_attr_int("num_sc_per_chip")? + 2) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_sc_per_chip")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of id_counts_list for 'id_counts_list' Input of this ConvertToSparseCoreCsrWrappedCooTensor operation. + pub fn id_counts_list(&self) -> crate::Result> { + let dynamic_offset = (3 * self.op.get_attr_int("num_sc_per_chip")? + 3) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_sc_per_chip")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns the 'splits' Input of this 'ConvertToSparseCoreCsrWrappedCooTensor' operation. + pub fn splits(&self) -> crate::Result { + let dynamic_offset = (4 * self.op.get_attr_int("num_sc_per_chip")? + 4) as i32; + Ok(crate::Input { + operation: &self.op, + index: dynamic_offset, + }) + } +} +impl From for crate::Operation { + fn from(inst: ConvertToSparseCoreCsrWrappedCooTensorInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ConvertToSparseCoreCsrWrappedCooTensor::new().build(sorted_row_ids_list, sorted_col_ids_list, sorted_gains_list, id_counts_list, splits, scope)`. +pub fn convert_to_sparse_core_csr_wrapped_coo_tensor< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, +>( + sorted_row_ids_list: O0, + sorted_col_ids_list: O1, + sorted_gains_list: O2, + id_counts_list: O3, + splits: O4, + scope: &mut crate::Scope, +) -> crate::Result { + ConvertToSparseCoreCsrWrappedCooTensor::new().build( + sorted_row_ids_list, + sorted_col_ids_list, + sorted_gains_list, + id_counts_list, + splits, + scope, + ) +} + /// Builder for the `Copy` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Copy { @@ -37981,7 +39570,7 @@ pub fn copy_host>( /// Builder for the `CopyToMesh` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct CopyToMesh { - layout: ::std::option::Option<::std::string::String>, + mesh: ::std::option::Option<::std::string::String>, T: ::std::option::Option, control_inputs: ::std::vec::Vec, } @@ -37998,12 +39587,12 @@ impl CopyToMesh { Self::default() } - /// Sets the `layout` attribute. - pub fn layout>( + /// Sets the `mesh` attribute. 
+ pub fn mesh>( mut self, value: ArgType, ) -> Self { - self.layout = ::std::option::Option::Some(value.into()); + self.mesh = ::std::option::Option::Some(value.into()); self } @@ -38037,8 +39626,8 @@ impl CopyToMesh { for op in &self.control_inputs { nd.add_control_input(op); } - if let ::std::option::Option::Some(value) = &self.layout { - nd.set_attr_string("layout", value)?; + if let ::std::option::Option::Some(value) = &self.mesh { + nd.set_attr_string("mesh", value)?; } if let ::std::option::Option::Some(value) = &self.T { nd.set_attr_type("T", *value)?; @@ -38055,8 +39644,8 @@ impl CopyToMesh { ) -> crate::Result { let op = scope.new_operation("CopyToMesh", |builder| { builder.add_input(input); - if let ::std::option::Option::Some(value) = &self.layout { - builder.set_attr_string("layout", value)?; + if let ::std::option::Option::Some(value) = &self.mesh { + builder.set_attr_string("mesh", value)?; } if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; @@ -38098,7 +39687,6 @@ pub fn copy_to_mesh>( /// Builder for the `CopyToMeshGrad` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct CopyToMeshGrad { - reference_layout: ::std::option::Option<::std::string::String>, T: ::std::option::Option, control_inputs: ::std::vec::Vec, } @@ -38115,15 +39703,6 @@ impl CopyToMeshGrad { Self::default() } - /// Sets the `reference_layout` attribute. - pub fn reference_layout>( - mut self, - value: ArgType, - ) -> Self { - self.reference_layout = ::std::option::Option::Some(value.into()); - self - } - /// Sets the `T` attribute. pub fn T>(mut self, value: ArgType) -> Self { self.T = ::std::option::Option::Some(value.into()); @@ -38160,9 +39739,6 @@ impl CopyToMeshGrad { for op in &self.control_inputs { nd.add_control_input(op); } - if let ::std::option::Option::Some(value) = &self.reference_layout { - nd.set_attr_string("reference_layout", value)?; - } if let ::std::option::Option::Some(value) = &self.T { nd.set_attr_type("T", *value)?; } @@ -38180,9 +39756,6 @@ impl CopyToMeshGrad { let op = scope.new_operation("CopyToMeshGrad", |builder| { builder.add_input(input); builder.add_input(forward_input); - if let ::std::option::Option::Some(value) = &self.reference_layout { - builder.set_attr_string("reference_layout", value)?; - } if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } @@ -45614,6 +47187,94 @@ pub fn dataset_cardinality>( DatasetCardinality::new().build(input_dataset, scope) } +/// Builder for the `DatasetFingerprint` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct DatasetFingerprint { + control_inputs: ::std::vec::Vec, +} +/// An instance of 'DatasetFingerprint' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct DatasetFingerprintInst { + /// An instance of a fully built DatasetFingerprint Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl DatasetFingerprint { + /// Creates a new `DatasetFingerprint`. + pub fn new() -> Self { + Self::default() + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `DatasetFingerprint` operation. 
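Before the `DatasetFingerprint` build methods, a note on the `CopyToMesh` change above: the attribute is now `mesh` rather than `layout`, and `CopyToMeshGrad` no longer carries a `reference_layout`. A usage sketch; the mesh string below is a made-up stand-in, not a valid DTensor mesh specification:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let x = ops::Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
        let copied = ops::CopyToMesh::new()
            .mesh("example_mesh") // hypothetical; real DTensor meshes encode device layout
            .build(x, &mut scope)?;
        let _ = copied;
        Ok(())
    }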
+ pub fn build>( + &self, + input_dataset: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input_dataset.into(), scope) + } + fn build_impl( + &self, + input_dataset: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("DatasetFingerprint", |nd| { + nd.add_input(input_dataset); + for op in &self.control_inputs { + nd.add_control_input(op); + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'DatasetFingerprint' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input_dataset: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("DatasetFingerprint", |builder| { + builder.add_input(input_dataset); + ::std::result::Result::Ok(()) + })?; + Ok(DatasetFingerprintInst { op }) + } +} +impl DatasetFingerprintInst { + /// Returns the 'fingerprint' Output of this 'DatasetFingerprint' operation. + pub fn fingerprint(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input_dataset' Input of this 'DatasetFingerprint' operation. + pub fn input_dataset(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: DatasetFingerprintInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `DatasetFingerprint::new().build(input_dataset, scope)`. +pub fn dataset_fingerprint>( + input_dataset: O0, + scope: &mut crate::Scope, +) -> crate::Result { + DatasetFingerprint::new().build(input_dataset, scope) +} + /// Builder for the `DatasetFromGraph` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct DatasetFromGraph { @@ -54890,6 +56551,295 @@ pub fn dynamic_enqueue_tpuembedding_arbitrary_tensor_batch< ) } +/// Builder for the `DynamicEnqueueTPUEmbeddingRaggedTensorBatch` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct DynamicEnqueueTPUEmbeddingRaggedTensorBatch { + T1: ::std::option::Option, + T2: ::std::option::Option, + T3: ::std::option::Option, + N: ::std::option::Option, + combiners: ::std::option::Option<::std::vec::Vec<::std::string::String>>, + table_ids: ::std::option::Option<::std::vec::Vec>, + max_sequence_lengths: ::std::option::Option<::std::vec::Vec>, + num_features: ::std::option::Option<::std::vec::Vec>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'DynamicEnqueueTPUEmbeddingRaggedTensorBatch' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct DynamicEnqueueTPUEmbeddingRaggedTensorBatchInst { + /// An instance of a fully built DynamicEnqueueTPUEmbeddingRaggedTensorBatch Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl DynamicEnqueueTPUEmbeddingRaggedTensorBatch { + /// Creates a new `DynamicEnqueueTPUEmbeddingRaggedTensorBatch`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T1` attribute. + pub fn T1>(mut self, value: ArgType) -> Self { + self.T1 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `T2` attribute. + pub fn T2>(mut self, value: ArgType) -> Self { + self.T2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `T3` attribute. + pub fn T3>(mut self, value: ArgType) -> Self { + self.T3 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `N` attribute. 
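The new `DatasetFingerprint` op takes a dataset variant and yields a single `fingerprint` output. In the sketch below, a Variant placeholder stands in for a real dataset handle; in practice the input would come from one of the *Dataset ops:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        // Variant placeholder as a stand-in for a dataset produced elsewhere.
        let dataset = ops::Placeholder::new().dtype(DataType::Variant).build(&mut scope)?;
        let fingerprint = ops::dataset_fingerprint(dataset, &mut scope)?;
        let _ = fingerprint;
        Ok(())
    }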
+ pub fn N>(mut self, value: ArgType) -> Self { + self.N = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `combiners` attribute. + pub fn combiners>>( + mut self, + value: ArgType, + ) -> Self { + self.combiners = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_ids` attribute. + pub fn table_ids>>( + mut self, + value: ArgType, + ) -> Self { + self.table_ids = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_sequence_lengths` attribute. + pub fn max_sequence_lengths>>( + mut self, + value: ArgType, + ) -> Self { + self.max_sequence_lengths = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_features` attribute. + pub fn num_features>>( + mut self, + value: ArgType, + ) -> Self { + self.num_features = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `DynamicEnqueueTPUEmbeddingRaggedTensorBatch` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + >( + &self, + sample_splits: O0, + embedding_indices: O1, + aggregation_weights: O2, + mode_override: O3, + device_ordinal: O4, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + sample_splits.into(), + embedding_indices.into(), + aggregation_weights.into(), + mode_override.into(), + device_ordinal.into(), + scope, + ) + } + fn build_impl( + &self, + sample_splits: crate::Output, + embedding_indices: crate::Output, + aggregation_weights: crate::Output, + mode_override: crate::Output, + device_ordinal: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("DynamicEnqueueTPUEmbeddingRaggedTensorBatch", |nd| { + nd.add_input(sample_splits); + nd.add_input(embedding_indices); + nd.add_input(aggregation_weights); + nd.add_input(mode_override); + nd.add_input(device_ordinal); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T1 { + nd.set_attr_type("T1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.T2 { + nd.set_attr_type("T2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.T3 { + nd.set_attr_type("T3", *value)?; + } + if let ::std::option::Option::Some(value) = &self.N { + nd.set_attr_int("N", *value)?; + } + if let ::std::option::Option::Some(value) = &self.combiners { + nd.set_attr_string_list("combiners", value)?; + } + if let ::std::option::Option::Some(value) = &self.table_ids { + nd.set_attr_int_list("table_ids", value)?; + } + if let ::std::option::Option::Some(value) = &self.max_sequence_lengths { + nd.set_attr_int_list("max_sequence_lengths", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_features { + nd.set_attr_int_list("num_features", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'DynamicEnqueueTPUEmbeddingRaggedTensorBatch' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + sample_splits: Vec, + embedding_indices: Vec, + aggregation_weights: Vec, + mode_override: crate::Output, + device_ordinal: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("DynamicEnqueueTPUEmbeddingRaggedTensorBatch", |builder| { + builder.add_input_list(&sample_splits); + builder.add_input_list(&embedding_indices); + builder.add_input_list(&aggregation_weights); + builder.add_input(mode_override); + builder.add_input(device_ordinal); + if let ::std::option::Option::Some(value) = &self.T1 { + builder.set_attr_type("T1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.T2 { + builder.set_attr_type("T2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.T3 { + builder.set_attr_type("T3", *value)?; + } + builder.set_attr_int("N", sample_splits.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.combiners { + builder.set_attr_string_list("combiners", value)?; + } + if let ::std::option::Option::Some(value) = &self.table_ids { + builder.set_attr_int_list("table_ids", value)?; + } + if let ::std::option::Option::Some(value) = &self.max_sequence_lengths { + builder.set_attr_int_list("max_sequence_lengths", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_features { + builder.set_attr_int_list("num_features", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(DynamicEnqueueTPUEmbeddingRaggedTensorBatchInst { op }) + } +} +impl DynamicEnqueueTPUEmbeddingRaggedTensorBatchInst { + /// Returns a Vector of sample_splits for 'sample_splits' Input of this DynamicEnqueueTPUEmbeddingRaggedTensorBatch operation. + pub fn sample_splits(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of embedding_indices for 'embedding_indices' Input of this DynamicEnqueueTPUEmbeddingRaggedTensorBatch operation. + pub fn embedding_indices(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("N")? + 1) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of aggregation_weights for 'aggregation_weights' Input of this DynamicEnqueueTPUEmbeddingRaggedTensorBatch operation. + pub fn aggregation_weights(&self) -> crate::Result> { + let dynamic_offset = (2 * self.op.get_attr_int("N")? + 2) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns the 'mode_override' Input of this 'DynamicEnqueueTPUEmbeddingRaggedTensorBatch' operation. + pub fn mode_override(&self) -> crate::Result { + let dynamic_offset = (3 * self.op.get_attr_int("N")? + 3) as i32; + Ok(crate::Input { + operation: &self.op, + index: dynamic_offset, + }) + } + /// Returns the 'device_ordinal' Input of this 'DynamicEnqueueTPUEmbeddingRaggedTensorBatch' operation. + pub fn device_ordinal(&self) -> crate::Result { + let dynamic_offset = (3 * self.op.get_attr_int("N")? 
+ 4) as i32; + Ok(crate::Input { + operation: &self.op, + index: dynamic_offset, + }) + } +} +impl From for crate::Operation { + fn from(inst: DynamicEnqueueTPUEmbeddingRaggedTensorBatchInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `DynamicEnqueueTPUEmbeddingRaggedTensorBatch::new().build(sample_splits, embedding_indices, aggregation_weights, mode_override, device_ordinal, scope)`. +pub fn dynamic_enqueue_tpuembedding_ragged_tensor_batch< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, +>( + sample_splits: O0, + embedding_indices: O1, + aggregation_weights: O2, + mode_override: O3, + device_ordinal: O4, + scope: &mut crate::Scope, +) -> crate::Result { + DynamicEnqueueTPUEmbeddingRaggedTensorBatch::new().build( + sample_splits, + embedding_indices, + aggregation_weights, + mode_override, + device_ordinal, + scope, + ) +} + /// Builder for the `DynamicPartition` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct DynamicPartition { @@ -67015,6 +68965,144 @@ pub fn fft3_d>( FFT3D::new().build(input, scope) } +/// Builder for the `FFTND` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct FFTND { + Tcomplex: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'FFTND' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct FFTNDInst { + /// An instance of a fully built FFTND Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl FFTND { + /// Creates a new `FFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Tcomplex` attribute. + pub fn Tcomplex>( + mut self, + value: ArgType, + ) -> Self { + self.Tcomplex = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `FFTND` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input.into(), fft_length.into(), axes.into(), scope) + } + fn build_impl( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("FFTND", |nd| { + nd.add_input(input); + nd.add_input(fft_length); + nd.add_input(axes); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + nd.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'FFTND' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("FFTND", |builder| { + builder.add_input(input); + builder.add_input(fft_length); + builder.add_input(axes); + if let ::std::option::Option::Some(value) = &self.Tcomplex { + builder.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(FFTNDInst { op }) + } +} +impl FFTNDInst { + /// Returns the 'output' Output of this 'FFTND' operation. 
+ pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input' Input of this 'FFTND' operation. + pub fn input(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'fft_length' Input of this 'FFTND' operation. + pub fn fft_length(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'axes' Input of this 'FFTND' operation. + pub fn axes(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: FFTNDInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `FFTND::new().build(input, fft_length, axes, scope)`. +pub fn fftnd< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, +) -> crate::Result { + FFTND::new().build(input, fft_length, axes, scope) +} + /// Builder for the `FIFOQueue` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct FIFOQueue { @@ -69286,6 +71374,120 @@ pub fn finalize_tpuembedding< FinalizeTPUEmbedding::new().build(common_config, memory_config, scope) } +/// Builder for the `FinalizeTPUEmbeddingV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct FinalizeTPUEmbeddingV2 { + control_inputs: ::std::vec::Vec, +} +/// An instance of 'FinalizeTPUEmbeddingV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct FinalizeTPUEmbeddingV2Inst { + /// An instance of a fully built FinalizeTPUEmbeddingV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl FinalizeTPUEmbeddingV2 { + /// Creates a new `FinalizeTPUEmbeddingV2`. + pub fn new() -> Self { + Self::default() + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `FinalizeTPUEmbeddingV2` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + >( + &self, + common_config: O0, + memory_config: O1, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(common_config.into(), memory_config.into(), scope) + } + fn build_impl( + &self, + common_config: crate::Output, + memory_config: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("FinalizeTPUEmbeddingV2", |nd| { + nd.add_input(common_config); + nd.add_input(memory_config); + for op in &self.control_inputs { + nd.add_control_input(op); + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'FinalizeTPUEmbeddingV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + common_config: crate::Output, + memory_config: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("FinalizeTPUEmbeddingV2", |builder| { + builder.add_input(common_config); + builder.add_input(memory_config); + ::std::result::Result::Ok(()) + })?; + Ok(FinalizeTPUEmbeddingV2Inst { op }) + } +} +impl FinalizeTPUEmbeddingV2Inst { + /// Returns the 'embedding_partitions' Output of this 'FinalizeTPUEmbeddingV2' operation. + pub fn embedding_partitions(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'hbm_buffers_config' Output of this 'FinalizeTPUEmbeddingV2' operation. 
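Before finishing `FinalizeTPUEmbeddingV2`, a sketch for the new N-dimensional FFT op above. The dtypes follow the usual TF convention (complex input, int32 fft_length and axes) but are assumptions here:

    use tensorflow::{ops, DataType, Scope};

    fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        let mut scope = Scope::new_root_scope();
        let input = ops::Placeholder::new().dtype(DataType::Complex64).build(&mut scope)?;
        let fft_length = ops::Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
        let axes = ops::Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
        // Unlike FFT/FFT2D/FFT3D, the transformed axes are a runtime input.
        let out = ops::fftnd(input, fft_length, axes, &mut scope)?;
        let _ = out;
        Ok(())
    }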
+ pub fn hbm_buffers_config(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'common_config' Input of this 'FinalizeTPUEmbeddingV2' operation. + pub fn common_config(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'memory_config' Input of this 'FinalizeTPUEmbeddingV2' operation. + pub fn memory_config(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } +} +impl From for crate::Operation { + fn from(inst: FinalizeTPUEmbeddingV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `FinalizeTPUEmbeddingV2::new().build(common_config, memory_config, scope)`. +pub fn finalize_tpuembedding_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, +>( + common_config: O0, + memory_config: O1, + scope: &mut crate::Scope, +) -> crate::Result { + FinalizeTPUEmbeddingV2::new().build(common_config, memory_config, scope) +} + /// Builder for the `Fingerprint` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Fingerprint { @@ -74750,6 +76952,7 @@ pub fn gather, O1: ::std::convert::Into< pub struct GatherNd { Tparams: ::std::option::Option, Tindices: ::std::option::Option, + bad_indices_policy: ::std::option::Option<::std::string::String>, control_inputs: ::std::vec::Vec, } /// An instance of 'GatherNd' Operation with it's Outputs and Inputs exposed as methods. @@ -74783,6 +76986,15 @@ impl GatherNd { self } + /// Sets the `bad_indices_policy` attribute. + pub fn bad_indices_policy>( + mut self, + value: ArgType, + ) -> Self { + self.bad_indices_policy = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -74819,6 +77031,9 @@ impl GatherNd { if let ::std::option::Option::Some(value) = &self.Tindices { nd.set_attr_type("Tindices", *value)?; } + if let ::std::option::Option::Some(value) = &self.bad_indices_policy { + nd.set_attr_string("bad_indices_policy", value)?; + } ::std::result::Result::Ok(()) }) } @@ -74839,6 +77054,9 @@ impl GatherNd { if let ::std::option::Option::Some(value) = &self.Tindices { builder.set_attr_type("Tindices", *value)?; } + if let ::std::option::Option::Some(value) = &self.bad_indices_policy { + builder.set_attr_string("bad_indices_policy", value)?; + } ::std::result::Result::Ok(()) })?; Ok(GatherNdInst { op }) @@ -75852,6 +78070,642 @@ pub fn get_element_at_index< GetElementAtIndex::new().build(dataset, index, scope) } +/// Builder for the `GetMinibatchSplitsWithPhysicalReplica` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct GetMinibatchSplitsWithPhysicalReplica { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_splits: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'GetMinibatchSplitsWithPhysicalReplica' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct GetMinibatchSplitsWithPhysicalReplicaInst { + /// An instance of a fully built GetMinibatchSplitsWithPhysicalReplica Operation in a Tensorflow graph. 
+ pub op: crate::Operation, +} + +impl GetMinibatchSplitsWithPhysicalReplica { + /// Creates a new `GetMinibatchSplitsWithPhysicalReplica`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mini_batch_splits` attribute. + pub fn mini_batch_splits>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_splits = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `GetMinibatchSplitsWithPhysicalReplica` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + program_key: O0, + row_ids: O1, + col_ids: O2, + gains: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + program_key.into(), + row_ids.into(), + col_ids.into(), + gains.into(), + scope, + ) + } + fn build_impl( + &self, + program_key: crate::Output, + row_ids: crate::Output, + col_ids: crate::Output, + gains: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("GetMinibatchSplitsWithPhysicalReplica", |nd| { + nd.add_input(program_key); + nd.add_input(row_ids); + nd.add_input(col_ids); + nd.add_input(gains); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count { + nd.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + nd.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + nd.set_attr_string("mini_batch_splits", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'GetMinibatchSplitsWithPhysicalReplica' Operation with it's Outputs and Inputs exposed as methods. 
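+    ///
+    /// A hedged sketch of the instance form; the placeholder inputs and the
+    /// `table_name` value are illustrative assumptions only:
+    ///
+    /// ```ignore
+    /// let mut scope = Scope::new_root_scope();
+    /// let program_key = Placeholder::new().dtype(DataType::String).build(&mut scope)?;
+    /// let row_ids = Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
+    /// let col_ids = Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
+    /// let gains = Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
+    /// let inst = GetMinibatchSplitsWithPhysicalReplica::new()
+    ///     .table_name("table_0")
+    ///     .build_instance(
+    ///         program_key.into(),
+    ///         row_ids.into(),
+    ///         col_ids.into(),
+    ///         gains.into(),
+    ///         &mut scope,
+    ///     )?;
+    /// let splits = inst.splits(); // output index 3
+    /// ```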
+ pub fn build_instance( + &self, + program_key: crate::Output, + row_ids: crate::Output, + col_ids: crate::Output, + gains: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("GetMinibatchSplitsWithPhysicalReplica", |builder| { + builder.add_input(program_key); + builder.add_input(row_ids); + builder.add_input(col_ids); + builder.add_input(gains); + if let ::std::option::Option::Some(value) = &self.sample_count { + builder.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + builder.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + builder.set_attr_string("mini_batch_splits", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(GetMinibatchSplitsWithPhysicalReplicaInst { op }) + } +} +impl GetMinibatchSplitsWithPhysicalReplicaInst { + /// Returns the 'sorted_row_ids' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn sorted_row_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_col_ids' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn sorted_col_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'sorted_gains' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn sorted_gains(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'splits' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn splits(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 3, + } + } + /// Returns the 'id_counts' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn id_counts(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 4, + } + } + /// Returns the 'max_ids' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn max_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 5, + } + } + /// Returns the 'max_uniques' Output of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn max_uniques(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 6, + } + } + /// Returns the 'program_key' Input of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn program_key(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'row_ids' Input of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn row_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'col_ids' Input of this 'GetMinibatchSplitsWithPhysicalReplica' operation. 
+ pub fn col_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'gains' Input of this 'GetMinibatchSplitsWithPhysicalReplica' operation. + pub fn gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: GetMinibatchSplitsWithPhysicalReplicaInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `GetMinibatchSplitsWithPhysicalReplica::new().build(program_key, row_ids, col_ids, gains, scope)`. +pub fn get_minibatch_splits_with_physical_replica< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + program_key: O0, + row_ids: O1, + col_ids: O2, + gains: O3, + scope: &mut crate::Scope, +) -> crate::Result { + GetMinibatchSplitsWithPhysicalReplica::new().build(program_key, row_ids, col_ids, gains, scope) +} + +/// Builder for the `GetMinibatchesInCsrWithPhysicalReplica` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct GetMinibatchesInCsrWithPhysicalReplica { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + max_minibatches_per_sc: ::std::option::Option, + max_ids_per_chip_per_sample: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_in_csr: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'GetMinibatchesInCsrWithPhysicalReplica' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct GetMinibatchesInCsrWithPhysicalReplicaInst { + /// An instance of a fully built GetMinibatchesInCsrWithPhysicalReplica Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl GetMinibatchesInCsrWithPhysicalReplica { + /// Creates a new `GetMinibatchesInCsrWithPhysicalReplica`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_minibatches_per_sc` attribute. + pub fn max_minibatches_per_sc>( + mut self, + value: ArgType, + ) -> Self { + self.max_minibatches_per_sc = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_chip_per_sample` attribute. + pub fn max_ids_per_chip_per_sample>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_chip_per_sample = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. 
+ pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `mini_batch_in_csr` attribute. + pub fn mini_batch_in_csr>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_in_csr = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `GetMinibatchesInCsrWithPhysicalReplica` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + >( + &self, + program_key: O0, + row_ids: O1, + col_ids: O2, + gains: O3, + splits: O4, + id_counts: O5, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + program_key.into(), + row_ids.into(), + col_ids.into(), + gains.into(), + splits.into(), + id_counts.into(), + scope, + ) + } + fn build_impl( + &self, + program_key: crate::Output, + row_ids: crate::Output, + col_ids: crate::Output, + gains: crate::Output, + splits: crate::Output, + id_counts: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("GetMinibatchesInCsrWithPhysicalReplica", |nd| { + nd.add_input(program_key); + nd.add_input(row_ids); + nd.add_input(col_ids); + nd.add_input(gains); + nd.add_input(splits); + nd.add_input(id_counts); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count { + nd.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + nd.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + nd.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + nd.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_in_csr { + nd.set_attr_string("mini_batch_in_csr", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'GetMinibatchesInCsrWithPhysicalReplica' Operation with it's Outputs and Inputs exposed as methods. 
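+    ///
+    /// The `splits` and `id_counts` inputs here appear to correspond to the
+    /// `splits` and `id_counts` outputs of `GetMinibatchSplitsWithPhysicalReplica`
+    /// above; that pairing is inferred from the matching names and is not
+    /// enforced by these generated bindings.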
+ pub fn build_instance( + &self, + program_key: crate::Output, + row_ids: crate::Output, + col_ids: crate::Output, + gains: crate::Output, + splits: crate::Output, + id_counts: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("GetMinibatchesInCsrWithPhysicalReplica", |builder| { + builder.add_input(program_key); + builder.add_input(row_ids); + builder.add_input(col_ids); + builder.add_input(gains); + builder.add_input(splits); + builder.add_input(id_counts); + if let ::std::option::Option::Some(value) = &self.sample_count { + builder.set_attr_int("sample_count", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_minibatches_per_sc { + builder.set_attr_int("max_minibatches_per_sc", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_chip_per_sample { + builder.set_attr_int("max_ids_per_chip_per_sample", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + builder.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.mini_batch_in_csr { + builder.set_attr_string("mini_batch_in_csr", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(GetMinibatchesInCsrWithPhysicalReplicaInst { op }) + } +} +impl GetMinibatchesInCsrWithPhysicalReplicaInst { + /// Returns the 'row_pointers' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn row_pointers(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn sorted_sample_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'sorted_token_ids' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn sorted_token_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'sorted_gains' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn sorted_gains(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 3, + } + } + /// Returns the 'row_pointers_unpadded_size' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn row_pointers_unpadded_size(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 4, + } + } + /// Returns the 'ids_unpadded_size' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn ids_unpadded_size(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 5, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Output of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 6, + } + } + /// Returns the 'program_key' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. 
+ pub fn program_key(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'row_ids' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn row_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'col_ids' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn col_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'gains' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'splits' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn splits(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'id_counts' Input of this 'GetMinibatchesInCsrWithPhysicalReplica' operation. + pub fn id_counts(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } +} +impl From for crate::Operation { + fn from(inst: GetMinibatchesInCsrWithPhysicalReplicaInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `GetMinibatchesInCsrWithPhysicalReplica::new().build(program_key, row_ids, col_ids, gains, splits, id_counts, scope)`. +pub fn get_minibatches_in_csr_with_physical_replica< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, +>( + program_key: O0, + row_ids: O1, + col_ids: O2, + gains: O3, + splits: O4, + id_counts: O5, + scope: &mut crate::Scope, +) -> crate::Result { + GetMinibatchesInCsrWithPhysicalReplica::new().build( + program_key, + row_ids, + col_ids, + gains, + splits, + id_counts, + scope, + ) +} + /// Builder for the `GetOptions` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct GetOptions { @@ -76243,6 +79097,601 @@ pub fn get_session_tensor>( GetSessionTensor::new().build(handle, scope) } +/// Builder for the `GetStatsFromListOfSparseCoreCooTensors` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct GetStatsFromListOfSparseCoreCooTensors { + sample_count_list: ::std::option::Option<::std::vec::Vec>, + col_offset_list: ::std::option::Option<::std::vec::Vec>, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + N: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'GetStatsFromListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct GetStatsFromListOfSparseCoreCooTensorsInst { + /// An instance of a fully built GetStatsFromListOfSparseCoreCooTensors Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl GetStatsFromListOfSparseCoreCooTensors { + /// Creates a new `GetStatsFromListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_list` attribute. + pub fn sample_count_list>>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset_list` attribute. 
+ pub fn col_offset_list>>( + mut self, + value: ArgType, + ) -> Self { + self.col_offset_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `N` attribute. + pub fn N>(mut self, value: ArgType) -> Self { + self.N = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `GetStatsFromListOfSparseCoreCooTensors` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + row_ids_list: O0, + col_ids_list: O1, + gains_list: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_ids_list.into(), + col_ids_list.into(), + gains_list.into(), + scope, + ) + } + fn build_impl( + &self, + row_ids_list: crate::Output, + col_ids_list: crate::Output, + gains_list: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("GetStatsFromListOfSparseCoreCooTensors", |nd| { + nd.add_input(row_ids_list); + nd.add_input(col_ids_list); + nd.add_input(gains_list); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count_list { + nd.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + nd.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + nd.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.N { + nd.set_attr_int("N", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'GetStatsFromListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. 
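+    ///
+    /// Unlike `build`, this form takes each list input as a `Vec` of outputs and
+    /// derives the `N` attribute from `row_ids_list.len()`, so all three lists
+    /// are expected to have the same length.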
+ pub fn build_instance( + &self, + row_ids_list: Vec, + col_ids_list: Vec, + gains_list: Vec, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("GetStatsFromListOfSparseCoreCooTensors", |builder| { + builder.add_input_list(&row_ids_list); + builder.add_input_list(&col_ids_list); + builder.add_input_list(&gains_list); + if let ::std::option::Option::Some(value) = &self.sample_count_list { + builder.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + builder.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + builder.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + builder.set_attr_int("N", row_ids_list.clone().len() as i64)?; + ::std::result::Result::Ok(()) + })?; + Ok(GetStatsFromListOfSparseCoreCooTensorsInst { op }) + } +} +impl GetStatsFromListOfSparseCoreCooTensorsInst { + /// Returns the 'max_ids_per_sparse_core' Output of this 'GetStatsFromListOfSparseCoreCooTensors' operation. + pub fn max_ids_per_sparse_core(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'max_unique_ids_per_sparse_core' Output of this 'GetStatsFromListOfSparseCoreCooTensors' operation. + pub fn max_unique_ids_per_sparse_core(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns a Vector of row_ids_list for 'row_ids_list' Input of this GetStatsFromListOfSparseCoreCooTensors operation. + pub fn row_ids_list(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of col_ids_list for 'col_ids_list' Input of this GetStatsFromListOfSparseCoreCooTensors operation. + pub fn col_ids_list(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("N")? + 1) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of gains_list for 'gains_list' Input of this GetStatsFromListOfSparseCoreCooTensors operation. + pub fn gains_list(&self) -> crate::Result> { + let dynamic_offset = (2 * self.op.get_attr_int("N")? + 2) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } +} +impl From for crate::Operation { + fn from(inst: GetStatsFromListOfSparseCoreCooTensorsInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `GetStatsFromListOfSparseCoreCooTensors::new().build(row_ids_list, col_ids_list, gains_list, scope)`. 
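+///
+/// A hedged usage sketch; the placeholder inputs are illustrative assumptions,
+/// and attributes such as `num_replica` or `table_name` would normally be set
+/// via the builder instead:
+///
+/// ```ignore
+/// let mut scope = Scope::new_root_scope();
+/// let row_ids = Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
+/// let col_ids = Placeholder::new().dtype(DataType::Int32).build(&mut scope)?;
+/// let gains = Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
+/// let op = get_stats_from_list_of_sparse_core_coo_tensors(row_ids, col_ids, gains, &mut scope)?;
+/// ```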
+pub fn get_stats_from_list_of_sparse_core_coo_tensors<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+>(
+    row_ids_list: O0,
+    col_ids_list: O1,
+    gains_list: O2,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    GetStatsFromListOfSparseCoreCooTensors::new().build(
+        row_ids_list,
+        col_ids_list,
+        gains_list,
+        scope,
+    )
+}
+
+/// Builder for the `GetTpuTaskId` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct GetTpuTaskId {
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'GetTpuTaskId' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct GetTpuTaskIdInst {
+    /// An instance of a fully built GetTpuTaskId Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl GetTpuTaskId {
+    /// Creates a new `GetTpuTaskId`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `GetTpuTaskId` operation.
+    pub fn build(&self, scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+        self.build_impl(scope)
+    }
+    fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+        scope.new_operation("GetTpuTaskId", |nd| {
+            for op in &self.control_inputs {
+                nd.add_control_input(op);
+            }
+            ::std::result::Result::Ok(())
+        })
+    }
+
+    /// Builds a new instance of 'GetTpuTaskId' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(&self, scope: &mut crate::Scope) -> crate::Result<GetTpuTaskIdInst> {
+        let op = scope.new_operation("GetTpuTaskId", |builder| ::std::result::Result::Ok(()))?;
+        Ok(GetTpuTaskIdInst { op })
+    }
+}
+impl GetTpuTaskIdInst {
+    /// Returns the 'tpu_task_id' Output of this 'GetTpuTaskId' operation.
+    pub fn tpu_task_id(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+}
+impl From<GetTpuTaskIdInst> for crate::Operation {
+    fn from(inst: GetTpuTaskIdInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `GetTpuTaskId::new().build(scope)`.
+pub fn get_tpu_task_id(scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+    GetTpuTaskId::new().build(scope)
+}
+
+/// Builder for the `GlobalIterId` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct GlobalIterId {
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'GlobalIterId' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct GlobalIterIdInst {
+    /// An instance of a fully built GlobalIterId Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl GlobalIterId {
+    /// Creates a new `GlobalIterId`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `GlobalIterId` operation.
+    pub fn build(&self, scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+        self.build_impl(scope)
+    }
+    fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+        scope.new_operation("GlobalIterId", |nd| {
+            for op in &self.control_inputs {
+                nd.add_control_input(op);
+            }
+            ::std::result::Result::Ok(())
+        })
+    }
+
+    /// Builds a new instance of 'GlobalIterId' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(&self, scope: &mut crate::Scope) -> crate::Result<GlobalIterIdInst> {
+        let op = scope.new_operation("GlobalIterId", |builder| ::std::result::Result::Ok(()))?;
+        Ok(GlobalIterIdInst { op })
+    }
+}
+impl GlobalIterIdInst {
+    /// Returns the 'iter_id' Output of this 'GlobalIterId' operation.
+    pub fn iter_id(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+}
+impl From<GlobalIterIdInst> for crate::Operation {
+    fn from(inst: GlobalIterIdInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `GlobalIterId::new().build(scope)`.
+pub fn global_iter_id(scope: &mut crate::Scope) -> crate::Result<crate::Operation> {
+    GlobalIterId::new().build(scope)
+}
+
+/// Builder for the `GlobalShuffleDataset` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct GlobalShuffleDataset {
+    reshuffle_each_iteration: ::std::option::Option<bool>,
+    output_types: ::std::option::Option<::std::vec::Vec<crate::DataType>>,
+    output_shapes: ::std::option::Option<::std::vec::Vec<crate::Shape>>,
+    metadata: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'GlobalShuffleDataset' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct GlobalShuffleDatasetInst {
+    /// An instance of a fully built GlobalShuffleDataset Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl GlobalShuffleDataset {
+    /// Creates a new `GlobalShuffleDataset`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `reshuffle_each_iteration` attribute.
+    pub fn reshuffle_each_iteration<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.reshuffle_each_iteration = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `output_types` attribute.
+    pub fn output_types<ArgType: ::std::convert::Into<::std::vec::Vec<crate::DataType>>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.output_types = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `output_shapes` attribute.
+    pub fn output_shapes<ArgType: ::std::convert::Into<::std::vec::Vec<crate::Shape>>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.output_shapes = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `metadata` attribute.
+    pub fn metadata<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.metadata = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `GlobalShuffleDataset` operation.
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + input_dataset: O0, + seed: O1, + seed2: O2, + seed_generator: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + input_dataset.into(), + seed.into(), + seed2.into(), + seed_generator.into(), + scope, + ) + } + fn build_impl( + &self, + input_dataset: crate::Output, + seed: crate::Output, + seed2: crate::Output, + seed_generator: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("GlobalShuffleDataset", |nd| { + nd.add_input(input_dataset); + nd.add_input(seed); + nd.add_input(seed2); + nd.add_input(seed_generator); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.reshuffle_each_iteration { + nd.set_attr_bool("reshuffle_each_iteration", *value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + nd.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + nd.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + nd.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'GlobalShuffleDataset' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input_dataset: crate::Output, + seed: crate::Output, + seed2: crate::Output, + seed_generator: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("GlobalShuffleDataset", |builder| { + builder.add_input(input_dataset); + builder.add_input(seed); + builder.add_input(seed2); + builder.add_input(seed_generator); + if let ::std::option::Option::Some(value) = &self.reshuffle_each_iteration { + builder.set_attr_bool("reshuffle_each_iteration", *value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + builder.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + builder.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + builder.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(GlobalShuffleDatasetInst { op }) + } +} +impl GlobalShuffleDatasetInst { + /// Returns the 'handle' Output of this 'GlobalShuffleDataset' operation. + pub fn handle(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input_dataset' Input of this 'GlobalShuffleDataset' operation. + pub fn input_dataset(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'seed' Input of this 'GlobalShuffleDataset' operation. + pub fn seed(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'seed2' Input of this 'GlobalShuffleDataset' operation. + pub fn seed2(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'seed_generator' Input of this 'GlobalShuffleDataset' operation. 
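+    /// This is the fourth input (index 3), following `input_dataset` (0),
+    /// `seed` (1), and `seed2` (2).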
+ pub fn seed_generator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: GlobalShuffleDatasetInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `GlobalShuffleDataset::new().build(input_dataset, seed, seed2, seed_generator, scope)`. +pub fn global_shuffle_dataset< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + input_dataset: O0, + seed: O1, + seed2: O2, + seed_generator: O3, + scope: &mut crate::Scope, +) -> crate::Result { + GlobalShuffleDataset::new().build(input_dataset, seed, seed2, seed_generator, scope) +} + /// Builder for the `Greater` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Greater { @@ -78278,6 +81727,144 @@ pub fn ifft3_d>( IFFT3D::new().build(input, scope) } +/// Builder for the `IFFTND` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct IFFTND { + Tcomplex: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'IFFTND' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct IFFTNDInst { + /// An instance of a fully built IFFTND Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl IFFTND { + /// Creates a new `IFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Tcomplex` attribute. + pub fn Tcomplex>( + mut self, + value: ArgType, + ) -> Self { + self.Tcomplex = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `IFFTND` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input.into(), fft_length.into(), axes.into(), scope) + } + fn build_impl( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("IFFTND", |nd| { + nd.add_input(input); + nd.add_input(fft_length); + nd.add_input(axes); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + nd.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'IFFTND' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("IFFTND", |builder| { + builder.add_input(input); + builder.add_input(fft_length); + builder.add_input(axes); + if let ::std::option::Option::Some(value) = &self.Tcomplex { + builder.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(IFFTNDInst { op }) + } +} +impl IFFTNDInst { + /// Returns the 'output' Output of this 'IFFTND' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input' Input of this 'IFFTND' operation. + pub fn input(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'fft_length' Input of this 'IFFTND' operation. 
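+    /// `fft_length` is the second input (index 1); `axes` follows at index 2.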
+ pub fn fft_length(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'axes' Input of this 'IFFTND' operation. + pub fn axes(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: IFFTNDInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `IFFTND::new().build(input, fft_length, axes, scope)`. +pub fn ifftnd< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, +) -> crate::Result { + IFFTND::new().build(input, fft_length, axes, scope) +} + /// Builder for the `IRFFT` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct IRFFT { @@ -78683,6 +82270,157 @@ pub fn irfft3_d< IRFFT3D::new().build(input, fft_length, scope) } +/// Builder for the `IRFFTND` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct IRFFTND { + Treal: ::std::option::Option, + Tcomplex: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'IRFFTND' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct IRFFTNDInst { + /// An instance of a fully built IRFFTND Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl IRFFTND { + /// Creates a new `IRFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Treal` attribute. + pub fn Treal>(mut self, value: ArgType) -> Self { + self.Treal = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tcomplex` attribute. + pub fn Tcomplex>( + mut self, + value: ArgType, + ) -> Self { + self.Tcomplex = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `IRFFTND` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input.into(), fft_length.into(), axes.into(), scope) + } + fn build_impl( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("IRFFTND", |nd| { + nd.add_input(input); + nd.add_input(fft_length); + nd.add_input(axes); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.Treal { + nd.set_attr_type("Treal", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + nd.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'IRFFTND' Operation with it's Outputs and Inputs exposed as methods. 
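+    ///
+    /// Usage mirrors `FFTND::build_instance` (see the sketch on
+    /// `FFTNDInst::output` above); the additional `Treal` attribute presumably
+    /// selects the real output dtype, by analogy with the existing `IRFFT` ops.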
+ pub fn build_instance( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("IRFFTND", |builder| { + builder.add_input(input); + builder.add_input(fft_length); + builder.add_input(axes); + if let ::std::option::Option::Some(value) = &self.Treal { + builder.set_attr_type("Treal", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + builder.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(IRFFTNDInst { op }) + } +} +impl IRFFTNDInst { + /// Returns the 'output' Output of this 'IRFFTND' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input' Input of this 'IRFFTND' operation. + pub fn input(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'fft_length' Input of this 'IRFFTND' operation. + pub fn fft_length(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'axes' Input of this 'IRFFTND' operation. + pub fn axes(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: IRFFTNDInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `IRFFTND::new().build(input, fft_length, axes, scope)`. +pub fn irfftnd< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, +) -> crate::Result { + IRFFTND::new().build(input, fft_length, axes, scope) +} + /// Builder for the `Identity` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Identity { @@ -80858,6 +84596,267 @@ pub fn in_top_kv2< InTopKV2::new().build(predictions, targets, k, scope) } +/// Builder for the `IndexFlatMapDataset` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct IndexFlatMapDataset { + map_func: ::std::option::Option<::std::string::String>, + index_map_func: ::std::option::Option<::std::string::String>, + Tmap_func_args: ::std::option::Option<::std::vec::Vec>, + Tindex_map_func_args: ::std::option::Option<::std::vec::Vec>, + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + metadata: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'IndexFlatMapDataset' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct IndexFlatMapDatasetInst { + /// An instance of a fully built IndexFlatMapDataset Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl IndexFlatMapDataset { + /// Creates a new `IndexFlatMapDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `map_func` attribute. + pub fn map_func>( + mut self, + value: ArgType, + ) -> Self { + self.map_func = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `index_map_func` attribute. + pub fn index_map_func>( + mut self, + value: ArgType, + ) -> Self { + self.index_map_func = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tmap_func_args` attribute. + pub fn Tmap_func_args>>( + mut self, + value: ArgType, + ) -> Self { + self.Tmap_func_args = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tindex_map_func_args` attribute. 
+ pub fn Tindex_map_func_args>>( + mut self, + value: ArgType, + ) -> Self { + self.Tindex_map_func_args = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_types` attribute. + pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. + pub fn output_shapes>>( + mut self, + value: ArgType, + ) -> Self { + self.output_shapes = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `metadata` attribute. + pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `IndexFlatMapDataset` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + input_dataset: O0, + map_func_other_args: O1, + index_map_func_other_args: O2, + output_cardinality: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + input_dataset.into(), + map_func_other_args.into(), + index_map_func_other_args.into(), + output_cardinality.into(), + scope, + ) + } + fn build_impl( + &self, + input_dataset: crate::Output, + map_func_other_args: crate::Output, + index_map_func_other_args: crate::Output, + output_cardinality: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("IndexFlatMapDataset", |nd| { + nd.add_input(input_dataset); + nd.add_input(map_func_other_args); + nd.add_input(index_map_func_other_args); + nd.add_input(output_cardinality); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.map_func { + nd.set_attr_string("map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.index_map_func { + nd.set_attr_string("index_map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tmap_func_args { + nd.set_attr_type_list("Tmap_func_args", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tindex_map_func_args { + nd.set_attr_type_list("Tindex_map_func_args", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + nd.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + nd.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + nd.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'IndexFlatMapDataset' Operation with it's Outputs and Inputs exposed as methods. 
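+    ///
+    /// A rough sketch; the function names and placeholder inputs below are
+    /// illustrative assumptions (the concrete `map_func` and `index_map_func`
+    /// live in the graph's function library, which these bindings do not check):
+    ///
+    /// ```ignore
+    /// let mut scope = Scope::new_root_scope();
+    /// // Assumed upstream values; a real pipeline would produce these from other dataset ops.
+    /// let input_dataset = Placeholder::new().dtype(DataType::Variant).build(&mut scope)?;
+    /// let map_args = Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
+    /// let index_args = Placeholder::new().dtype(DataType::Float).build(&mut scope)?;
+    /// let cardinality = Placeholder::new().dtype(DataType::Int64).build(&mut scope)?;
+    /// let inst = IndexFlatMapDataset::new()
+    ///     .map_func("my_map_fn")
+    ///     .index_map_func("my_index_map_fn")
+    ///     .build_instance(
+    ///         input_dataset.into(),
+    ///         map_args.into(),
+    ///         index_args.into(),
+    ///         cardinality.into(),
+    ///         &mut scope,
+    ///     )?;
+    /// let handle = inst.handle();
+    /// ```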
+ pub fn build_instance( + &self, + input_dataset: crate::Output, + map_func_other_args: crate::Output, + index_map_func_other_args: crate::Output, + output_cardinality: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("IndexFlatMapDataset", |builder| { + builder.add_input(input_dataset); + builder.add_input(map_func_other_args); + builder.add_input(index_map_func_other_args); + builder.add_input(output_cardinality); + if let ::std::option::Option::Some(value) = &self.map_func { + builder.set_attr_string("map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.index_map_func { + builder.set_attr_string("index_map_func", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tmap_func_args { + builder.set_attr_type_list("Tmap_func_args", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tindex_map_func_args { + builder.set_attr_type_list("Tindex_map_func_args", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + builder.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + builder.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + builder.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(IndexFlatMapDatasetInst { op }) + } +} +impl IndexFlatMapDatasetInst { + /// Returns the 'handle' Output of this 'IndexFlatMapDataset' operation. + pub fn handle(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input_dataset' Input of this 'IndexFlatMapDataset' operation. + pub fn input_dataset(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'map_func_other_args' Input of this 'IndexFlatMapDataset' operation. + pub fn map_func_other_args(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'index_map_func_other_args' Input of this 'IndexFlatMapDataset' operation. + pub fn index_map_func_other_args(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'output_cardinality' Input of this 'IndexFlatMapDataset' operation. + pub fn output_cardinality(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: IndexFlatMapDatasetInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `IndexFlatMapDataset::new().build(input_dataset, map_func_other_args, index_map_func_other_args, output_cardinality, scope)`. +pub fn index_flat_map_dataset< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + input_dataset: O0, + map_func_other_args: O1, + index_map_func_other_args: O2, + output_cardinality: O3, + scope: &mut crate::Scope, +) -> crate::Result { + IndexFlatMapDataset::new().build( + input_dataset, + map_func_other_args, + index_map_func_other_args, + output_cardinality, + scope, + ) +} + /// Builder for the `InfeedDequeue` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct InfeedDequeue { @@ -84443,6 +88442,94 @@ pub fn iterator_get_device>( IteratorGetDevice::new().build(resource, scope) } +/// Builder for the `IteratorGetModelProto` operation. 
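+///
+/// Presumably returns the serialized `tf.data` autotuning model proto for an
+/// iterator resource; the precise semantics are defined by the TensorFlow op
+/// registry rather than by these bindings. A minimal sketch with an assumed
+/// upstream iterator resource:
+///
+/// ```ignore
+/// let mut scope = Scope::new_root_scope();
+/// // Assumed: in practice `iterator` comes from an iterator-creating op.
+/// let iterator = Placeholder::new().dtype(DataType::Resource).build(&mut scope)?;
+/// let model_proto = iterator_get_model_proto(iterator, &mut scope)?;
+/// ```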
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct IteratorGetModelProto { + control_inputs: ::std::vec::Vec, +} +/// An instance of 'IteratorGetModelProto' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct IteratorGetModelProtoInst { + /// An instance of a fully built IteratorGetModelProto Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl IteratorGetModelProto { + /// Creates a new `IteratorGetModelProto`. + pub fn new() -> Self { + Self::default() + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `IteratorGetModelProto` operation. + pub fn build>( + &self, + iterator: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(iterator.into(), scope) + } + fn build_impl( + &self, + iterator: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("IteratorGetModelProto", |nd| { + nd.add_input(iterator); + for op in &self.control_inputs { + nd.add_control_input(op); + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'IteratorGetModelProto' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + iterator: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("IteratorGetModelProto", |builder| { + builder.add_input(iterator); + ::std::result::Result::Ok(()) + })?; + Ok(IteratorGetModelProtoInst { op }) + } +} +impl IteratorGetModelProtoInst { + /// Returns the 'model_proto' Output of this 'IteratorGetModelProto' operation. + pub fn model_proto(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'iterator' Input of this 'IteratorGetModelProto' operation. + pub fn iterator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: IteratorGetModelProtoInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `IteratorGetModelProto::new().build(iterator, scope)`. +pub fn iterator_get_model_proto>( + iterator: O0, + scope: &mut crate::Scope, +) -> crate::Result { + IteratorGetModelProto::new().build(iterator, scope) +} + /// Builder for the `IteratorGetNext` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct IteratorGetNext { @@ -88476,6 +92563,126 @@ pub fn list_diff< ListDiff::new().build(x, y, scope) } +/// Builder for the `ListSnapshotChunksDataset` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct ListSnapshotChunksDataset { + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'ListSnapshotChunksDataset' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct ListSnapshotChunksDatasetInst { + /// An instance of a fully built ListSnapshotChunksDataset Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl ListSnapshotChunksDataset { + /// Creates a new `ListSnapshotChunksDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `output_types` attribute. + pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. 
+ pub fn output_shapes>>( + mut self, + value: ArgType, + ) -> Self { + self.output_shapes = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `ListSnapshotChunksDataset` operation. + pub fn build>( + &self, + snapshot_path: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(snapshot_path.into(), scope) + } + fn build_impl( + &self, + snapshot_path: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("ListSnapshotChunksDataset", |nd| { + nd.add_input(snapshot_path); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.output_types { + nd.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + nd.set_attr_shape_list("output_shapes", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'ListSnapshotChunksDataset' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + snapshot_path: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("ListSnapshotChunksDataset", |builder| { + builder.add_input(snapshot_path); + if let ::std::option::Option::Some(value) = &self.output_types { + builder.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + builder.set_attr_shape_list("output_shapes", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(ListSnapshotChunksDatasetInst { op }) + } +} +impl ListSnapshotChunksDatasetInst { + /// Returns the 'handle' Output of this 'ListSnapshotChunksDataset' operation. + pub fn handle(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'snapshot_path' Input of this 'ListSnapshotChunksDataset' operation. + pub fn snapshot_path(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: ListSnapshotChunksDatasetInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `ListSnapshotChunksDataset::new().build(snapshot_path, scope)`. +pub fn list_snapshot_chunks_dataset>( + snapshot_path: O0, + scope: &mut crate::Scope, +) -> crate::Result { + ListSnapshotChunksDataset::new().build(snapshot_path, scope) +} + /// Builder for the `LoadAllTPUEmbeddingParameters` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct LoadAllTPUEmbeddingParameters { @@ -96224,6 +100431,8 @@ pub struct MatMul { transpose_a: ::std::option::Option, transpose_b: ::std::option::Option, T: ::std::option::Option, + grad_a: ::std::option::Option, + grad_b: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'MatMul' Operation with it's Outputs and Inputs exposed as methods. @@ -96257,6 +100466,18 @@ impl MatMul { self } + /// Sets the `grad_a` attribute. + pub fn grad_a>(mut self, value: ArgType) -> Self { + self.grad_a = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `grad_b` attribute. + pub fn grad_b>(mut self, value: ArgType) -> Self { + self.grad_b = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. 
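// Sketch of the new `grad_a`/`grad_b` MatMul attributes added above. They
// read as hints that an operand carries a back-propagated gradient (an
// inference from the names; this patch only wires the attributes through).
// `a` and `b` are assumed pre-built float outputs.
fn matmul_backward_hinted(
    a: crate::Output,
    b: crate::Output,
    scope: &mut crate::Scope,
) -> crate::Result<crate::Operation> {
    MatMul::new()
        .transpose_b(true)
        .grad_a(true) // `a` is assumed to be an incoming gradient here
        .build(a, b, scope)
}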
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -96296,6 +100517,12 @@ impl MatMul { if let ::std::option::Option::Some(value) = &self.T { nd.set_attr_type("T", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_a { + nd.set_attr_bool("grad_a", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_b { + nd.set_attr_bool("grad_b", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -96319,6 +100546,12 @@ impl MatMul { if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } + if let ::std::option::Option::Some(value) = &self.grad_a { + builder.set_attr_bool("grad_a", *value)?; + } + if let ::std::option::Option::Some(value) = &self.grad_b { + builder.set_attr_bool("grad_b", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(MatMulInst { op }) @@ -115583,7 +119816,7 @@ impl ParseExampleInst { /// Returns the 'dense_defaults' Input of this 'ParseExample' operation. pub fn dense_defaults(&self) -> crate::Result { let dynamic_offset = - (self.op.get_attr_int("Ndense")? + self.op.get_attr_int("Nsparse")? + 4) as i32; + (self.op.get_attr_int("Nsparse")? + self.op.get_attr_int("Ndense")? + 4) as i32; Ok(crate::Input { operation: &self.op, index: dynamic_offset, @@ -116936,8 +121169,8 @@ impl ParseSequenceExampleInst { } /// Returns the 'feature_list_sparse_values' Output of this 'ParseSequenceExample' operation. pub fn feature_list_sparse_values(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? - + 2 * self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? + + self.op.get_attr_int("Nfeature_list_sparse")? + 5) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -116946,8 +121179,8 @@ impl ParseSequenceExampleInst { } /// Returns a Vector of feature_list_sparse_shapes for 'feature_list_sparse_shapes' Output of this ParseSequenceExample operation. pub fn feature_list_sparse_shapes(&self) -> crate::Result> { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 6) as i32; let mut Outputs = vec![]; for i in @@ -116972,8 +121205,8 @@ impl ParseSequenceExampleInst { } /// Returns a Vector of feature_list_dense_lengths for 'feature_list_dense_lengths' Output of this ParseSequenceExample operation. pub fn feature_list_dense_lengths(&self) -> crate::Result> { - let dynamic_offset = (2 * self.op.get_attr_int("Nfeature_list_sparse")? - + 2 * self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? + + 2 * self.op.get_attr_int("Nfeature_list_sparse")? + 8) as i32; let mut Outputs = vec![]; for i in @@ -117445,8 +121678,8 @@ impl ParseSequenceExampleV2Inst { } /// Returns the 'feature_list_sparse_values' Output of this 'ParseSequenceExampleV2' operation. pub fn feature_list_sparse_values(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? - + 2 * self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? + + self.op.get_attr_int("Nfeature_list_sparse")? 
+ 7) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -117455,8 +121688,8 @@ impl ParseSequenceExampleV2Inst { } /// Returns a Vector of feature_list_sparse_shapes for 'feature_list_sparse_shapes' Output of this ParseSequenceExampleV2 operation. pub fn feature_list_sparse_shapes(&self) -> crate::Result> { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 8) as i32; let mut Outputs = vec![]; for i in @@ -117471,8 +121704,8 @@ impl ParseSequenceExampleV2Inst { } /// Returns the 'feature_list_dense_values' Output of this 'ParseSequenceExampleV2' operation. pub fn feature_list_dense_values(&self) -> crate::Result { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + 2 * self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 9) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -117497,8 +121730,8 @@ impl ParseSequenceExampleV2Inst { } /// Returns the 'feature_list_ragged_values' Output of this 'ParseSequenceExampleV2' operation. pub fn feature_list_ragged_values(&self) -> crate::Result { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + 2 * self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + self.op.get_attr_int("Nfeature_list_dense")? + 11) as i32; Ok(crate::Output { @@ -117508,9 +121741,9 @@ impl ParseSequenceExampleV2Inst { } /// Returns the 'feature_list_ragged_outer_splits' Output of this 'ParseSequenceExampleV2' operation. pub fn feature_list_ragged_outer_splits(&self) -> crate::Result { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_dense")? + 2 * self.op.get_attr_int("Nfeature_list_sparse")? - + self.op.get_attr_int("Nfeature_list_dense")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 12) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -117519,9 +121752,9 @@ impl ParseSequenceExampleV2Inst { } /// Returns the 'feature_list_ragged_inner_splits' Output of this 'ParseSequenceExampleV2' operation. pub fn feature_list_ragged_inner_splits(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("Nfeature_list_dense")? - + 2 * self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? + 2 * self.op.get_attr_int("Nfeature_list_sparse")? + + self.op.get_attr_int("Nfeature_list_dense")? + 13) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -118203,8 +122436,8 @@ impl ParseSingleSequenceExampleInst { } /// Returns the 'feature_list_sparse_values' Output of this 'ParseSingleSequenceExample' operation. pub fn feature_list_sparse_values(&self) -> crate::Result { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 5) as i32; Ok(crate::Output { operation: self.op.clone(), @@ -118213,8 +122446,8 @@ impl ParseSingleSequenceExampleInst { } /// Returns a Vector of feature_list_sparse_shapes for 'feature_list_sparse_shapes' Output of this ParseSingleSequenceExample operation. 
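// Note on the `dynamic_offset` rewrites in the Parse*Example accessors above
// and below: only the order of the summands changed (the generator now emits
// them in a different order), and addition is commutative, so every computed
// input/output index is unchanged. A worked check with made-up attr values:
fn offset_is_order_independent() {
    let ncontext_sparse: i64 = 2; // stands in for get_attr_int("Ncontext_sparse")
    let nfeature_list_sparse: i64 = 3; // stands in for get_attr_int("Nfeature_list_sparse")
    let before = (2 * ncontext_sparse + nfeature_list_sparse + 5) as i32;
    let after = (nfeature_list_sparse + 2 * ncontext_sparse + 5) as i32;
    assert_eq!(before, after); // both are 12
}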
pub fn feature_list_sparse_shapes(&self) -> crate::Result> { - let dynamic_offset = (2 * self.op.get_attr_int("Ncontext_sparse")? - + self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? + + 2 * self.op.get_attr_int("Ncontext_sparse")? + 6) as i32; let mut Outputs = vec![]; for i in @@ -118292,9 +122525,9 @@ impl ParseSingleSequenceExampleInst { } /// Returns a Vector of feature_list_dense_keys for 'feature_list_dense_keys' Input of this ParseSingleSequenceExample operation. pub fn feature_list_dense_keys(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? - + self.op.get_attr_int("Ncontext_sparse")? + let dynamic_offset = (self.op.get_attr_int("Ncontext_sparse")? + self.op.get_attr_int("Ncontext_dense")? + + self.op.get_attr_int("Nfeature_list_sparse")? + 5) as i32; let mut Inputs = vec![]; for i in @@ -118309,9 +122542,9 @@ impl ParseSingleSequenceExampleInst { } /// Returns the 'context_dense_defaults' Input of this 'ParseSingleSequenceExample' operation. pub fn context_dense_defaults(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("Nfeature_list_sparse")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_dense")? + self.op.get_attr_int("Ncontext_dense")? - + self.op.get_attr_int("Nfeature_list_dense")? + + self.op.get_attr_int("Nfeature_list_sparse")? + self.op.get_attr_int("Ncontext_sparse")? + 6) as i32; Ok(crate::Input { @@ -118321,9 +122554,9 @@ impl ParseSingleSequenceExampleInst { } /// Returns the 'debug_name' Input of this 'ParseSingleSequenceExample' operation. pub fn debug_name(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("Ncontext_dense")? + let dynamic_offset = (self.op.get_attr_int("Nfeature_list_dense")? + self.op.get_attr_int("Ncontext_sparse")? - + self.op.get_attr_int("Nfeature_list_dense")? + + self.op.get_attr_int("Ncontext_dense")? + self.op.get_attr_int("Nfeature_list_sparse")? + 7) as i32; Ok(crate::Input { @@ -134886,6 +139119,157 @@ pub fn rfft3_d, O1: ::std::convert::Into RFFT3D::new().build(input, fft_length, scope) } +/// Builder for the `RFFTND` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct RFFTND { + Treal: ::std::option::Option, + Tcomplex: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'RFFTND' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct RFFTNDInst { + /// An instance of a fully built RFFTND Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl RFFTND { + /// Creates a new `RFFTND`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Treal` attribute. + pub fn Treal>(mut self, value: ArgType) -> Self { + self.Treal = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tcomplex` attribute. + pub fn Tcomplex>( + mut self, + value: ArgType, + ) -> Self { + self.Tcomplex = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `RFFTND` operation. 
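// Usage sketch for the N-dimensional real FFT builder whose `build` follows
// just below; `input` (real-valued), `fft_length`, and `axes` are assumed
// pre-built outputs, and the attribute values shown are assumptions.
fn rfftnd_example(
    input: crate::Output,
    fft_length: crate::Output,
    axes: crate::Output,
    scope: &mut crate::Scope,
) -> crate::Result<crate::Operation> {
    RFFTND::new()
        .Treal(crate::DataType::Float)
        .Tcomplex(crate::DataType::Complex64)
        .build(input, fft_length, axes, scope)
}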
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input.into(), fft_length.into(), axes.into(), scope) + } + fn build_impl( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("RFFTND", |nd| { + nd.add_input(input); + nd.add_input(fft_length); + nd.add_input(axes); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.Treal { + nd.set_attr_type("Treal", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + nd.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'RFFTND' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input: crate::Output, + fft_length: crate::Output, + axes: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("RFFTND", |builder| { + builder.add_input(input); + builder.add_input(fft_length); + builder.add_input(axes); + if let ::std::option::Option::Some(value) = &self.Treal { + builder.set_attr_type("Treal", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tcomplex { + builder.set_attr_type("Tcomplex", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(RFFTNDInst { op }) + } +} +impl RFFTNDInst { + /// Returns the 'output' Output of this 'RFFTND' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'input' Input of this 'RFFTND' operation. + pub fn input(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'fft_length' Input of this 'RFFTND' operation. + pub fn fft_length(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'axes' Input of this 'RFFTND' operation. + pub fn axes(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } +} +impl From for crate::Operation { + fn from(inst: RFFTNDInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `RFFTND::new().build(input, fft_length, axes, scope)`. +pub fn rfftnd< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + input: O0, + fft_length: O1, + axes: O2, + scope: &mut crate::Scope, +) -> crate::Result { + RFFTND::new().build(input, fft_length, axes, scope) +} + /// Builder for the `RGBToHSV` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct RGBToHSV { @@ -144776,21 +149160,22 @@ pub fn relayout>( Relayout::new().build(input, scope) } -/// Builder for the `RelayoutGrad` operation. +/// Builder for the `RelayoutLike` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct RelayoutGrad { +pub struct RelayoutLike { T: ::std::option::Option, + U: ::std::option::Option, control_inputs: ::std::vec::Vec, } -/// An instance of 'RelayoutGrad' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'RelayoutLike' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct RelayoutGradInst { - /// An instance of a fully built RelayoutGrad Operation in a Tensorflow graph. 
+pub struct RelayoutLikeInst { + /// An instance of a fully built RelayoutLike Operation in a Tensorflow graph. pub op: crate::Operation, } -impl RelayoutGrad { - /// Creates a new `RelayoutGrad`. +impl RelayoutLike { + /// Creates a new `RelayoutLike`. pub fn new() -> Self { Self::default() } @@ -144801,99 +149186,111 @@ impl RelayoutGrad { self } + /// Sets the `U` attribute. + pub fn U>(mut self, value: ArgType) -> Self { + self.U = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); self } - /// Builds the `RelayoutGrad` operation. + /// Builds the `RelayoutLike` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, >( &self, input: O0, - forward_input: O1, + layout_input: O1, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(input.into(), forward_input.into(), scope) + self.build_impl(input.into(), layout_input.into(), scope) } fn build_impl( &self, input: crate::Output, - forward_input: crate::Output, + layout_input: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("RelayoutGrad", |nd| { + scope.new_operation("RelayoutLike", |nd| { nd.add_input(input); - nd.add_input(forward_input); + nd.add_input(layout_input); for op in &self.control_inputs { nd.add_control_input(op); } if let ::std::option::Option::Some(value) = &self.T { nd.set_attr_type("T", *value)?; } + if let ::std::option::Option::Some(value) = &self.U { + nd.set_attr_type("U", *value)?; + } ::std::result::Result::Ok(()) }) } - /// Builds a new instance of 'RelayoutGrad' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'RelayoutLike' Operation with it's Outputs and Inputs exposed as methods. pub fn build_instance( &self, input: crate::Output, - forward_input: crate::Output, + layout_input: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("RelayoutGrad", |builder| { + ) -> crate::Result { + let op = scope.new_operation("RelayoutLike", |builder| { builder.add_input(input); - builder.add_input(forward_input); + builder.add_input(layout_input); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } + if let ::std::option::Option::Some(value) = &self.U { + builder.set_attr_type("U", *value)?; + } ::std::result::Result::Ok(()) })?; - Ok(RelayoutGradInst { op }) + Ok(RelayoutLikeInst { op }) } } -impl RelayoutGradInst { - /// Returns the 'output' Output of this 'RelayoutGrad' operation. +impl RelayoutLikeInst { + /// Returns the 'output' Output of this 'RelayoutLike' operation. pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'input' Input of this 'RelayoutGrad' operation. + /// Returns the 'input' Input of this 'RelayoutLike' operation. pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'forward_input' Input of this 'RelayoutGrad' operation. - pub fn forward_input(&self) -> crate::Input { + /// Returns the 'layout_input' Input of this 'RelayoutLike' operation. 
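// Sketch for the rename above (`RelayoutGrad` -> `RelayoutLike`): the second
// input is now `layout_input` (accessor just below) and a new `U` type
// attribute was added. Judging by the names, the op presumably relays out
// `input` to match `layout_input`; both outputs are assumed pre-built.
fn relayout_like_example(
    input: crate::Output,
    layout_input: crate::Output,
    scope: &mut crate::Scope,
) -> crate::Result<crate::Operation> {
    RelayoutLike::new().build(input, layout_input, scope)
}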
+ pub fn layout_input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } } -impl From for crate::Operation { - fn from(inst: RelayoutGradInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: RelayoutLikeInst) -> crate::Operation { inst.op } } -/// Shorthand for `RelayoutGrad::new().build(input, forward_input, scope)`. -pub fn relayout_grad< +/// Shorthand for `RelayoutLike::new().build(input, layout_input, scope)`. +pub fn relayout_like< O0: ::std::convert::Into, O1: ::std::convert::Into, >( input: O0, - forward_input: O1, + layout_input: O1, scope: &mut crate::Scope, ) -> crate::Result { - RelayoutGrad::new().build(input, forward_input, scope) + RelayoutLike::new().build(input, layout_input, scope) } /// Builder for the `Relu` operation. @@ -172626,8 +177023,8 @@ impl SdcaOptimizerInst { } /// Returns a Vector of dense_features for 'dense_features' Input of this SdcaOptimizer operation. pub fn dense_features(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? - + 2 * self.op.get_attr_int("num_sparse_features")? + let dynamic_offset = (2 * self.op.get_attr_int("num_sparse_features")? + + self.op.get_attr_int("num_sparse_features_with_values")? + 3) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_dense_features")? as i32 @@ -172641,9 +177038,9 @@ impl SdcaOptimizerInst { } /// Returns the 'example_weights' Input of this 'SdcaOptimizer' operation. pub fn example_weights(&self) -> crate::Result { - let dynamic_offset = (2 * self.op.get_attr_int("num_sparse_features")? - + self.op.get_attr_int("num_sparse_features_with_values")? + let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? + self.op.get_attr_int("num_dense_features")? + + 2 * self.op.get_attr_int("num_sparse_features")? + 4) as i32; Ok(crate::Input { operation: &self.op, @@ -172697,9 +177094,9 @@ impl SdcaOptimizerInst { } /// Returns a Vector of dense_weights for 'dense_weights' Input of this SdcaOptimizer operation. pub fn dense_weights(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? + let dynamic_offset = (self.op.get_attr_int("num_dense_features")? + 4 * self.op.get_attr_int("num_sparse_features")? - + self.op.get_attr_int("num_dense_features")? + + self.op.get_attr_int("num_sparse_features_with_values")? + 8) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_dense_features")? as i32 @@ -172713,9 +177110,9 @@ impl SdcaOptimizerInst { } /// Returns the 'example_state_data' Input of this 'SdcaOptimizer' operation. pub fn example_state_data(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? + let dynamic_offset = (4 * self.op.get_attr_int("num_sparse_features")? + + self.op.get_attr_int("num_sparse_features_with_values")? + 2 * self.op.get_attr_int("num_dense_features")? - + 4 * self.op.get_attr_int("num_sparse_features")? + 9) as i32; Ok(crate::Input { operation: &self.op, @@ -173103,8 +177500,8 @@ impl SdcaOptimizerV2Inst { } /// Returns a Vector of dense_features for 'dense_features' Input of this SdcaOptimizerV2 operation. pub fn dense_features(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? - + 2 * self.op.get_attr_int("num_sparse_features")? + let dynamic_offset = (2 * self.op.get_attr_int("num_sparse_features")? 
+ + self.op.get_attr_int("num_sparse_features_with_values")? + 3) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_dense_features")? as i32 @@ -173119,8 +177516,8 @@ impl SdcaOptimizerV2Inst { /// Returns the 'example_weights' Input of this 'SdcaOptimizerV2' operation. pub fn example_weights(&self) -> crate::Result { let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? - + self.op.get_attr_int("num_dense_features")? + 2 * self.op.get_attr_int("num_sparse_features")? + + self.op.get_attr_int("num_dense_features")? + 4) as i32; Ok(crate::Input { operation: &self.op, @@ -173140,9 +177537,9 @@ impl SdcaOptimizerV2Inst { } /// Returns a Vector of sparse_indices for 'sparse_indices' Input of this SdcaOptimizerV2 operation. pub fn sparse_indices(&self) -> crate::Result> { - let dynamic_offset = (self.op.get_attr_int("num_sparse_features_with_values")? - + self.op.get_attr_int("num_dense_features")? + let dynamic_offset = (self.op.get_attr_int("num_dense_features")? + 2 * self.op.get_attr_int("num_sparse_features")? + + self.op.get_attr_int("num_sparse_features_with_values")? + 6) as i32; let mut Inputs = vec![]; for i in @@ -173158,8 +177555,8 @@ impl SdcaOptimizerV2Inst { /// Returns a Vector of sparse_weights for 'sparse_weights' Input of this SdcaOptimizerV2 operation. pub fn sparse_weights(&self) -> crate::Result> { let dynamic_offset = (self.op.get_attr_int("num_dense_features")? - + 3 * self.op.get_attr_int("num_sparse_features")? + self.op.get_attr_int("num_sparse_features_with_values")? + + 3 * self.op.get_attr_int("num_sparse_features")? + 7) as i32; let mut Inputs = vec![]; for i in @@ -173175,8 +177572,8 @@ impl SdcaOptimizerV2Inst { /// Returns a Vector of dense_weights for 'dense_weights' Input of this SdcaOptimizerV2 operation. pub fn dense_weights(&self) -> crate::Result> { let dynamic_offset = (4 * self.op.get_attr_int("num_sparse_features")? - + self.op.get_attr_int("num_dense_features")? + self.op.get_attr_int("num_sparse_features_with_values")? + + self.op.get_attr_int("num_dense_features")? + 8) as i32; let mut Inputs = vec![]; for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("num_dense_features")? as i32 @@ -181954,6 +186351,311 @@ pub fn softsign_grad< SoftsignGrad::new().build(gradients, features, scope) } +/// Builder for the `SortListOfSparseCoreCooTensors` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct SortListOfSparseCoreCooTensors { + sample_count_list: ::std::option::Option<::std::vec::Vec>, + col_offset_list: ::std::option::Option<::std::vec::Vec>, + num_replica: ::std::option::Option, + table_vocab_size: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + N: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'SortListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct SortListOfSparseCoreCooTensorsInst { + /// An instance of a fully built SortListOfSparseCoreCooTensors Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl SortListOfSparseCoreCooTensors { + /// Creates a new `SortListOfSparseCoreCooTensors`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `sample_count_list` attribute. 
+ pub fn sample_count_list>>( + mut self, + value: ArgType, + ) -> Self { + self.sample_count_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `col_offset_list` attribute. + pub fn col_offset_list>>( + mut self, + value: ArgType, + ) -> Self { + self.col_offset_list = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_vocab_size` attribute. + pub fn table_vocab_size>(mut self, value: ArgType) -> Self { + self.table_vocab_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `N` attribute. + pub fn N>(mut self, value: ArgType) -> Self { + self.N = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `SortListOfSparseCoreCooTensors` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + row_ids_list: O0, + col_ids_list: O1, + gains_list: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_ids_list.into(), + col_ids_list.into(), + gains_list.into(), + scope, + ) + } + fn build_impl( + &self, + row_ids_list: crate::Output, + col_ids_list: crate::Output, + gains_list: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("SortListOfSparseCoreCooTensors", |nd| { + nd.add_input(row_ids_list); + nd.add_input(col_ids_list); + nd.add_input(gains_list); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.sample_count_list { + nd.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + nd.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + nd.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + if let ::std::option::Option::Some(value) = &self.N { + nd.set_attr_int("N", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'SortListOfSparseCoreCooTensors' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_ids_list: Vec, + col_ids_list: Vec, + gains_list: Vec, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("SortListOfSparseCoreCooTensors", |builder| { + builder.add_input_list(&row_ids_list); + builder.add_input_list(&col_ids_list); + builder.add_input_list(&gains_list); + if let ::std::option::Option::Some(value) = &self.sample_count_list { + builder.set_attr_int_list("sample_count_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.col_offset_list { + builder.set_attr_int_list("col_offset_list", value)?; + } + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_vocab_size { + builder.set_attr_int("table_vocab_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + builder.set_attr_int("N", row_ids_list.clone().len() as i64)?; + ::std::result::Result::Ok(()) + })?; + Ok(SortListOfSparseCoreCooTensorsInst { op }) + } +} +impl SortListOfSparseCoreCooTensorsInst { + /// Returns the 'sorted_row_ids' Output of this 'SortListOfSparseCoreCooTensors' operation. + pub fn sorted_row_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_col_ids' Output of this 'SortListOfSparseCoreCooTensors' operation. + pub fn sorted_col_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'sorted_gains' Output of this 'SortListOfSparseCoreCooTensors' operation. + pub fn sorted_gains(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'id_counts' Output of this 'SortListOfSparseCoreCooTensors' operation. + pub fn id_counts(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 3, + } + } + /// Returns a Vector of row_ids_list for 'row_ids_list' Input of this SortListOfSparseCoreCooTensors operation. + pub fn row_ids_list(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of col_ids_list for 'col_ids_list' Input of this SortListOfSparseCoreCooTensors operation. + pub fn col_ids_list(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("N")? + 1) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of gains_list for 'gains_list' Input of this SortListOfSparseCoreCooTensors operation. + pub fn gains_list(&self) -> crate::Result> { + let dynamic_offset = (2 * self.op.get_attr_int("N")? 
+ 2) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } +} +impl From for crate::Operation { + fn from(inst: SortListOfSparseCoreCooTensorsInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `SortListOfSparseCoreCooTensors::new().build(row_ids_list, col_ids_list, gains_list, scope)`. +pub fn sort_list_of_sparse_core_coo_tensors< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + row_ids_list: O0, + col_ids_list: O1, + gains_list: O2, + scope: &mut crate::Scope, +) -> crate::Result { + SortListOfSparseCoreCooTensors::new().build(row_ids_list, col_ids_list, gains_list, scope) +} + /// Builder for the `SpaceToBatch` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct SpaceToBatch { @@ -190792,6 +195494,7 @@ pub struct SparseSegmentMean { T: ::std::option::Option, Tidx: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentMean' Operation with it's Outputs and Inputs exposed as methods. @@ -190828,6 +195531,12 @@ impl SparseSegmentMean { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -190871,6 +195580,9 @@ impl SparseSegmentMean { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -190896,6 +195608,9 @@ impl SparseSegmentMean { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentMeanInst { op }) @@ -191135,6 +195850,198 @@ pub fn sparse_segment_mean_grad< SparseSegmentMeanGrad::new().build(grad, indices, segment_ids, output_dim0, scope) } +/// Builder for the `SparseSegmentMeanGradV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct SparseSegmentMeanGradV2 { + T: ::std::option::Option, + Tidx: ::std::option::Option, + Tsegmentids: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'SparseSegmentMeanGradV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct SparseSegmentMeanGradV2Inst { + /// An instance of a fully built SparseSegmentMeanGradV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl SparseSegmentMeanGradV2 { + /// Creates a new `SparseSegmentMeanGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>(mut self, value: ArgType) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tidx` attribute. + pub fn Tidx>(mut self, value: ArgType) -> Self { + self.Tidx = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tsegmentids` attribute. 
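// Usage sketch for `SortListOfSparseCoreCooTensors` above via
// `build_instance`, which takes the three variadic inputs as
// `Vec<crate::Output>` and derives the `N` attribute from the row-ids list
// length. All inputs and attribute values are hypothetical.
fn sort_coo_tensors_example(
    row_ids_list: Vec<crate::Output>,
    col_ids_list: Vec<crate::Output>,
    gains_list: Vec<crate::Output>,
    scope: &mut crate::Scope,
) -> crate::Result<(crate::Output, crate::Output, crate::Output)> {
    let inst = SortListOfSparseCoreCooTensors::new()
        .num_replica(1)
        .num_sc_per_chip(4)
        .table_name("embedding_table_0")
        .build_instance(row_ids_list, col_ids_list, gains_list, scope)?;
    Ok((
        inst.sorted_row_ids(),
        inst.sorted_col_ids(),
        inst.sorted_gains(),
    ))
}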
+ pub fn Tsegmentids>( + mut self, + value: ArgType, + ) -> Self { + self.Tsegmentids = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `SparseSegmentMeanGradV2` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + grad.into(), + indices.into(), + segment_ids.into(), + dense_output_dim0.into(), + scope, + ) + } + fn build_impl( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("SparseSegmentMeanGradV2", |nd| { + nd.add_input(grad); + nd.add_input(indices); + nd.add_input(segment_ids); + nd.add_input(dense_output_dim0); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + nd.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + nd.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'SparseSegmentMeanGradV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("SparseSegmentMeanGradV2", |builder| { + builder.add_input(grad); + builder.add_input(indices); + builder.add_input(segment_ids); + builder.add_input(dense_output_dim0); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + builder.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + builder.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(SparseSegmentMeanGradV2Inst { op }) + } +} +impl SparseSegmentMeanGradV2Inst { + /// Returns the 'output' Output of this 'SparseSegmentMeanGradV2' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_unique_indices' Output of this 'SparseSegmentMeanGradV2' operation. + pub fn sorted_unique_indices(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'grad' Input of this 'SparseSegmentMeanGradV2' operation. + pub fn grad(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'indices' Input of this 'SparseSegmentMeanGradV2' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'segment_ids' Input of this 'SparseSegmentMeanGradV2' operation. + pub fn segment_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'dense_output_dim0' Input of this 'SparseSegmentMeanGradV2' operation. 
+ pub fn dense_output_dim0(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: SparseSegmentMeanGradV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `SparseSegmentMeanGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope)`. +pub fn sparse_segment_mean_grad_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, +) -> crate::Result { + SparseSegmentMeanGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope) +} + /// Builder for the `SparseSegmentMeanWithNumSegments` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct SparseSegmentMeanWithNumSegments { @@ -191142,6 +196049,7 @@ pub struct SparseSegmentMeanWithNumSegments { Tidx: ::std::option::Option, Tnumsegments: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentMeanWithNumSegments' Operation with it's Outputs and Inputs exposed as methods. @@ -191187,6 +196095,12 @@ impl SparseSegmentMeanWithNumSegments { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -191243,6 +196157,9 @@ impl SparseSegmentMeanWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -191273,6 +196190,9 @@ impl SparseSegmentMeanWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentMeanWithNumSegmentsInst { op }) @@ -191342,6 +196262,7 @@ pub struct SparseSegmentSqrtN { T: ::std::option::Option, Tidx: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentSqrtN' Operation with it's Outputs and Inputs exposed as methods. @@ -191378,6 +196299,12 @@ impl SparseSegmentSqrtN { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. 
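// Sketch for the new `SparseSegmentMeanGradV2` op above. Unlike the V1
// gradient it returns a second `sorted_unique_indices` output alongside the
// gradient, and `SparseSegmentSqrtNGradV2`/`SparseSegmentSumGradV2` below
// have the same shape. The four inputs are assumed pre-built.
fn sparse_segment_mean_grad_v2_example(
    grad: crate::Output,
    indices: crate::Output,
    segment_ids: crate::Output,
    dense_output_dim0: crate::Output,
    scope: &mut crate::Scope,
) -> crate::Result<(crate::Output, crate::Output)> {
    let inst = SparseSegmentMeanGradV2::new().build_instance(
        grad,
        indices,
        segment_ids,
        dense_output_dim0,
        scope,
    )?;
    Ok((inst.output(), inst.sorted_unique_indices()))
}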
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -191421,6 +196348,9 @@ impl SparseSegmentSqrtN { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -191446,6 +196376,9 @@ impl SparseSegmentSqrtN { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentSqrtNInst { op }) @@ -191685,6 +196618,198 @@ pub fn sparse_segment_sqrt_ngrad< SparseSegmentSqrtNGrad::new().build(grad, indices, segment_ids, output_dim0, scope) } +/// Builder for the `SparseSegmentSqrtNGradV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct SparseSegmentSqrtNGradV2 { + T: ::std::option::Option, + Tidx: ::std::option::Option, + Tsegmentids: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'SparseSegmentSqrtNGradV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct SparseSegmentSqrtNGradV2Inst { + /// An instance of a fully built SparseSegmentSqrtNGradV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl SparseSegmentSqrtNGradV2 { + /// Creates a new `SparseSegmentSqrtNGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>(mut self, value: ArgType) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tidx` attribute. + pub fn Tidx>(mut self, value: ArgType) -> Self { + self.Tidx = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tsegmentids` attribute. + pub fn Tsegmentids>( + mut self, + value: ArgType, + ) -> Self { + self.Tsegmentids = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `SparseSegmentSqrtNGradV2` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + grad.into(), + indices.into(), + segment_ids.into(), + dense_output_dim0.into(), + scope, + ) + } + fn build_impl( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("SparseSegmentSqrtNGradV2", |nd| { + nd.add_input(grad); + nd.add_input(indices); + nd.add_input(segment_ids); + nd.add_input(dense_output_dim0); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + nd.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + nd.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'SparseSegmentSqrtNGradV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("SparseSegmentSqrtNGradV2", |builder| { + builder.add_input(grad); + builder.add_input(indices); + builder.add_input(segment_ids); + builder.add_input(dense_output_dim0); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + builder.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + builder.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(SparseSegmentSqrtNGradV2Inst { op }) + } +} +impl SparseSegmentSqrtNGradV2Inst { + /// Returns the 'output' Output of this 'SparseSegmentSqrtNGradV2' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_unique_indices' Output of this 'SparseSegmentSqrtNGradV2' operation. + pub fn sorted_unique_indices(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'grad' Input of this 'SparseSegmentSqrtNGradV2' operation. + pub fn grad(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'indices' Input of this 'SparseSegmentSqrtNGradV2' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'segment_ids' Input of this 'SparseSegmentSqrtNGradV2' operation. + pub fn segment_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'dense_output_dim0' Input of this 'SparseSegmentSqrtNGradV2' operation. + pub fn dense_output_dim0(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: SparseSegmentSqrtNGradV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `SparseSegmentSqrtNGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope)`. 
+pub fn sparse_segment_sqrt_ngrad_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, +) -> crate::Result { + SparseSegmentSqrtNGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope) +} + /// Builder for the `SparseSegmentSqrtNWithNumSegments` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct SparseSegmentSqrtNWithNumSegments { @@ -191692,6 +196817,7 @@ pub struct SparseSegmentSqrtNWithNumSegments { Tidx: ::std::option::Option, Tnumsegments: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentSqrtNWithNumSegments' Operation with it's Outputs and Inputs exposed as methods. @@ -191737,6 +196863,12 @@ impl SparseSegmentSqrtNWithNumSegments { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -191793,6 +196925,9 @@ impl SparseSegmentSqrtNWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -191823,6 +196958,9 @@ impl SparseSegmentSqrtNWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentSqrtNWithNumSegmentsInst { op }) @@ -191892,6 +197030,7 @@ pub struct SparseSegmentSum { T: ::std::option::Option, Tidx: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentSum' Operation with it's Outputs and Inputs exposed as methods. @@ -191928,6 +197067,12 @@ impl SparseSegmentSum { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. 
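// Sketch of the new `sparse_gradient` attribute that this patch threads
// through the SparseSegment* forward ops (above and below). Setting it to
// true plausibly opts in to the sparse gradient path served by the new
// *GradV2 ops; that reading is inferred from the names, not stated in this
// patch. The three-input `build` signature is assumed from the generated
// pattern.
fn sparse_segment_sum_with_sparse_grad(
    data: crate::Output,
    indices: crate::Output,
    segment_ids: crate::Output,
    scope: &mut crate::Scope,
) -> crate::Result<crate::Operation> {
    SparseSegmentSum::new()
        .sparse_gradient(true)
        .build(data, indices, segment_ids, scope)
}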
pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -191971,6 +197116,9 @@ impl SparseSegmentSum { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -191996,6 +197144,9 @@ impl SparseSegmentSum { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentSumInst { op }) @@ -192235,6 +197386,198 @@ pub fn sparse_segment_sum_grad< SparseSegmentSumGrad::new().build(grad, indices, segment_ids, output_dim0, scope) } +/// Builder for the `SparseSegmentSumGradV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct SparseSegmentSumGradV2 { + T: ::std::option::Option, + Tidx: ::std::option::Option, + Tsegmentids: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'SparseSegmentSumGradV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct SparseSegmentSumGradV2Inst { + /// An instance of a fully built SparseSegmentSumGradV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl SparseSegmentSumGradV2 { + /// Creates a new `SparseSegmentSumGradV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>(mut self, value: ArgType) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tidx` attribute. + pub fn Tidx>(mut self, value: ArgType) -> Self { + self.Tidx = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tsegmentids` attribute. + pub fn Tsegmentids>( + mut self, + value: ArgType, + ) -> Self { + self.Tsegmentids = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `SparseSegmentSumGradV2` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + grad.into(), + indices.into(), + segment_ids.into(), + dense_output_dim0.into(), + scope, + ) + } + fn build_impl( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("SparseSegmentSumGradV2", |nd| { + nd.add_input(grad); + nd.add_input(indices); + nd.add_input(segment_ids); + nd.add_input(dense_output_dim0); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + nd.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + nd.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'SparseSegmentSumGradV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + grad: crate::Output, + indices: crate::Output, + segment_ids: crate::Output, + dense_output_dim0: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("SparseSegmentSumGradV2", |builder| { + builder.add_input(grad); + builder.add_input(indices); + builder.add_input(segment_ids); + builder.add_input(dense_output_dim0); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tidx { + builder.set_attr_type("Tidx", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Tsegmentids { + builder.set_attr_type("Tsegmentids", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(SparseSegmentSumGradV2Inst { op }) + } +} +impl SparseSegmentSumGradV2Inst { + /// Returns the 'output' Output of this 'SparseSegmentSumGradV2' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'sorted_unique_indices' Output of this 'SparseSegmentSumGradV2' operation. + pub fn sorted_unique_indices(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'grad' Input of this 'SparseSegmentSumGradV2' operation. + pub fn grad(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'indices' Input of this 'SparseSegmentSumGradV2' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'segment_ids' Input of this 'SparseSegmentSumGradV2' operation. + pub fn segment_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'dense_output_dim0' Input of this 'SparseSegmentSumGradV2' operation. + pub fn dense_output_dim0(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: SparseSegmentSumGradV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `SparseSegmentSumGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope)`. 
+pub fn sparse_segment_sum_grad_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + grad: O0, + indices: O1, + segment_ids: O2, + dense_output_dim0: O3, + scope: &mut crate::Scope, +) -> crate::Result { + SparseSegmentSumGradV2::new().build(grad, indices, segment_ids, dense_output_dim0, scope) +} + /// Builder for the `SparseSegmentSumWithNumSegments` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct SparseSegmentSumWithNumSegments { @@ -192242,6 +197585,7 @@ pub struct SparseSegmentSumWithNumSegments { Tidx: ::std::option::Option, Tnumsegments: ::std::option::Option, Tsegmentids: ::std::option::Option, + sparse_gradient: ::std::option::Option, control_inputs: ::std::vec::Vec, } /// An instance of 'SparseSegmentSumWithNumSegments' Operation with it's Outputs and Inputs exposed as methods. @@ -192287,6 +197631,12 @@ impl SparseSegmentSumWithNumSegments { self } + /// Sets the `sparse_gradient` attribute. + pub fn sparse_gradient>(mut self, value: ArgType) -> Self { + self.sparse_gradient = ::std::option::Option::Some(value.into()); + self + } + /// Adds a control input. pub fn add_control_input(mut self, op: crate::Operation) -> Self { self.control_inputs.push(op); @@ -192343,6 +197693,9 @@ impl SparseSegmentSumWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { nd.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + nd.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) }) } @@ -192373,6 +197726,9 @@ impl SparseSegmentSumWithNumSegments { if let ::std::option::Option::Some(value) = &self.Tsegmentids { builder.set_attr_type("Tsegmentids", *value)?; } + if let ::std::option::Option::Some(value) = &self.sparse_gradient { + builder.set_attr_bool("sparse_gradient", *value)?; + } ::std::result::Result::Ok(()) })?; Ok(SparseSegmentSumWithNumSegmentsInst { op }) @@ -203597,70 +208953,69 @@ pub fn stop_gradient>( StopGradient::new().build(input, scope) } -/// Builder for the `StridedSlice` operation. +/// Builder for the `StoreMinibatchStatisticsInFdo` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct StridedSlice { - T: ::std::option::Option, - Index: ::std::option::Option, - begin_mask: ::std::option::Option, - end_mask: ::std::option::Option, - ellipsis_mask: ::std::option::Option, - new_axis_mask: ::std::option::Option, - shrink_axis_mask: ::std::option::Option, +pub struct StoreMinibatchStatisticsInFdo { + sample_count: ::std::option::Option, + num_replica: ::std::option::Option, + feature_width: ::std::option::Option, + num_sc_per_chip: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + mini_batch_splits: ::std::option::Option<::std::string::String>, control_inputs: ::std::vec::Vec, } -/// An instance of 'StridedSlice' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'StoreMinibatchStatisticsInFdo' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct StridedSliceInst { - /// An instance of a fully built StridedSlice Operation in a Tensorflow graph. +pub struct StoreMinibatchStatisticsInFdoInst { + /// An instance of a fully built StoreMinibatchStatisticsInFdo Operation in a Tensorflow graph. pub op: crate::Operation, } -impl StridedSlice { - /// Creates a new `StridedSlice`. 
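// ---------------------------------------------------------------------------
// Editor's note (not generated code): TensorFlow 2.16 adds a `sparse_gradient`
// attribute to the SparseSegmentSum family and introduces the
// SparseSegmentSumGradV2 op bound above, which returns both the gradient rows
// and the sorted unique indices. The sketch below is illustrative user-side
// code, not part of this file; it assumes the crate's usual
// `Operation -> Output` (index 0) conversion and collapses error handling.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn sparse_segment_sum_grad_v2_sketch(scope: &mut Scope) -> Result<(), Status> {
    let grad = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let indices = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    let segment_ids = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    let dim0 = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    // build_instance exposes both outputs, unlike the shorthand function above.
    let inst = ops::SparseSegmentSumGradV2::new().build_instance(
        grad.into(),
        indices.into(),
        segment_ids.into(),
        dim0.into(),
        scope,
    )?;
    let _grad_rows = inst.output(); // output index 0
    let _sorted_unique_indices = inst.sorted_unique_indices(); // output index 1
    Ok(())
}
// ---------------------------------------------------------------------------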
+impl StoreMinibatchStatisticsInFdo { + /// Creates a new `StoreMinibatchStatisticsInFdo`. pub fn new() -> Self { Self::default() } - /// Sets the `T` attribute. - pub fn T>(mut self, value: ArgType) -> Self { - self.T = ::std::option::Option::Some(value.into()); + /// Sets the `sample_count` attribute. + pub fn sample_count>(mut self, value: ArgType) -> Self { + self.sample_count = ::std::option::Option::Some(value.into()); self } - /// Sets the `Index` attribute. - pub fn Index>(mut self, value: ArgType) -> Self { - self.Index = ::std::option::Option::Some(value.into()); + /// Sets the `num_replica` attribute. + pub fn num_replica>(mut self, value: ArgType) -> Self { + self.num_replica = ::std::option::Option::Some(value.into()); self } - /// Sets the `begin_mask` attribute. - pub fn begin_mask>(mut self, value: ArgType) -> Self { - self.begin_mask = ::std::option::Option::Some(value.into()); + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); self } - /// Sets the `end_mask` attribute. - pub fn end_mask>(mut self, value: ArgType) -> Self { - self.end_mask = ::std::option::Option::Some(value.into()); + /// Sets the `num_sc_per_chip` attribute. + pub fn num_sc_per_chip>(mut self, value: ArgType) -> Self { + self.num_sc_per_chip = ::std::option::Option::Some(value.into()); self } - /// Sets the `ellipsis_mask` attribute. - pub fn ellipsis_mask>(mut self, value: ArgType) -> Self { - self.ellipsis_mask = ::std::option::Option::Some(value.into()); - self - } - - /// Sets the `new_axis_mask` attribute. - pub fn new_axis_mask>(mut self, value: ArgType) -> Self { - self.new_axis_mask = ::std::option::Option::Some(value.into()); + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); self } - /// Sets the `shrink_axis_mask` attribute. - pub fn shrink_axis_mask>(mut self, value: ArgType) -> Self { - self.shrink_axis_mask = ::std::option::Option::Some(value.into()); + /// Sets the `mini_batch_splits` attribute. + pub fn mini_batch_splits>( + mut self, + value: ArgType, + ) -> Self { + self.mini_batch_splits = ::std::option::Option::Some(value.into()); self } @@ -203670,170 +209025,141 @@ impl StridedSlice { self } - /// Builds the `StridedSlice` operation. + /// Builds the `StoreMinibatchStatisticsInFdo` operation. 
pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, - O3: ::std::convert::Into, >( &self, - input: O0, - begin: O1, - end: O2, - strides: O3, + program_key: O0, + max_ids: O1, + max_uniques: O2, scope: &mut crate::Scope, ) -> crate::Result { self.build_impl( - input.into(), - begin.into(), - end.into(), - strides.into(), + program_key.into(), + max_ids.into(), + max_uniques.into(), scope, ) } fn build_impl( &self, - input: crate::Output, - begin: crate::Output, - end: crate::Output, - strides: crate::Output, + program_key: crate::Output, + max_ids: crate::Output, + max_uniques: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("StridedSlice", |nd| { - nd.add_input(input); - nd.add_input(begin); - nd.add_input(end); - nd.add_input(strides); + scope.new_operation("StoreMinibatchStatisticsInFdo", |nd| { + nd.add_input(program_key); + nd.add_input(max_ids); + nd.add_input(max_uniques); for op in &self.control_inputs { nd.add_control_input(op); } - if let ::std::option::Option::Some(value) = &self.T { - nd.set_attr_type("T", *value)?; + if let ::std::option::Option::Some(value) = &self.sample_count { + nd.set_attr_int("sample_count", *value)?; } - if let ::std::option::Option::Some(value) = &self.Index { - nd.set_attr_type("Index", *value)?; + if let ::std::option::Option::Some(value) = &self.num_replica { + nd.set_attr_int("num_replica", *value)?; } - if let ::std::option::Option::Some(value) = &self.begin_mask { - nd.set_attr_int("begin_mask", *value)?; - } - if let ::std::option::Option::Some(value) = &self.end_mask { - nd.set_attr_int("end_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; } - if let ::std::option::Option::Some(value) = &self.ellipsis_mask { - nd.set_attr_int("ellipsis_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + nd.set_attr_int("num_sc_per_chip", *value)?; } - if let ::std::option::Option::Some(value) = &self.new_axis_mask { - nd.set_attr_int("new_axis_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; } - if let ::std::option::Option::Some(value) = &self.shrink_axis_mask { - nd.set_attr_int("shrink_axis_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + nd.set_attr_string("mini_batch_splits", value)?; } ::std::result::Result::Ok(()) }) } - /// Builds a new instance of 'StridedSlice' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'StoreMinibatchStatisticsInFdo' Operation with it's Outputs and Inputs exposed as methods. 
pub fn build_instance( &self, - input: crate::Output, - begin: crate::Output, - end: crate::Output, - strides: crate::Output, + program_key: crate::Output, + max_ids: crate::Output, + max_uniques: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("StridedSlice", |builder| { - builder.add_input(input); - builder.add_input(begin); - builder.add_input(end); - builder.add_input(strides); - if let ::std::option::Option::Some(value) = &self.T { - builder.set_attr_type("T", *value)?; + ) -> crate::Result { + let op = scope.new_operation("StoreMinibatchStatisticsInFdo", |builder| { + builder.add_input(program_key); + builder.add_input(max_ids); + builder.add_input(max_uniques); + if let ::std::option::Option::Some(value) = &self.sample_count { + builder.set_attr_int("sample_count", *value)?; } - if let ::std::option::Option::Some(value) = &self.Index { - builder.set_attr_type("Index", *value)?; + if let ::std::option::Option::Some(value) = &self.num_replica { + builder.set_attr_int("num_replica", *value)?; } - if let ::std::option::Option::Some(value) = &self.begin_mask { - builder.set_attr_int("begin_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; } - if let ::std::option::Option::Some(value) = &self.end_mask { - builder.set_attr_int("end_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.num_sc_per_chip { + builder.set_attr_int("num_sc_per_chip", *value)?; } - if let ::std::option::Option::Some(value) = &self.ellipsis_mask { - builder.set_attr_int("ellipsis_mask", *value)?; - } - if let ::std::option::Option::Some(value) = &self.new_axis_mask { - builder.set_attr_int("new_axis_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; } - if let ::std::option::Option::Some(value) = &self.shrink_axis_mask { - builder.set_attr_int("shrink_axis_mask", *value)?; + if let ::std::option::Option::Some(value) = &self.mini_batch_splits { + builder.set_attr_string("mini_batch_splits", value)?; } ::std::result::Result::Ok(()) })?; - Ok(StridedSliceInst { op }) + Ok(StoreMinibatchStatisticsInFdoInst { op }) } } -impl StridedSliceInst { - /// Returns the 'output' Output of this 'StridedSlice' operation. - pub fn output(&self) -> crate::Output { - crate::Output { - operation: self.op.clone(), - index: 0, - } - } - /// Returns the 'input' Input of this 'StridedSlice' operation. - pub fn input(&self) -> crate::Input { +impl StoreMinibatchStatisticsInFdoInst { + /// Returns the 'program_key' Input of this 'StoreMinibatchStatisticsInFdo' operation. + pub fn program_key(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'begin' Input of this 'StridedSlice' operation. - pub fn begin(&self) -> crate::Input { + /// Returns the 'max_ids' Input of this 'StoreMinibatchStatisticsInFdo' operation. + pub fn max_ids(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'end' Input of this 'StridedSlice' operation. - pub fn end(&self) -> crate::Input { + /// Returns the 'max_uniques' Input of this 'StoreMinibatchStatisticsInFdo' operation. + pub fn max_uniques(&self) -> crate::Input { crate::Input { operation: &self.op, index: 2, } } - /// Returns the 'strides' Input of this 'StridedSlice' operation. 
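// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not generated code): every setter on the
// new StoreMinibatchStatisticsInFdo builder maps onto one set_attr_int /
// set_attr_string call above. The attribute values here are made up; the op is
// only meaningful inside a SparseCore/TPU embedding pipeline.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn store_minibatch_stats_sketch(scope: &mut Scope) -> Result<(), Status> {
    let program_key = ops::Placeholder::new().dtype(DataType::String).build(scope)?;
    let max_ids = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    let max_uniques = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    let _op = ops::StoreMinibatchStatisticsInFdo::new()
        .sample_count(128)
        .num_replica(2)
        .feature_width(16)
        .num_sc_per_chip(4)
        .table_name("table_0")
        .mini_batch_splits("splits_0")
        .build(program_key, max_ids, max_uniques, scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------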
- pub fn strides(&self) -> crate::Input { - crate::Input { - operation: &self.op, - index: 3, - } - } } -impl From for crate::Operation { - fn from(inst: StridedSliceInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: StoreMinibatchStatisticsInFdoInst) -> crate::Operation { inst.op } } -/// Shorthand for `StridedSlice::new().build(input, begin, end, strides, scope)`. -pub fn strided_slice< +/// Shorthand for `StoreMinibatchStatisticsInFdo::new().build(program_key, max_ids, max_uniques, scope)`. +pub fn store_minibatch_statistics_in_fdo< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, - O3: ::std::convert::Into, >( - input: O0, - begin: O1, - end: O2, - strides: O3, + program_key: O0, + max_ids: O1, + max_uniques: O2, scope: &mut crate::Scope, ) -> crate::Result { - StridedSlice::new().build(input, begin, end, strides, scope) + StoreMinibatchStatisticsInFdo::new().build(program_key, max_ids, max_uniques, scope) } -/// Builder for the `StridedSliceAssign` operation. +/// Builder for the `StridedSlice` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct StridedSliceAssign { +pub struct StridedSlice { T: ::std::option::Option, Index: ::std::option::Option, begin_mask: ::std::option::Option, @@ -203843,15 +209169,15 @@ pub struct StridedSliceAssign { shrink_axis_mask: ::std::option::Option, control_inputs: ::std::vec::Vec, } -/// An instance of 'StridedSliceAssign' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'StridedSlice' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct StridedSliceAssignInst { - /// An instance of a fully built StridedSliceAssign Operation in a Tensorflow graph. +pub struct StridedSliceInst { + /// An instance of a fully built StridedSlice Operation in a Tensorflow graph. pub op: crate::Operation, } -impl StridedSliceAssign { - /// Creates a new `StridedSliceAssign`. +impl StridedSlice { + /// Creates a new `StridedSlice`. pub fn new() -> Self { Self::default() } @@ -203904,46 +209230,41 @@ impl StridedSliceAssign { self } - /// Builds the `StridedSliceAssign` operation. + /// Builds the `StridedSlice` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, O3: ::std::convert::Into, - O4: ::std::convert::Into, >( &self, - ref_: O0, + input: O0, begin: O1, end: O2, strides: O3, - value: O4, scope: &mut crate::Scope, ) -> crate::Result { self.build_impl( - ref_.into(), + input.into(), begin.into(), end.into(), strides.into(), - value.into(), scope, ) } fn build_impl( &self, - ref_: crate::Output, + input: crate::Output, begin: crate::Output, end: crate::Output, strides: crate::Output, - value: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("StridedSliceAssign", |nd| { - nd.add_input(ref_); + scope.new_operation("StridedSlice", |nd| { + nd.add_input(input); nd.add_input(begin); nd.add_input(end); nd.add_input(strides); - nd.add_input(value); for op in &self.control_inputs { nd.add_control_input(op); } @@ -203972,22 +209293,20 @@ impl StridedSliceAssign { }) } - /// Builds a new instance of 'StridedSliceAssign' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'StridedSlice' Operation with it's Outputs and Inputs exposed as methods. 
pub fn build_instance( &self, - ref_: crate::Output, + input: crate::Output, begin: crate::Output, end: crate::Output, strides: crate::Output, - value: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("StridedSliceAssign", |builder| { - builder.add_input(ref_); + ) -> crate::Result { + let op = scope.new_operation("StridedSlice", |builder| { + builder.add_input(input); builder.add_input(begin); builder.add_input(end); builder.add_input(strides); - builder.add_input(value); if let ::std::option::Option::Some(value) = &self.T { builder.set_attr_type("T", *value)?; } @@ -204011,79 +209330,70 @@ impl StridedSliceAssign { } ::std::result::Result::Ok(()) })?; - Ok(StridedSliceAssignInst { op }) + Ok(StridedSliceInst { op }) } } -impl StridedSliceAssignInst { - /// Returns the 'output_ref' Output of this 'StridedSliceAssign' operation. - pub fn output_ref(&self) -> crate::Output { +impl StridedSliceInst { + /// Returns the 'output' Output of this 'StridedSlice' operation. + pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'ref_' Input of this 'StridedSliceAssign' operation. - pub fn ref_(&self) -> crate::Input { + /// Returns the 'input' Input of this 'StridedSlice' operation. + pub fn input(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'begin' Input of this 'StridedSliceAssign' operation. + /// Returns the 'begin' Input of this 'StridedSlice' operation. pub fn begin(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } - /// Returns the 'end' Input of this 'StridedSliceAssign' operation. + /// Returns the 'end' Input of this 'StridedSlice' operation. pub fn end(&self) -> crate::Input { crate::Input { operation: &self.op, index: 2, } } - /// Returns the 'strides' Input of this 'StridedSliceAssign' operation. + /// Returns the 'strides' Input of this 'StridedSlice' operation. pub fn strides(&self) -> crate::Input { crate::Input { operation: &self.op, index: 3, } } - /// Returns the 'value' Input of this 'StridedSliceAssign' operation. - pub fn value(&self) -> crate::Input { - crate::Input { - operation: &self.op, - index: 4, - } - } } -impl From for crate::Operation { - fn from(inst: StridedSliceAssignInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: StridedSliceInst) -> crate::Operation { inst.op } } -/// Shorthand for `StridedSliceAssign::new().build(ref_, begin, end, strides, value, scope)`. -pub fn strided_slice_assign< +/// Shorthand for `StridedSlice::new().build(input, begin, end, strides, scope)`. +pub fn strided_slice< O0: ::std::convert::Into, O1: ::std::convert::Into, O2: ::std::convert::Into, O3: ::std::convert::Into, - O4: ::std::convert::Into, >( - ref_: O0, + input: O0, begin: O1, end: O2, strides: O3, - value: O4, scope: &mut crate::Scope, ) -> crate::Result { - StridedSliceAssign::new().build(ref_, begin, end, strides, value, scope) + StridedSlice::new().build(input, begin, end, strides, scope) } -/// Builder for the `StridedSliceGrad` operation. +/// Builder for the `StridedSliceAssign` operation. 
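// ---------------------------------------------------------------------------
// Editor's note: StridedSlice itself is unchanged in 2.16; the churn above is
// ordering noise from StoreMinibatchStatisticsInFdo sorting ahead of it. Since
// the mask attributes are easy to get wrong, here is an illustrative user-side
// sketch (using the crate's `ops::constant` helper) expressing x[1..] over a
// 1-D tensor.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn strided_slice_sketch(scope: &mut Scope) -> Result<(), Status> {
    let x = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let begin = ops::constant(&[1i32][..], scope)?;
    let end = ops::constant(&[0i32][..], scope)?; // ignored: end_mask bit 0 is set
    let strides = ops::constant(&[1i32][..], scope)?;
    let _sliced = ops::StridedSlice::new()
        .end_mask(1) // one bit per dimension; bit 0 = run dim 0 to its end
        .build(x, begin, end, strides, scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------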
#[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct StridedSliceGrad { +pub struct StridedSliceAssign { T: ::std::option::Option, Index: ::std::option::Option, begin_mask: ::std::option::Option, @@ -204093,15 +209403,15 @@ pub struct StridedSliceGrad { shrink_axis_mask: ::std::option::Option, control_inputs: ::std::vec::Vec, } -/// An instance of 'StridedSliceGrad' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'StridedSliceAssign' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct StridedSliceGradInst { - /// An instance of a fully built StridedSliceGrad Operation in a Tensorflow graph. +pub struct StridedSliceAssignInst { + /// An instance of a fully built StridedSliceAssign Operation in a Tensorflow graph. pub op: crate::Operation, } -impl StridedSliceGrad { - /// Creates a new `StridedSliceGrad`. +impl StridedSliceAssign { + /// Creates a new `StridedSliceAssign`. pub fn new() -> Self { Self::default() } @@ -204154,7 +209464,7 @@ impl StridedSliceGrad { self } - /// Builds the `StridedSliceGrad` operation. + /// Builds the `StridedSliceAssign` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, @@ -204163,37 +209473,287 @@ impl StridedSliceGrad { O4: ::std::convert::Into, >( &self, - shape: O0, + ref_: O0, begin: O1, end: O2, strides: O3, - dy: O4, + value: O4, scope: &mut crate::Scope, ) -> crate::Result { self.build_impl( - shape.into(), + ref_.into(), begin.into(), end.into(), strides.into(), - dy.into(), + value.into(), scope, ) } fn build_impl( &self, - shape: crate::Output, + ref_: crate::Output, begin: crate::Output, end: crate::Output, strides: crate::Output, - dy: crate::Output, + value: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("StridedSliceGrad", |nd| { - nd.add_input(shape); + scope.new_operation("StridedSliceAssign", |nd| { + nd.add_input(ref_); nd.add_input(begin); nd.add_input(end); nd.add_input(strides); - nd.add_input(dy); + nd.add_input(value); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Index { + nd.set_attr_type("Index", *value)?; + } + if let ::std::option::Option::Some(value) = &self.begin_mask { + nd.set_attr_int("begin_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.end_mask { + nd.set_attr_int("end_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.ellipsis_mask { + nd.set_attr_int("ellipsis_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.new_axis_mask { + nd.set_attr_int("new_axis_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.shrink_axis_mask { + nd.set_attr_int("shrink_axis_mask", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'StridedSliceAssign' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + ref_: crate::Output, + begin: crate::Output, + end: crate::Output, + strides: crate::Output, + value: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("StridedSliceAssign", |builder| { + builder.add_input(ref_); + builder.add_input(begin); + builder.add_input(end); + builder.add_input(strides); + builder.add_input(value); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type("T", *value)?; + } + if let ::std::option::Option::Some(value) = &self.Index { + builder.set_attr_type("Index", *value)?; + } + if let ::std::option::Option::Some(value) = &self.begin_mask { + builder.set_attr_int("begin_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.end_mask { + builder.set_attr_int("end_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.ellipsis_mask { + builder.set_attr_int("ellipsis_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.new_axis_mask { + builder.set_attr_int("new_axis_mask", *value)?; + } + if let ::std::option::Option::Some(value) = &self.shrink_axis_mask { + builder.set_attr_int("shrink_axis_mask", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(StridedSliceAssignInst { op }) + } +} +impl StridedSliceAssignInst { + /// Returns the 'output_ref' Output of this 'StridedSliceAssign' operation. + pub fn output_ref(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'ref_' Input of this 'StridedSliceAssign' operation. + pub fn ref_(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'begin' Input of this 'StridedSliceAssign' operation. + pub fn begin(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'end' Input of this 'StridedSliceAssign' operation. + pub fn end(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'strides' Input of this 'StridedSliceAssign' operation. + pub fn strides(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'value' Input of this 'StridedSliceAssign' operation. + pub fn value(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } +} +impl From for crate::Operation { + fn from(inst: StridedSliceAssignInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `StridedSliceAssign::new().build(ref_, begin, end, strides, value, scope)`. +pub fn strided_slice_assign< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, +>( + ref_: O0, + begin: O1, + end: O2, + strides: O3, + value: O4, + scope: &mut crate::Scope, +) -> crate::Result { + StridedSliceAssign::new().build(ref_, begin, end, strides, value, scope) +} + +/// Builder for the `StridedSliceGrad` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct StridedSliceGrad { + T: ::std::option::Option, + Index: ::std::option::Option, + begin_mask: ::std::option::Option, + end_mask: ::std::option::Option, + ellipsis_mask: ::std::option::Option, + new_axis_mask: ::std::option::Option, + shrink_axis_mask: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'StridedSliceGrad' Operation with it's Outputs and Inputs exposed as methods. 
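// ---------------------------------------------------------------------------
// Editor's note on the StridedSliceGrad builder defined just below: all five
// inputs are ordinary tensors, so wiring it up is mechanical. Illustrative
// user-side sketch; `shape` is the original input's shape and `dy` the
// incoming gradient of the slice.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn strided_slice_grad_sketch(scope: &mut Scope) -> Result<(), Status> {
    let shape = ops::constant(&[8i32][..], scope)?;
    let begin = ops::constant(&[1i32][..], scope)?;
    let end = ops::constant(&[4i32][..], scope)?;
    let strides = ops::constant(&[1i32][..], scope)?;
    let dy = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let _dx = ops::strided_slice_grad(shape, begin, end, strides, dy, scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------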
+#[derive(Debug, Clone)] +pub struct StridedSliceGradInst { + /// An instance of a fully built StridedSliceGrad Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl StridedSliceGrad { + /// Creates a new `StridedSliceGrad`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>(mut self, value: ArgType) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Index` attribute. + pub fn Index>(mut self, value: ArgType) -> Self { + self.Index = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `begin_mask` attribute. + pub fn begin_mask>(mut self, value: ArgType) -> Self { + self.begin_mask = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `end_mask` attribute. + pub fn end_mask>(mut self, value: ArgType) -> Self { + self.end_mask = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `ellipsis_mask` attribute. + pub fn ellipsis_mask>(mut self, value: ArgType) -> Self { + self.ellipsis_mask = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `new_axis_mask` attribute. + pub fn new_axis_mask>(mut self, value: ArgType) -> Self { + self.new_axis_mask = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `shrink_axis_mask` attribute. + pub fn shrink_axis_mask>(mut self, value: ArgType) -> Self { + self.shrink_axis_mask = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `StridedSliceGrad` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + >( + &self, + shape: O0, + begin: O1, + end: O2, + strides: O3, + dy: O4, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + shape.into(), + begin.into(), + end.into(), + strides.into(), + dy.into(), + scope, + ) + } + fn build_impl( + &self, + shape: crate::Output, + begin: crate::Output, + end: crate::Output, + strides: crate::Output, + dy: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("StridedSliceGrad", |nd| { + nd.add_input(shape); + nd.add_input(begin); + nd.add_input(end); + nd.add_input(strides); + nd.add_input(dy); for op in &self.control_inputs { nd.add_control_input(op); } @@ -207018,6 +212578,171 @@ pub fn tfrecord_dataset< TFRecordDataset::new().build(filenames, compression_type, buffer_size, scope) } +/// Builder for the `TFRecordDatasetV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct TFRecordDatasetV2 { + metadata: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'TFRecordDatasetV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct TFRecordDatasetV2Inst { + /// An instance of a fully built TFRecordDatasetV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl TFRecordDatasetV2 { + /// Creates a new `TFRecordDatasetV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `metadata` attribute. + pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. 
+ pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `TFRecordDatasetV2` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + filenames: O0, + compression_type: O1, + buffer_size: O2, + byte_offsets: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + filenames.into(), + compression_type.into(), + buffer_size.into(), + byte_offsets.into(), + scope, + ) + } + fn build_impl( + &self, + filenames: crate::Output, + compression_type: crate::Output, + buffer_size: crate::Output, + byte_offsets: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("TFRecordDatasetV2", |nd| { + nd.add_input(filenames); + nd.add_input(compression_type); + nd.add_input(buffer_size); + nd.add_input(byte_offsets); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.metadata { + nd.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'TFRecordDatasetV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + filenames: crate::Output, + compression_type: crate::Output, + buffer_size: crate::Output, + byte_offsets: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("TFRecordDatasetV2", |builder| { + builder.add_input(filenames); + builder.add_input(compression_type); + builder.add_input(buffer_size); + builder.add_input(byte_offsets); + if let ::std::option::Option::Some(value) = &self.metadata { + builder.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(TFRecordDatasetV2Inst { op }) + } +} +impl TFRecordDatasetV2Inst { + /// Returns the 'handle' Output of this 'TFRecordDatasetV2' operation. + pub fn handle(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'filenames' Input of this 'TFRecordDatasetV2' operation. + pub fn filenames(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'compression_type' Input of this 'TFRecordDatasetV2' operation. + pub fn compression_type(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'buffer_size' Input of this 'TFRecordDatasetV2' operation. + pub fn buffer_size(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'byte_offsets' Input of this 'TFRecordDatasetV2' operation. + pub fn byte_offsets(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: TFRecordDatasetV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `TFRecordDatasetV2::new().build(filenames, compression_type, buffer_size, byte_offsets, scope)`. +pub fn tfrecord_dataset_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + filenames: O0, + compression_type: O1, + buffer_size: O2, + byte_offsets: O3, + scope: &mut crate::Scope, +) -> crate::Result { + TFRecordDatasetV2::new().build( + filenames, + compression_type, + buffer_size, + byte_offsets, + scope, + ) +} + /// Builder for the `TFRecordReader` operation. 
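// ---------------------------------------------------------------------------
// Editor's note: TFRecordDatasetV2 is TFRecordDataset plus a `byte_offsets`
// input, letting each file start reading at a given byte offset. Illustrative
// user-side sketch, not generated code; the file name and sizes are made up.
use tensorflow::ops;
use tensorflow::{Scope, Status};

fn tfrecord_dataset_v2_sketch(scope: &mut Scope) -> Result<(), Status> {
    let filenames = ops::constant(&["data.tfrecord".to_string()][..], scope)?;
    let compression_type = ops::constant("".to_string(), scope)?;
    let buffer_size = ops::constant(262_144i64, scope)?;
    let byte_offsets = ops::constant(&[0i64][..], scope)?; // one offset per file
    let _handle = ops::tfrecord_dataset_v2(
        filenames,
        compression_type,
        buffer_size,
        byte_offsets,
        scope,
    )?;
    Ok(())
}
// ---------------------------------------------------------------------------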
#[derive(::std::fmt::Debug, ::std::default::Default)] pub struct TFRecordReader { @@ -207242,6 +212967,110 @@ pub fn tfrecord_reader_v2(scope: &mut crate::Scope) -> crate::Result>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'TPUAnnotateTensorsWithDynamicShape' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct TPUAnnotateTensorsWithDynamicShapeInst { + /// An instance of a fully built TPUAnnotateTensorsWithDynamicShape Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl TPUAnnotateTensorsWithDynamicShape { + /// Creates a new `TPUAnnotateTensorsWithDynamicShape`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `T` attribute. + pub fn T>>( + mut self, + value: ArgType, + ) -> Self { + self.T = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `TPUAnnotateTensorsWithDynamicShape` operation. + pub fn build>( + &self, + tensors: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(tensors.into(), scope) + } + fn build_impl( + &self, + tensors: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("TPUAnnotateTensorsWithDynamicShape", |nd| { + nd.add_input(tensors); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type_list("T", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'TPUAnnotateTensorsWithDynamicShape' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + tensors: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("TPUAnnotateTensorsWithDynamicShape", |builder| { + builder.add_input(tensors); + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type_list("T", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(TPUAnnotateTensorsWithDynamicShapeInst { op }) + } +} +impl TPUAnnotateTensorsWithDynamicShapeInst { + /// Returns the 'tpu_tensors' Output of this 'TPUAnnotateTensorsWithDynamicShape' operation. + pub fn tpu_tensors(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'tensors' Input of this 'TPUAnnotateTensorsWithDynamicShape' operation. + pub fn tensors(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: TPUAnnotateTensorsWithDynamicShapeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `TPUAnnotateTensorsWithDynamicShape::new().build(tensors, scope)`. +pub fn tpuannotate_tensors_with_dynamic_shape>( + tensors: O0, + scope: &mut crate::Scope, +) -> crate::Result { + TPUAnnotateTensorsWithDynamicShape::new().build(tensors, scope) +} + /// Builder for the `TPUCompilationResult` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct TPUCompilationResult { @@ -207599,35 +213428,38 @@ pub fn tpucompile_succeeded_assert>( TPUCompileSucceededAssert::new().build(compilation_status, scope) } -/// Builder for the `TPUEmbeddingActivations` operation. +/// Builder for the `TPUCopyWithDynamicShape` operation. 
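// ---------------------------------------------------------------------------
// Editor's note on TPUAnnotateTensorsWithDynamicShape (bound earlier in this
// hunk): it tags already-transferred TPU tensors with dynamic-shape metadata.
// Illustrative graph-construction sketch only; actually running the op
// requires a TPU system.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn tpu_annotate_sketch(scope: &mut Scope) -> Result<(), Status> {
    let tensors = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let _annotated = ops::TPUAnnotateTensorsWithDynamicShape::new()
        .T(vec![DataType::Float]) // one entry per tensor in the list input
        .build(tensors, scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------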
#[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct TPUEmbeddingActivations { - table_id: ::std::option::Option, - lookup_id: ::std::option::Option, +pub struct TPUCopyWithDynamicShape { + N: ::std::option::Option, + T: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'TPUEmbeddingActivations' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'TPUCopyWithDynamicShape' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct TPUEmbeddingActivationsInst { - /// An instance of a fully built TPUEmbeddingActivations Operation in a Tensorflow graph. +pub struct TPUCopyWithDynamicShapeInst { + /// An instance of a fully built TPUCopyWithDynamicShape Operation in a Tensorflow graph. pub op: crate::Operation, } -impl TPUEmbeddingActivations { - /// Creates a new `TPUEmbeddingActivations`. +impl TPUCopyWithDynamicShape { + /// Creates a new `TPUCopyWithDynamicShape`. pub fn new() -> Self { Self::default() } - /// Sets the `table_id` attribute. - pub fn table_id>(mut self, value: ArgType) -> Self { - self.table_id = ::std::option::Option::Some(value.into()); + /// Sets the `N` attribute. + pub fn N>(mut self, value: ArgType) -> Self { + self.N = ::std::option::Option::Some(value.into()); self } - /// Sets the `lookup_id` attribute. - pub fn lookup_id>(mut self, value: ArgType) -> Self { - self.lookup_id = ::std::option::Option::Some(value.into()); + /// Sets the `T` attribute. + pub fn T>>( + mut self, + value: ArgType, + ) -> Self { + self.T = ::std::option::Option::Some(value.into()); self } @@ -207637,136 +213469,132 @@ impl TPUEmbeddingActivations { self } - /// Builds the `TPUEmbeddingActivations` operation. + /// Builds the `TPUCopyWithDynamicShape` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, >( &self, - embedding_variable: O0, - sliced_activations: O1, + tensors: O0, + unpadded_sizes: O1, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(embedding_variable.into(), sliced_activations.into(), scope) + self.build_impl(tensors.into(), unpadded_sizes.into(), scope) } fn build_impl( &self, - embedding_variable: crate::Output, - sliced_activations: crate::Output, + tensors: crate::Output, + unpadded_sizes: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("TPUEmbeddingActivations", |nd| { - nd.add_input(embedding_variable); - nd.add_input(sliced_activations); + scope.new_operation("TPUCopyWithDynamicShape", |nd| { + nd.add_input(tensors); + nd.add_input(unpadded_sizes); for op in &self.control_inputs { nd.add_control_input(op); } - if let ::std::option::Option::Some(value) = &self.table_id { - nd.set_attr_int("table_id", *value)?; + if let ::std::option::Option::Some(value) = &self.N { + nd.set_attr_int("N", *value)?; } - if let ::std::option::Option::Some(value) = &self.lookup_id { - nd.set_attr_int("lookup_id", *value)?; + if let ::std::option::Option::Some(value) = &self.T { + nd.set_attr_type_list("T", value)?; } ::std::result::Result::Ok(()) }) } - /// Builds a new instance of 'TPUEmbeddingActivations' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'TPUCopyWithDynamicShape' Operation with it's Outputs and Inputs exposed as methods. 
pub fn build_instance( &self, - embedding_variable: crate::Output, - sliced_activations: crate::Output, + tensors: crate::Output, + unpadded_sizes: Vec, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("TPUEmbeddingActivations", |builder| { - builder.add_input(embedding_variable); - builder.add_input(sliced_activations); - if let ::std::option::Option::Some(value) = &self.table_id { - builder.set_attr_int("table_id", *value)?; - } - if let ::std::option::Option::Some(value) = &self.lookup_id { - builder.set_attr_int("lookup_id", *value)?; + ) -> crate::Result { + let op = scope.new_operation("TPUCopyWithDynamicShape", |builder| { + builder.add_input(tensors); + builder.add_input_list(&unpadded_sizes); + builder.set_attr_int("N", unpadded_sizes.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.T { + builder.set_attr_type_list("T", value)?; } ::std::result::Result::Ok(()) })?; - Ok(TPUEmbeddingActivationsInst { op }) + Ok(TPUCopyWithDynamicShapeInst { op }) } } -impl TPUEmbeddingActivationsInst { - /// Returns the 'output' Output of this 'TPUEmbeddingActivations' operation. - pub fn output(&self) -> crate::Output { +impl TPUCopyWithDynamicShapeInst { + /// Returns the 'tpu_tensors' Output of this 'TPUCopyWithDynamicShape' operation. + pub fn tpu_tensors(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'embedding_variable' Input of this 'TPUEmbeddingActivations' operation. - pub fn embedding_variable(&self) -> crate::Input { + /// Returns the 'tensors' Input of this 'TPUCopyWithDynamicShape' operation. + pub fn tensors(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'sliced_activations' Input of this 'TPUEmbeddingActivations' operation. - pub fn sliced_activations(&self) -> crate::Input { - crate::Input { - operation: &self.op, - index: 1, + /// Returns a Vector of unpadded_sizes for 'unpadded_sizes' Input of this TPUCopyWithDynamicShape operation. + pub fn unpadded_sizes(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 1..self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); } + Ok(Inputs) } } -impl From for crate::Operation { - fn from(inst: TPUEmbeddingActivationsInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: TPUCopyWithDynamicShapeInst) -> crate::Operation { inst.op } } -/// Shorthand for `TPUEmbeddingActivations::new().build(embedding_variable, sliced_activations, scope)`. -pub fn tpuembedding_activations< +/// Shorthand for `TPUCopyWithDynamicShape::new().build(tensors, unpadded_sizes, scope)`. +pub fn tpucopy_with_dynamic_shape< O0: ::std::convert::Into, O1: ::std::convert::Into, >( - embedding_variable: O0, - sliced_activations: O1, + tensors: O0, + unpadded_sizes: O1, scope: &mut crate::Scope, ) -> crate::Result { - TPUEmbeddingActivations::new().build(embedding_variable, sliced_activations, scope) + TPUCopyWithDynamicShape::new().build(tensors, unpadded_sizes, scope) } -/// Builder for the `TPUExecute` operation. +/// Builder for the `TPUEmbeddingActivations` operation. 
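// ---------------------------------------------------------------------------
// Editor's note: unlike most generated `build_instance` methods, the one above
// takes its variadic `unpadded_sizes` input as a Vec<Output>, adds it with
// add_input_list, and derives `N` from the list length, so `N` never needs to
// be set by hand on that path (the `.clone()` before `.len()` in the generated
// body is redundant but harmless generator output). Illustrative sketch:
use tensorflow::ops;
use tensorflow::{DataType, Output, Scope, Status};

fn tpu_copy_sketch(scope: &mut Scope) -> Result<(), Status> {
    let tensors = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let size0 = ops::Placeholder::new().dtype(DataType::Int32).build(scope)?;
    let sizes: Vec<Output> = vec![size0.into()];
    let inst = ops::TPUCopyWithDynamicShape::new()
        .T(vec![DataType::Float])
        .build_instance(tensors.into(), sizes, scope)?;
    let _tpu_tensors = inst.tpu_tensors();
    Ok(())
}
// ---------------------------------------------------------------------------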
#[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct TPUExecute { - Targs: ::std::option::Option<::std::vec::Vec>, - Tresults: ::std::option::Option<::std::vec::Vec>, +pub struct TPUEmbeddingActivations { + table_id: ::std::option::Option, + lookup_id: ::std::option::Option, control_inputs: ::std::vec::Vec, } -/// An instance of 'TPUExecute' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'TPUEmbeddingActivations' Operation with it's Outputs and Inputs exposed as methods. #[derive(Debug, Clone)] -pub struct TPUExecuteInst { - /// An instance of a fully built TPUExecute Operation in a Tensorflow graph. +pub struct TPUEmbeddingActivationsInst { + /// An instance of a fully built TPUEmbeddingActivations Operation in a Tensorflow graph. pub op: crate::Operation, } -impl TPUExecute { - /// Creates a new `TPUExecute`. +impl TPUEmbeddingActivations { + /// Creates a new `TPUEmbeddingActivations`. pub fn new() -> Self { Self::default() } - /// Sets the `Targs` attribute. - pub fn Targs>>( - mut self, - value: ArgType, - ) -> Self { - self.Targs = ::std::option::Option::Some(value.into()); + /// Sets the `table_id` attribute. + pub fn table_id>(mut self, value: ArgType) -> Self { + self.table_id = ::std::option::Option::Some(value.into()); self } - /// Sets the `Tresults` attribute. - pub fn Tresults>>( - mut self, - value: ArgType, - ) -> Self { - self.Tresults = ::std::option::Option::Some(value.into()); + /// Sets the `lookup_id` attribute. + pub fn lookup_id>(mut self, value: ArgType) -> Self { + self.lookup_id = ::std::option::Option::Some(value.into()); self } @@ -207776,119 +213604,258 @@ impl TPUExecute { self } - /// Builds the `TPUExecute` operation. + /// Builds the `TPUEmbeddingActivations` operation. pub fn build< O0: ::std::convert::Into, O1: ::std::convert::Into, >( &self, - args: O0, - key: O1, + embedding_variable: O0, + sliced_activations: O1, scope: &mut crate::Scope, ) -> crate::Result { - self.build_impl(args.into(), key.into(), scope) + self.build_impl(embedding_variable.into(), sliced_activations.into(), scope) } fn build_impl( &self, - args: crate::Output, - key: crate::Output, + embedding_variable: crate::Output, + sliced_activations: crate::Output, scope: &mut crate::Scope, ) -> crate::Result { - scope.new_operation("TPUExecute", |nd| { - nd.add_input(args); - nd.add_input(key); + scope.new_operation("TPUEmbeddingActivations", |nd| { + nd.add_input(embedding_variable); + nd.add_input(sliced_activations); for op in &self.control_inputs { nd.add_control_input(op); } - if let ::std::option::Option::Some(value) = &self.Targs { - nd.set_attr_type_list("Targs", value)?; + if let ::std::option::Option::Some(value) = &self.table_id { + nd.set_attr_int("table_id", *value)?; } - if let ::std::option::Option::Some(value) = &self.Tresults { - nd.set_attr_type_list("Tresults", value)?; + if let ::std::option::Option::Some(value) = &self.lookup_id { + nd.set_attr_int("lookup_id", *value)?; } ::std::result::Result::Ok(()) }) } - /// Builds a new instance of 'TPUExecute' Operation with it's Outputs and Inputs exposed as methods. + /// Builds a new instance of 'TPUEmbeddingActivations' Operation with it's Outputs and Inputs exposed as methods. 
pub fn build_instance( &self, - args: crate::Output, - key: crate::Output, + embedding_variable: crate::Output, + sliced_activations: crate::Output, scope: &mut crate::Scope, - ) -> crate::Result { - let op = scope.new_operation("TPUExecute", |builder| { - builder.add_input(args); - builder.add_input(key); - if let ::std::option::Option::Some(value) = &self.Targs { - builder.set_attr_type_list("Targs", value)?; + ) -> crate::Result { + let op = scope.new_operation("TPUEmbeddingActivations", |builder| { + builder.add_input(embedding_variable); + builder.add_input(sliced_activations); + if let ::std::option::Option::Some(value) = &self.table_id { + builder.set_attr_int("table_id", *value)?; } - if let ::std::option::Option::Some(value) = &self.Tresults { - builder.set_attr_type_list("Tresults", value)?; + if let ::std::option::Option::Some(value) = &self.lookup_id { + builder.set_attr_int("lookup_id", *value)?; } ::std::result::Result::Ok(()) })?; - Ok(TPUExecuteInst { op }) + Ok(TPUEmbeddingActivationsInst { op }) } } -impl TPUExecuteInst { - /// Returns the 'results' Output of this 'TPUExecute' operation. - pub fn results(&self) -> crate::Output { +impl TPUEmbeddingActivationsInst { + /// Returns the 'output' Output of this 'TPUEmbeddingActivations' operation. + pub fn output(&self) -> crate::Output { crate::Output { operation: self.op.clone(), index: 0, } } - /// Returns the 'args' Input of this 'TPUExecute' operation. - pub fn args(&self) -> crate::Input { + /// Returns the 'embedding_variable' Input of this 'TPUEmbeddingActivations' operation. + pub fn embedding_variable(&self) -> crate::Input { crate::Input { operation: &self.op, index: 0, } } - /// Returns the 'key' Input of this 'TPUExecute' operation. - pub fn key(&self) -> crate::Input { + /// Returns the 'sliced_activations' Input of this 'TPUEmbeddingActivations' operation. + pub fn sliced_activations(&self) -> crate::Input { crate::Input { operation: &self.op, index: 1, } } } -impl From for crate::Operation { - fn from(inst: TPUExecuteInst) -> crate::Operation { +impl From for crate::Operation { + fn from(inst: TPUEmbeddingActivationsInst) -> crate::Operation { inst.op } } -/// Shorthand for `TPUExecute::new().build(args, key, scope)`. -pub fn tpuexecute< +/// Shorthand for `TPUEmbeddingActivations::new().build(embedding_variable, sliced_activations, scope)`. +pub fn tpuembedding_activations< O0: ::std::convert::Into, O1: ::std::convert::Into, >( - args: O0, - key: O1, + embedding_variable: O0, + sliced_activations: O1, scope: &mut crate::Scope, ) -> crate::Result { - TPUExecute::new().build(args, key, scope) + TPUEmbeddingActivations::new().build(embedding_variable, sliced_activations, scope) } -/// Builder for the `TPUExecuteAndUpdateVariables` operation. +/// Builder for the `TPUExecute` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] -pub struct TPUExecuteAndUpdateVariables { +pub struct TPUExecute { Targs: ::std::option::Option<::std::vec::Vec>, Tresults: ::std::option::Option<::std::vec::Vec>, - device_var_reads_indices: ::std::option::Option<::std::vec::Vec>, - device_var_updates_indices: ::std::option::Option<::std::vec::Vec>, control_inputs: ::std::vec::Vec, } -/// An instance of 'TPUExecuteAndUpdateVariables' Operation with it's Outputs and Inputs exposed as methods. +/// An instance of 'TPUExecute' Operation with it's Outputs and Inputs exposed as methods. 
#[derive(Debug, Clone)] -pub struct TPUExecuteAndUpdateVariablesInst { - /// An instance of a fully built TPUExecuteAndUpdateVariables Operation in a Tensorflow graph. +pub struct TPUExecuteInst { + /// An instance of a fully built TPUExecute Operation in a Tensorflow graph. pub op: crate::Operation, } -impl TPUExecuteAndUpdateVariables { - /// Creates a new `TPUExecuteAndUpdateVariables`. +impl TPUExecute { + /// Creates a new `TPUExecute`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `Targs` attribute. + pub fn Targs>>( + mut self, + value: ArgType, + ) -> Self { + self.Targs = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `Tresults` attribute. + pub fn Tresults>>( + mut self, + value: ArgType, + ) -> Self { + self.Tresults = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `TPUExecute` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + >( + &self, + args: O0, + key: O1, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(args.into(), key.into(), scope) + } + fn build_impl( + &self, + args: crate::Output, + key: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("TPUExecute", |nd| { + nd.add_input(args); + nd.add_input(key); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.Targs { + nd.set_attr_type_list("Targs", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tresults { + nd.set_attr_type_list("Tresults", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'TPUExecute' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + args: crate::Output, + key: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("TPUExecute", |builder| { + builder.add_input(args); + builder.add_input(key); + if let ::std::option::Option::Some(value) = &self.Targs { + builder.set_attr_type_list("Targs", value)?; + } + if let ::std::option::Option::Some(value) = &self.Tresults { + builder.set_attr_type_list("Tresults", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(TPUExecuteInst { op }) + } +} +impl TPUExecuteInst { + /// Returns the 'results' Output of this 'TPUExecute' operation. + pub fn results(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'args' Input of this 'TPUExecute' operation. + pub fn args(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'key' Input of this 'TPUExecute' operation. + pub fn key(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } +} +impl From for crate::Operation { + fn from(inst: TPUExecuteInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `TPUExecute::new().build(args, key, scope)`. +pub fn tpuexecute< + O0: ::std::convert::Into, + O1: ::std::convert::Into, +>( + args: O0, + key: O1, + scope: &mut crate::Scope, +) -> crate::Result { + TPUExecute::new().build(args, key, scope) +} + +/// Builder for the `TPUExecuteAndUpdateVariables` operation. 
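// ---------------------------------------------------------------------------
// Editor's note: TPUExecute runs a program previously produced by TPUCompile;
// `key` is the compilation cache key emitted by that op. Illustrative
// graph-construction sketch with placeholders standing in for real inputs;
// running it requires a TPU system.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Status};

fn tpu_execute_sketch(scope: &mut Scope) -> Result<(), Status> {
    let args = ops::Placeholder::new().dtype(DataType::Float).build(scope)?;
    let key = ops::Placeholder::new().dtype(DataType::String).build(scope)?;
    let _results = ops::TPUExecute::new()
        .Targs(vec![DataType::Float])
        .Tresults(vec![DataType::Float])
        .build(args, key, scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------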
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct TPUExecuteAndUpdateVariables { + Targs: ::std::option::Option<::std::vec::Vec>, + Tresults: ::std::option::Option<::std::vec::Vec>, + device_var_reads_indices: ::std::option::Option<::std::vec::Vec>, + device_var_updates_indices: ::std::option::Option<::std::vec::Vec>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'TPUExecuteAndUpdateVariables' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct TPUExecuteAndUpdateVariablesInst { + /// An instance of a fully built TPUExecuteAndUpdateVariables Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl TPUExecuteAndUpdateVariables { + /// Creates a new `TPUExecuteAndUpdateVariables`. pub fn new() -> Self { Self::default() } @@ -228928,6 +234895,102 @@ pub fn unwrap_dataset_variant>( UnwrapDatasetVariant::new().build(input_handle, scope) } +/// Builder for the `UpdateTaskIdAndGlobalCoreArray` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct UpdateTaskIdAndGlobalCoreArray { + task_count: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'UpdateTaskIdAndGlobalCoreArray' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct UpdateTaskIdAndGlobalCoreArrayInst { + /// An instance of a fully built UpdateTaskIdAndGlobalCoreArray Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl UpdateTaskIdAndGlobalCoreArray { + /// Creates a new `UpdateTaskIdAndGlobalCoreArray`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `task_count` attribute. + pub fn task_count>(mut self, value: ArgType) -> Self { + self.task_count = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `UpdateTaskIdAndGlobalCoreArray` operation. + pub fn build>( + &self, + tpu_task_id_to_shard_id: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(tpu_task_id_to_shard_id.into(), scope) + } + fn build_impl( + &self, + tpu_task_id_to_shard_id: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("UpdateTaskIdAndGlobalCoreArray", |nd| { + nd.add_input(tpu_task_id_to_shard_id); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.task_count { + nd.set_attr_int("task_count", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'UpdateTaskIdAndGlobalCoreArray' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + tpu_task_id_to_shard_id: Vec, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("UpdateTaskIdAndGlobalCoreArray", |builder| { + builder.add_input_list(&tpu_task_id_to_shard_id); + builder.set_attr_int("task_count", tpu_task_id_to_shard_id.clone().len() as i64)?; + ::std::result::Result::Ok(()) + })?; + Ok(UpdateTaskIdAndGlobalCoreArrayInst { op }) + } +} +impl UpdateTaskIdAndGlobalCoreArrayInst { + /// Returns a Vector of tpu_task_id_to_shard_id for 'tpu_task_id_to_shard_id' Input of this UpdateTaskIdAndGlobalCoreArray operation. + pub fn tpu_task_id_to_shard_id(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("task_count")? 
as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } +} +impl From for crate::Operation { + fn from(inst: UpdateTaskIdAndGlobalCoreArrayInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `UpdateTaskIdAndGlobalCoreArray::new().build(tpu_task_id_to_shard_id, scope)`. +pub fn update_task_id_and_global_core_array>( + tpu_task_id_to_shard_id: O0, + scope: &mut crate::Scope, +) -> crate::Result { + UpdateTaskIdAndGlobalCoreArray::new().build(tpu_task_id_to_shard_id, scope) +} + /// Builder for the `UpperBound` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct UpperBound { @@ -229069,6 +235132,7 @@ pub fn upper_bound< pub struct VarHandleOp { container: ::std::option::Option<::std::string::String>, shared_name: ::std::option::Option<::std::string::String>, + debug_name: ::std::option::Option<::std::string::String>, dtype: ::std::option::Option, shape: ::std::option::Option, allowed_devices: ::std::option::Option<::std::vec::Vec<::std::string::String>>, @@ -229105,6 +235169,15 @@ impl VarHandleOp { self } + /// Sets the `debug_name` attribute. + pub fn debug_name>( + mut self, + value: ArgType, + ) -> Self { + self.debug_name = ::std::option::Option::Some(value.into()); + self + } + /// Sets the `dtype` attribute. pub fn dtype>(mut self, value: ArgType) -> Self { self.dtype = ::std::option::Option::Some(value.into()); @@ -229149,6 +235222,9 @@ impl VarHandleOp { if let ::std::option::Option::Some(value) = &self.shared_name { nd.set_attr_string("shared_name", value)?; } + if let ::std::option::Option::Some(value) = &self.debug_name { + nd.set_attr_string("debug_name", value)?; + } if let ::std::option::Option::Some(value) = &self.dtype { nd.set_attr_type("dtype", *value)?; } @@ -229171,6 +235247,9 @@ impl VarHandleOp { if let ::std::option::Option::Some(value) = &self.shared_name { builder.set_attr_string("shared_name", value)?; } + if let ::std::option::Option::Some(value) = &self.debug_name { + builder.set_attr_string("debug_name", value)?; + } if let ::std::option::Option::Some(value) = &self.dtype { builder.set_attr_type("dtype", *value)?; } @@ -229640,6 +235719,192 @@ pub fn variable_v2(scope: &mut crate::Scope) -> crate::Result VariableV2::new().build(scope) } +/// Builder for the `WeightedFlatMapDataset` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct WeightedFlatMapDataset { + N: ::std::option::Option, + M: ::std::option::Option, + output_types: ::std::option::Option<::std::vec::Vec>, + output_shapes: ::std::option::Option<::std::vec::Vec>, + metadata: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'WeightedFlatMapDataset' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct WeightedFlatMapDatasetInst { + /// An instance of a fully built WeightedFlatMapDataset Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl WeightedFlatMapDataset { + /// Creates a new `WeightedFlatMapDataset`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `N` attribute. + pub fn N>(mut self, value: ArgType) -> Self { + self.N = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `M` attribute. + pub fn M>(mut self, value: ArgType) -> Self { + self.M = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_types` attribute. 
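// ---------------------------------------------------------------------------
// Editor's note: 2.16 adds a `debug_name` attribute to VarHandleOp (see the
// hunk above), giving resource variables a stable, human-readable name for
// debugging. Illustrative user-side sketch; the Shape construction follows the
// crate's Option<Vec<Option<i64>>> convention.
use tensorflow::ops;
use tensorflow::{DataType, Scope, Shape, Status};

fn var_handle_sketch(scope: &mut Scope) -> Result<(), Status> {
    let _handle = ops::VarHandleOp::new()
        .dtype(DataType::Float)
        .shape(Shape::from(Some(vec![Some(2i64)])))
        .shared_name("w")
        .debug_name("layer0/weights")
        .build(scope)?;
    Ok(())
}
// ---------------------------------------------------------------------------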
+ pub fn output_types>>( + mut self, + value: ArgType, + ) -> Self { + self.output_types = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `output_shapes` attribute. + pub fn output_shapes>>( + mut self, + value: ArgType, + ) -> Self { + self.output_shapes = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `metadata` attribute. + pub fn metadata>( + mut self, + value: ArgType, + ) -> Self { + self.metadata = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `WeightedFlatMapDataset` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + >( + &self, + input_datasets: O0, + weights: O1, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(input_datasets.into(), weights.into(), scope) + } + fn build_impl( + &self, + input_datasets: crate::Output, + weights: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("WeightedFlatMapDataset", |nd| { + nd.add_input(input_datasets); + nd.add_input(weights); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.N { + nd.set_attr_int("N", *value)?; + } + if let ::std::option::Option::Some(value) = &self.M { + nd.set_attr_int("M", *value)?; + } + if let ::std::option::Option::Some(value) = &self.output_types { + nd.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + nd.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + nd.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'WeightedFlatMapDataset' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + input_datasets: Vec, + weights: Vec, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("WeightedFlatMapDataset", |builder| { + builder.add_input_list(&input_datasets); + builder.add_input_list(&weights); + builder.set_attr_int("N", input_datasets.clone().len() as i64)?; + builder.set_attr_int("M", weights.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.output_types { + builder.set_attr_type_list("output_types", value)?; + } + if let ::std::option::Option::Some(value) = &self.output_shapes { + builder.set_attr_shape_list("output_shapes", value)?; + } + if let ::std::option::Option::Some(value) = &self.metadata { + builder.set_attr_string("metadata", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(WeightedFlatMapDatasetInst { op }) + } +} +impl WeightedFlatMapDatasetInst { + /// Returns the 'handle' Output of this 'WeightedFlatMapDataset' operation. + pub fn handle(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns a Vector of input_datasets for 'input_datasets' Input of this WeightedFlatMapDataset operation. + pub fn input_datasets(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("N")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of weights for 'weights' Input of this WeightedFlatMapDataset operation. 
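+    // Input layout note: this op packs its two list inputs back to back, with
+    // the N `input_datasets` entries first and the M `weights` entries after
+    // them, which is why the accessor below derives its starting index from
+    // the "N" attr recorded at build time.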
+ pub fn weights(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("N")? + 1) as i32; + let mut Inputs = vec![]; + for i in dynamic_offset..dynamic_offset + self.op.get_attr_int("M")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } +} +impl From for crate::Operation { + fn from(inst: WeightedFlatMapDatasetInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `WeightedFlatMapDataset::new().build(input_datasets, weights, scope)`. +pub fn weighted_flat_map_dataset< + O0: ::std::convert::Into, + O1: ::std::convert::Into, +>( + input_datasets: O0, + weights: O1, + scope: &mut crate::Scope, +) -> crate::Result { + WeightedFlatMapDataset::new().build(input_datasets, weights, scope) +} + /// Builder for the `Where` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct Where { @@ -232481,6 +238746,175 @@ pub fn xla_recv_tpuembedding_activations XlaRecvTPUEmbeddingActivations::new().build(deduplication_data, scope) } +/// Builder for the `XlaRecvTPUEmbeddingActivationsV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaRecvTPUEmbeddingActivationsV2 { + num_tables: ::std::option::Option, + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaRecvTPUEmbeddingActivationsV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaRecvTPUEmbeddingActivationsV2Inst { + /// An instance of a fully built XlaRecvTPUEmbeddingActivationsV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaRecvTPUEmbeddingActivationsV2 { + /// Creates a new `XlaRecvTPUEmbeddingActivationsV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `num_tables` attribute. + pub fn num_tables>(mut self, value: ArgType) -> Self { + self.num_tables = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaRecvTPUEmbeddingActivationsV2` operation. 
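+    // A call sketch for this builder, assuming `dedup` is the output of a
+    // previously built deduplication-data op and `cfg` holds a serialized
+    // TPUEmbeddingConfiguration proto (both names are assumptions made for
+    // illustration):
+    //
+    //     let recv = XlaRecvTPUEmbeddingActivationsV2::new()
+    //         .num_tables(2)
+    //         .config(cfg)
+    //         .build(dedup, &mut scope)?;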
+ pub fn build>( + &self, + deduplication_data: O0, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl(deduplication_data.into(), scope) + } + fn build_impl( + &self, + deduplication_data: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaRecvTPUEmbeddingActivationsV2", |nd| { + nd.add_input(deduplication_data); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.num_tables { + nd.set_attr_int("num_tables", *value)?; + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + nd.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + nd.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + nd.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaRecvTPUEmbeddingActivationsV2' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + deduplication_data: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaRecvTPUEmbeddingActivationsV2", |builder| { + builder.add_input(deduplication_data); + if let ::std::option::Option::Some(value) = &self.num_tables { + builder.set_attr_int("num_tables", *value)?; + } + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + builder.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + builder.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + builder.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaRecvTPUEmbeddingActivationsV2Inst { op }) + } +} +impl XlaRecvTPUEmbeddingActivationsV2Inst { + /// Returns a Vector of outputs for 'outputs' Output of this XlaRecvTPUEmbeddingActivationsV2 operation. + pub fn outputs(&self) -> crate::Result> { + let mut Outputs = vec![]; + for i in 0..self.op.get_attr_int("num_tables")? as i32 { + Outputs.push(crate::Output { + operation: self.op.clone(), + index: i, + }); + } + Ok(Outputs) + } + /// Returns the 'deduplication_data' Input of this 'XlaRecvTPUEmbeddingActivationsV2' operation. + pub fn deduplication_data(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaRecvTPUEmbeddingActivationsV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaRecvTPUEmbeddingActivationsV2::new().build(deduplication_data, scope)`. +pub fn xla_recv_tpuembedding_activations_v2>( + deduplication_data: O0, + scope: &mut crate::Scope, +) -> crate::Result { + XlaRecvTPUEmbeddingActivationsV2::new().build(deduplication_data, scope) +} + /// Builder for the `XlaRecvTPUEmbeddingDeduplicationData` operation. 
#[derive(::std::fmt::Debug, ::std::default::Default)] pub struct XlaRecvTPUEmbeddingDeduplicationData { @@ -232566,6 +239000,139 @@ pub fn xla_recv_tpuembedding_deduplication_data( XlaRecvTPUEmbeddingDeduplicationData::new().build(scope) } +/// Builder for the `XlaRecvTPUEmbeddingDeduplicationDataV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaRecvTPUEmbeddingDeduplicationDataV2 { + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaRecvTPUEmbeddingDeduplicationDataV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaRecvTPUEmbeddingDeduplicationDataV2Inst { + /// An instance of a fully built XlaRecvTPUEmbeddingDeduplicationDataV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaRecvTPUEmbeddingDeduplicationDataV2 { + /// Creates a new `XlaRecvTPUEmbeddingDeduplicationDataV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaRecvTPUEmbeddingDeduplicationDataV2` operation. + pub fn build(&self, scope: &mut crate::Scope) -> crate::Result { + self.build_impl(scope) + } + fn build_impl(&self, scope: &mut crate::Scope) -> crate::Result { + scope.new_operation("XlaRecvTPUEmbeddingDeduplicationDataV2", |nd| { + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + nd.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + nd.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + nd.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaRecvTPUEmbeddingDeduplicationDataV2' Operation with it's Outputs and Inputs exposed as methods. 
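+    // Like the other *V2 embedding ops added in this file, this op layers the
+    // `embedding_partitions`, `hbm_buffers_config`, and `tpu_topology` string
+    // attrs on top of the V1 op's `config`; all of them default to unset and
+    // are passed through verbatim when present.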
+ pub fn build_instance( + &self, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaRecvTPUEmbeddingDeduplicationDataV2", |builder| { + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + builder.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + builder.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + builder.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaRecvTPUEmbeddingDeduplicationDataV2Inst { op }) + } +} +impl XlaRecvTPUEmbeddingDeduplicationDataV2Inst { + /// Returns the 'output' Output of this 'XlaRecvTPUEmbeddingDeduplicationDataV2' operation. + pub fn output(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaRecvTPUEmbeddingDeduplicationDataV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaRecvTPUEmbeddingDeduplicationDataV2::new().build(scope)`. +pub fn xla_recv_tpuembedding_deduplication_data_v2( + scope: &mut crate::Scope, +) -> crate::Result { + XlaRecvTPUEmbeddingDeduplicationDataV2::new().build(scope) +} + /// Builder for the `XlaSendTPUEmbeddingGradients` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct XlaSendTPUEmbeddingGradients { @@ -232713,8 +239280,8 @@ impl XlaSendTPUEmbeddingGradientsInst { } /// Returns the 'deduplication_data' Input of this 'XlaSendTPUEmbeddingGradients' operation. pub fn deduplication_data(&self) -> crate::Result { - let dynamic_offset = (self.op.get_attr_int("NumTables")? - + self.op.get_attr_int("NumLearningRateTags")? + let dynamic_offset = (self.op.get_attr_int("NumLearningRateTags")? + + self.op.get_attr_int("NumTables")? + 2) as i32; Ok(crate::Input { operation: &self.op, @@ -232741,6 +239308,234 @@ pub fn xla_send_tpuembedding_gradients< XlaSendTPUEmbeddingGradients::new().build(gradients, learning_rates, deduplication_data, scope) } +/// Builder for the `XlaSendTPUEmbeddingGradientsV2` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSendTPUEmbeddingGradientsV2 { + NumTables: ::std::option::Option, + NumLearningRateTags: ::std::option::Option, + config: ::std::option::Option<::std::string::String>, + embedding_partitions: ::std::option::Option<::std::string::String>, + hbm_buffers_config: ::std::option::Option<::std::string::String>, + tpu_topology: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSendTPUEmbeddingGradientsV2' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSendTPUEmbeddingGradientsV2Inst { + /// An instance of a fully built XlaSendTPUEmbeddingGradientsV2 Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSendTPUEmbeddingGradientsV2 { + /// Creates a new `XlaSendTPUEmbeddingGradientsV2`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `NumTables` attribute. + pub fn NumTables>(mut self, value: ArgType) -> Self { + self.NumTables = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `NumLearningRateTags` attribute. 
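+    // Note: `build_instance` (further below) overwrites `NumTables` and
+    // `NumLearningRateTags` with the lengths of the input lists it receives,
+    // so these two setters only matter on the plain `build` path.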
+ pub fn NumLearningRateTags>( + mut self, + value: ArgType, + ) -> Self { + self.NumLearningRateTags = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `config` attribute. + pub fn config>( + mut self, + value: ArgType, + ) -> Self { + self.config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `embedding_partitions` attribute. + pub fn embedding_partitions>( + mut self, + value: ArgType, + ) -> Self { + self.embedding_partitions = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `hbm_buffers_config` attribute. + pub fn hbm_buffers_config>( + mut self, + value: ArgType, + ) -> Self { + self.hbm_buffers_config = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `tpu_topology` attribute. + pub fn tpu_topology>( + mut self, + value: ArgType, + ) -> Self { + self.tpu_topology = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSendTPUEmbeddingGradientsV2` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + >( + &self, + gradients: O0, + learning_rates: O1, + deduplication_data: O2, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + gradients.into(), + learning_rates.into(), + deduplication_data.into(), + scope, + ) + } + fn build_impl( + &self, + gradients: crate::Output, + learning_rates: crate::Output, + deduplication_data: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSendTPUEmbeddingGradientsV2", |nd| { + nd.add_input(gradients); + nd.add_input(learning_rates); + nd.add_input(deduplication_data); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.NumTables { + nd.set_attr_int("NumTables", *value)?; + } + if let ::std::option::Option::Some(value) = &self.NumLearningRateTags { + nd.set_attr_int("NumLearningRateTags", *value)?; + } + if let ::std::option::Option::Some(value) = &self.config { + nd.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + nd.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + nd.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + nd.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSendTPUEmbeddingGradientsV2' Operation with it's Outputs and Inputs exposed as methods. 
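+    // Worked example of the input offsets used by the accessors below: with
+    // NumTables = 3 and NumLearningRateTags = 2, `gradients` reads indices
+    // 0..3, `learning_rates` starts at 3 + 1 = 4, and `deduplication_data`
+    // sits at 3 + 2 + 2 = 7, matching the offset formulas in the instance
+    // impl.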
+ pub fn build_instance( + &self, + gradients: Vec, + learning_rates: Vec, + deduplication_data: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSendTPUEmbeddingGradientsV2", |builder| { + builder.add_input_list(&gradients); + builder.add_input_list(&learning_rates); + builder.add_input(deduplication_data); + builder.set_attr_int("NumTables", gradients.clone().len() as i64)?; + builder.set_attr_int("NumLearningRateTags", learning_rates.clone().len() as i64)?; + if let ::std::option::Option::Some(value) = &self.config { + builder.set_attr_string("config", value)?; + } + if let ::std::option::Option::Some(value) = &self.embedding_partitions { + builder.set_attr_string("embedding_partitions", value)?; + } + if let ::std::option::Option::Some(value) = &self.hbm_buffers_config { + builder.set_attr_string("hbm_buffers_config", value)?; + } + if let ::std::option::Option::Some(value) = &self.tpu_topology { + builder.set_attr_string("tpu_topology", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSendTPUEmbeddingGradientsV2Inst { op }) + } +} +impl XlaSendTPUEmbeddingGradientsV2Inst { + /// Returns a Vector of gradients for 'gradients' Input of this XlaSendTPUEmbeddingGradientsV2 operation. + pub fn gradients(&self) -> crate::Result> { + let mut Inputs = vec![]; + for i in 0..self.op.get_attr_int("NumTables")? as i32 { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns a Vector of learning_rates for 'learning_rates' Input of this XlaSendTPUEmbeddingGradientsV2 operation. + pub fn learning_rates(&self) -> crate::Result> { + let dynamic_offset = (self.op.get_attr_int("NumTables")? + 1) as i32; + let mut Inputs = vec![]; + for i in + dynamic_offset..dynamic_offset + self.op.get_attr_int("NumLearningRateTags")? as i32 + { + Inputs.push(crate::Input { + operation: &self.op, + index: i, + }); + } + Ok(Inputs) + } + /// Returns the 'deduplication_data' Input of this 'XlaSendTPUEmbeddingGradientsV2' operation. + pub fn deduplication_data(&self) -> crate::Result { + let dynamic_offset = (self.op.get_attr_int("NumTables")? + + self.op.get_attr_int("NumLearningRateTags")? + + 2) as i32; + Ok(crate::Input { + operation: &self.op, + index: dynamic_offset, + }) + } +} +impl From for crate::Operation { + fn from(inst: XlaSendTPUEmbeddingGradientsV2Inst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSendTPUEmbeddingGradientsV2::new().build(gradients, learning_rates, deduplication_data, scope)`. +pub fn xla_send_tpuembedding_gradients_v2< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, +>( + gradients: O0, + learning_rates: O1, + deduplication_data: O2, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSendTPUEmbeddingGradientsV2::new().build( + gradients, + learning_rates, + deduplication_data, + scope, + ) +} + /// Builder for the `XlaSendToHost` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct XlaSendToHost { @@ -232854,6 +239649,5503 @@ pub fn xla_send_to_host>( XlaSendToHost::new().build(input, scope) } +/// Builder for the `XlaSparseCoreAdagrad` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseCoreAdagrad { + feature_width: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseCoreAdagrad' Operation with it's Outputs and Inputs exposed as methods. 
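+// Presumably this op applies the standard Adagrad update to each looked-up
+// row, i.e. (a sketch, not taken from the op definition):
+//
+//     accumulator += gradient * gradient;
+//     embedding_table -= learning_rate * gradient / accumulator.sqrt();
+//
+// which lines up with its two outputs, `updated_embedding_table` and
+// `updated_accumulator`.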
+#[derive(Debug, Clone)] +pub struct XlaSparseCoreAdagradInst { + /// An instance of a fully built XlaSparseCoreAdagrad Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseCoreAdagrad { + /// Creates a new `XlaSparseCoreAdagrad`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseCoreAdagrad` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + >( + &self, + indices: O0, + gradient: O1, + learning_rate: O2, + accumulator: O3, + embedding_table: O4, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + indices.into(), + gradient.into(), + learning_rate.into(), + accumulator.into(), + embedding_table.into(), + scope, + ) + } + fn build_impl( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + accumulator: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseCoreAdagrad", |nd| { + nd.add_input(indices); + nd.add_input(gradient); + nd.add_input(learning_rate); + nd.add_input(accumulator); + nd.add_input(embedding_table); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseCoreAdagrad' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + accumulator: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseCoreAdagrad", |builder| { + builder.add_input(indices); + builder.add_input(gradient); + builder.add_input(learning_rate); + builder.add_input(accumulator); + builder.add_input(embedding_table); + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseCoreAdagradInst { op }) + } +} +impl XlaSparseCoreAdagradInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseCoreAdagrad' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseCoreAdagrad' operation. + pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'indices' Input of this 'XlaSparseCoreAdagrad' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'gradient' Input of this 'XlaSparseCoreAdagrad' operation. + pub fn gradient(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseCoreAdagrad' operation. 
+ pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'accumulator' Input of this 'XlaSparseCoreAdagrad' operation. + pub fn accumulator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseCoreAdagrad' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseCoreAdagradInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseCoreAdagrad::new().build(indices, gradient, learning_rate, accumulator, embedding_table, scope)`. +pub fn xla_sparse_core_adagrad< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, +>( + indices: O0, + gradient: O1, + learning_rate: O2, + accumulator: O3, + embedding_table: O4, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseCoreAdagrad::new().build( + indices, + gradient, + learning_rate, + accumulator, + embedding_table, + scope, + ) +} + +/// Builder for the `XlaSparseCoreAdagradMomentum` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseCoreAdagradMomentum { + feature_width: ::std::option::Option, + use_nesterov: ::std::option::Option, + beta_2: ::std::option::Option, + exponent: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseCoreAdagradMomentum' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseCoreAdagradMomentumInst { + /// An instance of a fully built XlaSparseCoreAdagradMomentum Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseCoreAdagradMomentum { + /// Creates a new `XlaSparseCoreAdagradMomentum`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_nesterov` attribute. + pub fn use_nesterov>(mut self, value: ArgType) -> Self { + self.use_nesterov = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta_2` attribute. + pub fn beta_2>(mut self, value: ArgType) -> Self { + self.beta_2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `exponent` attribute. + pub fn exponent>(mut self, value: ArgType) -> Self { + self.exponent = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseCoreAdagradMomentum` operation. 
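+    // The builder below takes its eight inputs in graph order: indices,
+    // gradient, learning_rate, beta_1, epsilon, accumulator, momentum,
+    // embedding_table. A call sketch using the shorthand defined after this
+    // impl (all bindings assumed to be existing `crate::Output`s):
+    //
+    //     let op = xla_sparse_core_adagrad_momentum(
+    //         indices, gradient, lr, beta_1, epsilon, accum, mom, table,
+    //         &mut scope)?;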
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + >( + &self, + indices: O0, + gradient: O1, + learning_rate: O2, + beta_1: O3, + epsilon: O4, + accumulator: O5, + momentum: O6, + embedding_table: O7, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + indices.into(), + gradient.into(), + learning_rate.into(), + beta_1.into(), + epsilon.into(), + accumulator.into(), + momentum.into(), + embedding_table.into(), + scope, + ) + } + fn build_impl( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + beta_1: crate::Output, + epsilon: crate::Output, + accumulator: crate::Output, + momentum: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseCoreAdagradMomentum", |nd| { + nd.add_input(indices); + nd.add_input(gradient); + nd.add_input(learning_rate); + nd.add_input(beta_1); + nd.add_input(epsilon); + nd.add_input(accumulator); + nd.add_input(momentum); + nd.add_input(embedding_table); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_nesterov { + nd.set_attr_bool("use_nesterov", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta_2 { + nd.set_attr_float("beta_2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.exponent { + nd.set_attr_float("exponent", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseCoreAdagradMomentum' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + beta_1: crate::Output, + epsilon: crate::Output, + accumulator: crate::Output, + momentum: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseCoreAdagradMomentum", |builder| { + builder.add_input(indices); + builder.add_input(gradient); + builder.add_input(learning_rate); + builder.add_input(beta_1); + builder.add_input(epsilon); + builder.add_input(accumulator); + builder.add_input(momentum); + builder.add_input(embedding_table); + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_nesterov { + builder.set_attr_bool("use_nesterov", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta_2 { + builder.set_attr_float("beta_2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.exponent { + builder.set_attr_float("exponent", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseCoreAdagradMomentumInst { op }) + } +} +impl XlaSparseCoreAdagradMomentumInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseCoreAdagradMomentum' operation. 
+ pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_momentum' Output of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn updated_momentum(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'indices' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'gradient' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn gradient(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'beta_1' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn beta_1(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'epsilon' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn epsilon(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'accumulator' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn accumulator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'momentum' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn momentum(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseCoreAdagradMomentum' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseCoreAdagradMomentumInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseCoreAdagradMomentum::new().build(indices, gradient, learning_rate, beta_1, epsilon, accumulator, momentum, embedding_table, scope)`. +pub fn xla_sparse_core_adagrad_momentum< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, +>( + indices: O0, + gradient: O1, + learning_rate: O2, + beta_1: O3, + epsilon: O4, + accumulator: O5, + momentum: O6, + embedding_table: O7, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseCoreAdagradMomentum::new().build( + indices, + gradient, + learning_rate, + beta_1, + epsilon, + accumulator, + momentum, + embedding_table, + scope, + ) +} + +/// Builder for the `XlaSparseCoreAdam` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseCoreAdam { + feature_width: ::std::option::Option, + use_sum_inside_sqrt: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseCoreAdam' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseCoreAdamInst { + /// An instance of a fully built XlaSparseCoreAdam Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseCoreAdam { + /// Creates a new `XlaSparseCoreAdam`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. 
+ pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `use_sum_inside_sqrt` attribute. + pub fn use_sum_inside_sqrt>( + mut self, + value: ArgType, + ) -> Self { + self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseCoreAdam` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + >( + &self, + embedding_table: O0, + indices: O1, + gradient: O2, + learning_rate: O3, + momentum: O4, + velocity: O5, + beta_1: O6, + beta_2: O7, + epsilon: O8, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + embedding_table.into(), + indices.into(), + gradient.into(), + learning_rate.into(), + momentum.into(), + velocity.into(), + beta_1.into(), + beta_2.into(), + epsilon.into(), + scope, + ) + } + fn build_impl( + &self, + embedding_table: crate::Output, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + momentum: crate::Output, + velocity: crate::Output, + beta_1: crate::Output, + beta_2: crate::Output, + epsilon: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseCoreAdam", |nd| { + nd.add_input(embedding_table); + nd.add_input(indices); + nd.add_input(gradient); + nd.add_input(learning_rate); + nd.add_input(momentum); + nd.add_input(velocity); + nd.add_input(beta_1); + nd.add_input(beta_2); + nd.add_input(epsilon); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt { + nd.set_attr_bool("use_sum_inside_sqrt", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseCoreAdam' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + embedding_table: crate::Output, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + momentum: crate::Output, + velocity: crate::Output, + beta_1: crate::Output, + beta_2: crate::Output, + epsilon: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseCoreAdam", |builder| { + builder.add_input(embedding_table); + builder.add_input(indices); + builder.add_input(gradient); + builder.add_input(learning_rate); + builder.add_input(momentum); + builder.add_input(velocity); + builder.add_input(beta_1); + builder.add_input(beta_2); + builder.add_input(epsilon); + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt { + builder.set_attr_bool("use_sum_inside_sqrt", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseCoreAdamInst { op }) + } +} +impl XlaSparseCoreAdamInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseCoreAdam' operation. 
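+    // The builder's `use_sum_inside_sqrt` attr presumably selects between the
+    // two usual Adam denominators, sqrt(v) + epsilon versus sqrt(v + epsilon^2),
+    // as in TensorFlow's TPU embedding optimizers; the op definition does not
+    // spell this out here, so treat that reading as an assumption.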
+ pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_velocity' Output of this 'XlaSparseCoreAdam' operation. + pub fn updated_velocity(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_momentum' Output of this 'XlaSparseCoreAdam' operation. + pub fn updated_momentum(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseCoreAdam' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'indices' Input of this 'XlaSparseCoreAdam' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'gradient' Input of this 'XlaSparseCoreAdam' operation. + pub fn gradient(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseCoreAdam' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'momentum' Input of this 'XlaSparseCoreAdam' operation. + pub fn momentum(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'velocity' Input of this 'XlaSparseCoreAdam' operation. + pub fn velocity(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'beta_1' Input of this 'XlaSparseCoreAdam' operation. + pub fn beta_1(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'beta_2' Input of this 'XlaSparseCoreAdam' operation. + pub fn beta_2(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } + /// Returns the 'epsilon' Input of this 'XlaSparseCoreAdam' operation. + pub fn epsilon(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 8, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseCoreAdamInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseCoreAdam::new().build(embedding_table, indices, gradient, learning_rate, momentum, velocity, beta_1, beta_2, epsilon, scope)`. +pub fn xla_sparse_core_adam< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, +>( + embedding_table: O0, + indices: O1, + gradient: O2, + learning_rate: O3, + momentum: O4, + velocity: O5, + beta_1: O6, + beta_2: O7, + epsilon: O8, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseCoreAdam::new().build( + embedding_table, + indices, + gradient, + learning_rate, + momentum, + velocity, + beta_1, + beta_2, + epsilon, + scope, + ) +} + +/// Builder for the `XlaSparseCoreFtrl` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseCoreFtrl { + feature_width: ::std::option::Option, + multiply_linear_by_learning_rate: ::std::option::Option, + l1_regularization_strength: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseCoreFtrl' Operation with it's Outputs and Inputs exposed as methods. 
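+// A call sketch for the Adam op defined above (every binding is assumed to be
+// an existing `crate::Output`; the hyperparameters are scalar outputs):
+//
+//     let adam = xla_sparse_core_adam(
+//         table, indices, gradient, lr, momentum, velocity,
+//         beta_1, beta_2, epsilon, &mut scope)?;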
+#[derive(Debug, Clone)] +pub struct XlaSparseCoreFtrlInst { + /// An instance of a fully built XlaSparseCoreFtrl Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseCoreFtrl { + /// Creates a new `XlaSparseCoreFtrl`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `multiply_linear_by_learning_rate` attribute. + pub fn multiply_linear_by_learning_rate>( + mut self, + value: ArgType, + ) -> Self { + self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l1_regularization_strength` attribute. + pub fn l1_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l1_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseCoreFtrl` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + >( + &self, + embedding_table: O0, + accumulator: O1, + linear: O2, + learning_rate: O3, + indices: O4, + gradient: O5, + beta: O6, + learning_rate_power: O7, + l2_regularization_strength: O8, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + embedding_table.into(), + accumulator.into(), + linear.into(), + learning_rate.into(), + indices.into(), + gradient.into(), + beta.into(), + learning_rate_power.into(), + l2_regularization_strength.into(), + scope, + ) + } + fn build_impl( + &self, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + learning_rate: crate::Output, + indices: crate::Output, + gradient: crate::Output, + beta: crate::Output, + learning_rate_power: crate::Output, + l2_regularization_strength: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseCoreFtrl", |nd| { + nd.add_input(embedding_table); + nd.add_input(accumulator); + nd.add_input(linear); + nd.add_input(learning_rate); + nd.add_input(indices); + nd.add_input(gradient); + nd.add_input(beta); + nd.add_input(learning_rate_power); + nd.add_input(l2_regularization_strength); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + nd.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + nd.set_attr_float("l1_regularization_strength", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseCoreFtrl' Operation with it's Outputs and Inputs exposed as methods. 
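+    // Note the asymmetry in how this op receives its regularizers:
+    // `l1_regularization_strength` is a compile-time attr on the builder,
+    // while `l2_regularization_strength` arrives as a regular graph input
+    // alongside `beta` and `learning_rate_power`.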
+ pub fn build_instance( + &self, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + learning_rate: crate::Output, + indices: crate::Output, + gradient: crate::Output, + beta: crate::Output, + learning_rate_power: crate::Output, + l2_regularization_strength: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseCoreFtrl", |builder| { + builder.add_input(embedding_table); + builder.add_input(accumulator); + builder.add_input(linear); + builder.add_input(learning_rate); + builder.add_input(indices); + builder.add_input(gradient); + builder.add_input(beta); + builder.add_input(learning_rate_power); + builder.add_input(l2_regularization_strength); + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + builder.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + builder.set_attr_float("l1_regularization_strength", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseCoreFtrlInst { op }) + } +} +impl XlaSparseCoreFtrlInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseCoreFtrl' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseCoreFtrl' operation. + pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_linear' Output of this 'XlaSparseCoreFtrl' operation. + pub fn updated_linear(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseCoreFtrl' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'accumulator' Input of this 'XlaSparseCoreFtrl' operation. + pub fn accumulator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'linear' Input of this 'XlaSparseCoreFtrl' operation. + pub fn linear(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseCoreFtrl' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'indices' Input of this 'XlaSparseCoreFtrl' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'gradient' Input of this 'XlaSparseCoreFtrl' operation. + pub fn gradient(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'beta' Input of this 'XlaSparseCoreFtrl' operation. + pub fn beta(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'learning_rate_power' Input of this 'XlaSparseCoreFtrl' operation. + pub fn learning_rate_power(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } + /// Returns the 'l2_regularization_strength' Input of this 'XlaSparseCoreFtrl' operation. 
+ pub fn l2_regularization_strength(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 8, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseCoreFtrlInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseCoreFtrl::new().build(embedding_table, accumulator, linear, learning_rate, indices, gradient, beta, learning_rate_power, l2_regularization_strength, scope)`. +pub fn xla_sparse_core_ftrl< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, +>( + embedding_table: O0, + accumulator: O1, + linear: O2, + learning_rate: O3, + indices: O4, + gradient: O5, + beta: O6, + learning_rate_power: O7, + l2_regularization_strength: O8, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseCoreFtrl::new().build( + embedding_table, + accumulator, + linear, + learning_rate, + indices, + gradient, + beta, + learning_rate_power, + l2_regularization_strength, + scope, + ) +} + +/// Builder for the `XlaSparseCoreSgd` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseCoreSgd { + feature_width: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseCoreSgd' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseCoreSgdInst { + /// An instance of a fully built XlaSparseCoreSgd Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseCoreSgd { + /// Creates a new `XlaSparseCoreSgd`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `feature_width` attribute. + pub fn feature_width>(mut self, value: ArgType) -> Self { + self.feature_width = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseCoreSgd` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + >( + &self, + indices: O0, + gradient: O1, + learning_rate: O2, + embedding_table: O3, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + indices.into(), + gradient.into(), + learning_rate.into(), + embedding_table.into(), + scope, + ) + } + fn build_impl( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseCoreSgd", |nd| { + nd.add_input(indices); + nd.add_input(gradient); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.feature_width { + nd.set_attr_int("feature_width", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseCoreSgd' Operation with it's Outputs and Inputs exposed as methods. 
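+    // SGD is the simplest of the SparseCore optimizer ops above: it keeps no
+    // slot variables and returns a single output, presumably computing
+    //
+    //     embedding_table[indices] -= learning_rate * gradient;
+    //
+    // `feature_width` here, as in the sibling ops, is read as the embedding
+    // dimension of the table; that reading is an assumption, since the attr
+    // is not documented in this file.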
+ pub fn build_instance( + &self, + indices: crate::Output, + gradient: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseCoreSgd", |builder| { + builder.add_input(indices); + builder.add_input(gradient); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + if let ::std::option::Option::Some(value) = &self.feature_width { + builder.set_attr_int("feature_width", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseCoreSgdInst { op }) + } +} +impl XlaSparseCoreSgdInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseCoreSgd' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'indices' Input of this 'XlaSparseCoreSgd' operation. + pub fn indices(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'gradient' Input of this 'XlaSparseCoreSgd' operation. + pub fn gradient(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseCoreSgd' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseCoreSgd' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseCoreSgdInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseCoreSgd::new().build(indices, gradient, learning_rate, embedding_table, scope)`. +pub fn xla_sparse_core_sgd< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, +>( + indices: O0, + gradient: O1, + learning_rate: O2, + embedding_table: O3, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseCoreSgd::new().build(indices, gradient, learning_rate, embedding_table, scope) +} + +/// Builder for the `XlaSparseDenseMatmul` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmul { + max_ids_per_partition: ::std::option::Option, + max_unique_ids_per_partition: ::std::option::Option, + input_size: ::std::option::Option, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmul' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulInst { + /// An instance of a fully built XlaSparseDenseMatmul Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmul { + /// Creates a new `XlaSparseDenseMatmul`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `max_ids_per_partition` attribute. + pub fn max_ids_per_partition>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_partition = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_partition` attribute. + pub fn max_unique_ids_per_partition>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_partition = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `input_size` attribute. + pub fn input_size>(mut self, value: ArgType) -> Self { + self.input_size = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. 
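+    // `max_ids_per_partition`, `max_unique_ids_per_partition`, and
+    // `input_size` are plain int attrs with no defaults supplied by this
+    // builder; as with the other builders in this file, an unset attr is
+    // simply omitted from the NodeDef.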
+ pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmul` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + >( + &self, + row_ids: O0, + col_ids: O1, + values: O2, + offsets: O3, + embedding_table: O4, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_ids.into(), + col_ids.into(), + values.into(), + offsets.into(), + embedding_table.into(), + scope, + ) + } + fn build_impl( + &self, + row_ids: crate::Output, + col_ids: crate::Output, + values: crate::Output, + offsets: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmul", |nd| { + nd.add_input(row_ids); + nd.add_input(col_ids); + nd.add_input(values); + nd.add_input(offsets); + nd.add_input(embedding_table); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_partition { + nd.set_attr_int("max_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_partition { + nd.set_attr_int("max_unique_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.input_size { + nd.set_attr_int("input_size", *value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmul' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + row_ids: crate::Output, + col_ids: crate::Output, + values: crate::Output, + offsets: crate::Output, + embedding_table: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseDenseMatmul", |builder| { + builder.add_input(row_ids); + builder.add_input(col_ids); + builder.add_input(values); + builder.add_input(offsets); + builder.add_input(embedding_table); + if let ::std::option::Option::Some(value) = &self.max_ids_per_partition { + builder.set_attr_int("max_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_partition { + builder.set_attr_int("max_unique_ids_per_partition", *value)?; + } + if let ::std::option::Option::Some(value) = &self.input_size { + builder.set_attr_int("input_size", *value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseDenseMatmulInst { op }) + } +} +impl XlaSparseDenseMatmulInst { + /// Returns the 'activations' Output of this 'XlaSparseDenseMatmul' operation. + pub fn activations(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'row_pointers' Output of this 'XlaSparseDenseMatmul' operation. + pub fn row_pointers(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'sorted_embedding_ids' Output of this 'XlaSparseDenseMatmul' operation. + pub fn sorted_embedding_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'sorted_sample_ids' Output of this 'XlaSparseDenseMatmul' operation. + pub fn sorted_sample_ids(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 3, + } + } + /// Returns the 'sorted_gains' Output of this 'XlaSparseDenseMatmul' operation. 
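+    // The five outputs below form a pipeline: `activations` is the forward
+    // result, while `row_pointers`, `sorted_embedding_ids`,
+    // `sorted_sample_ids`, and `sorted_gains` are the sorted CSR buffers that
+    // the XlaSparseDenseMatmulGradWith*AndCsrInput ops defined later consume
+    // (there the embedding ids arrive under the name `sorted_token_ids`).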
+    pub fn sorted_gains(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 4,
+        }
+    }
+    /// Returns the 'row_ids' Input of this 'XlaSparseDenseMatmul' operation.
+    pub fn row_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'col_ids' Input of this 'XlaSparseDenseMatmul' operation.
+    pub fn col_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'values' Input of this 'XlaSparseDenseMatmul' operation.
+    pub fn values(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'offsets' Input of this 'XlaSparseDenseMatmul' operation.
+    pub fn offsets(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmul' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulInst> for crate::Operation {
+    fn from(inst: XlaSparseDenseMatmulInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmul::new().build(row_ids, col_ids, values, offsets, embedding_table, scope)`.
+pub fn xla_sparse_dense_matmul<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+>(
+    row_ids: O0,
+    col_ids: O1,
+    values: O2,
+    offsets: O3,
+    embedding_table: O4,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmul::new().build(row_ids, col_ids, values, offsets, embedding_table, scope)
+}
+
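+// Editor's note (illustrative sketch, not generated code): attributes left
+// unset on a builder are simply not written to the node, so TensorFlow's
+// registered defaults for the op apply. The inputs below are assumed to be
+// existing `crate::Output`s in `scope`:
+//
+//     let op = XlaSparseDenseMatmul::new()
+//         .max_ids_per_partition(256)
+//         .input_size(1024)
+//         .build(row_ids, col_ids, values, offsets, embedding_table, &mut scope)?;
+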
+/// Builder for the `XlaSparseDenseMatmulGradWithAdagradAndCsrInput` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct XlaSparseDenseMatmulGradWithAdagradAndCsrInput {
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst {
+    /// An instance of a fully built XlaSparseDenseMatmulGradWithAdagradAndCsrInput Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl XlaSparseDenseMatmulGradWithAdagradAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdagradAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `XlaSparseDenseMatmulGradWithAdagradAndCsrInput` operation.
+    pub fn build<
+        O0: ::std::convert::Into<crate::Output>,
+        O1: ::std::convert::Into<crate::Output>,
+        O2: ::std::convert::Into<crate::Output>,
+        O3: ::std::convert::Into<crate::Output>,
+        O4: ::std::convert::Into<crate::Output>,
+        O5: ::std::convert::Into<crate::Output>,
+        O6: ::std::convert::Into<crate::Output>,
+        O7: ::std::convert::Into<crate::Output>,
+        O8: ::std::convert::Into<crate::Output>,
+    >(
+        &self,
+        row_pointers: O0,
+        sorted_sample_ids: O1,
+        sorted_token_ids: O2,
+        sorted_gains: O3,
+        activation_gradients: O4,
+        learning_rate: O5,
+        embedding_table: O6,
+        accumulator: O7,
+        num_minibatches_per_physical_sparse_core: O8,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        self.build_impl(
+            row_pointers.into(),
+            sorted_sample_ids.into(),
+            sorted_token_ids.into(),
+            sorted_gains.into(),
+            activation_gradients.into(),
+            learning_rate.into(),
+            embedding_table.into(),
+            accumulator.into(),
+            num_minibatches_per_physical_sparse_core.into(),
+            scope,
+        )
+    }
+    fn build_impl(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        scope.new_operation("XlaSparseDenseMatmulGradWithAdagradAndCsrInput", |nd| {
+            nd.add_input(row_pointers);
+            nd.add_input(sorted_sample_ids);
+            nd.add_input(sorted_token_ids);
+            nd.add_input(sorted_gains);
+            nd.add_input(activation_gradients);
+            nd.add_input(learning_rate);
+            nd.add_input(embedding_table);
+            nd.add_input(accumulator);
+            nd.add_input(num_minibatches_per_physical_sparse_core);
+            for op in &self.control_inputs {
+                nd.add_control_input(op);
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                nd.set_attr_float("clip_weight_min", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                nd.set_attr_float("clip_weight_max", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.table_name {
+                nd.set_attr_string("table_name", value)?;
+            }
+            ::std::result::Result::Ok(())
+        })
+    }
+
+    /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst> {
+        let op = scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradAndCsrInput",
+            |builder| {
+                builder.add_input(row_pointers);
+                builder.add_input(sorted_sample_ids);
+                builder.add_input(sorted_token_ids);
+                builder.add_input(sorted_gains);
+                builder.add_input(activation_gradients);
+                builder.add_input(learning_rate);
+                builder.add_input(embedding_table);
+                builder.add_input(accumulator);
+                builder.add_input(num_minibatches_per_physical_sparse_core);
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    builder.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    builder.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.table_name {
+                    builder.set_attr_string("table_name", value)?;
+                }
+                ::std::result::Result::Ok(())
+            },
+        )?;
+        Ok(XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst { op })
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst {
+    /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn updated_embedding_table(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+    /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn updated_accumulator(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 1,
+        }
+    }
+    /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn row_pointers(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn sorted_sample_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn sorted_token_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn sorted_gains(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn activation_gradients(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+    /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn learning_rate(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 5,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 6,
+        }
+    }
+    /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn accumulator(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 7,
+        }
+    }
+    /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndCsrInput' operation.
+    pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 8,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst> for crate::Operation {
+    fn from(inst: XlaSparseDenseMatmulGradWithAdagradAndCsrInputInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradAndCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, num_minibatches_per_physical_sparse_core, scope)`.
+pub fn xla_sparse_dense_matmul_grad_with_adagrad_and_csr_input<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+    O5: ::std::convert::Into<crate::Output>,
+    O6: ::std::convert::Into<crate::Output>,
+    O7: ::std::convert::Into<crate::Output>,
+    O8: ::std::convert::Into<crate::Output>,
+>(
+    row_pointers: O0,
+    sorted_sample_ids: O1,
+    sorted_token_ids: O2,
+    sorted_gains: O3,
+    activation_gradients: O4,
+    learning_rate: O5,
+    embedding_table: O6,
+    accumulator: O7,
+    num_minibatches_per_physical_sparse_core: O8,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmulGradWithAdagradAndCsrInput::new().build(
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        num_minibatches_per_physical_sparse_core,
+        scope,
+    )
+}
+
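+// Editor's note (illustrative sketch, not generated code): `build_instance`
+// returns an `Inst` wrapper whose methods name the outputs, and the wrapper
+// converts back into a plain `crate::Operation`. The nine `crate::Output`
+// arguments are assumed to exist already:
+//
+//     let inst = XlaSparseDenseMatmulGradWithAdagradAndCsrInput::new()
+//         .table_name("table0")
+//         .build_instance(row_pointers, sorted_sample_ids, sorted_token_ids,
+//             sorted_gains, activation_gradients, learning_rate,
+//             embedding_table, accumulator, num_minibatches, &mut scope)?;
+//     let new_table = inst.updated_embedding_table();
+//     let op: crate::Operation = inst.into();
+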
+/// Builder for the `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize {
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst {
+    /// An instance of a fully built XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize` operation.
+    pub fn build<
+        O0: ::std::convert::Into<crate::Output>,
+        O1: ::std::convert::Into<crate::Output>,
+        O2: ::std::convert::Into<crate::Output>,
+        O3: ::std::convert::Into<crate::Output>,
+        O4: ::std::convert::Into<crate::Output>,
+        O5: ::std::convert::Into<crate::Output>,
+        O6: ::std::convert::Into<crate::Output>,
+        O7: ::std::convert::Into<crate::Output>,
+        O8: ::std::convert::Into<crate::Output>,
+    >(
+        &self,
+        row_pointers: O0,
+        sorted_sample_ids: O1,
+        sorted_token_ids: O2,
+        sorted_gains: O3,
+        activation_gradients: O4,
+        learning_rate: O5,
+        embedding_table: O6,
+        accumulator: O7,
+        num_minibatches_per_physical_sparse_core: O8,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        self.build_impl(
+            row_pointers.into(),
+            sorted_sample_ids.into(),
+            sorted_token_ids.into(),
+            sorted_gains.into(),
+            activation_gradients.into(),
+            learning_rate.into(),
+            embedding_table.into(),
+            accumulator.into(),
+            num_minibatches_per_physical_sparse_core.into(),
+            scope,
+        )
+    }
+    fn build_impl(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize",
+            |nd| {
+                nd.add_input(row_pointers);
+                nd.add_input(sorted_sample_ids);
+                nd.add_input(sorted_token_ids);
+                nd.add_input(sorted_gains);
+                nd.add_input(activation_gradients);
+                nd.add_input(learning_rate);
+                nd.add_input(embedding_table);
+                nd.add_input(accumulator);
+                nd.add_input(num_minibatches_per_physical_sparse_core);
+                for op in &self.control_inputs {
+                    nd.add_control_input(op);
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    nd.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    nd.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+                    nd.set_attr_int("max_ids_per_sparse_core", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+                    nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.table_name {
+                    nd.set_attr_string("table_name", value)?;
+                }
+                ::std::result::Result::Ok(())
+            },
+        )
+    }
+
+    /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst> {
+        let op = scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize",
+            |builder| {
+                builder.add_input(row_pointers);
+                builder.add_input(sorted_sample_ids);
+                builder.add_input(sorted_token_ids);
+                builder.add_input(sorted_gains);
+                builder.add_input(activation_gradients);
+                builder.add_input(learning_rate);
+                builder.add_input(embedding_table);
+                builder.add_input(accumulator);
+                builder.add_input(num_minibatches_per_physical_sparse_core);
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    builder.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    builder.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
+                    builder.set_attr_int("max_ids_per_sparse_core", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core {
+                    builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.table_name {
+                    builder.set_attr_string("table_name", value)?;
+                }
+                ::std::result::Result::Ok(())
+            },
+        )?;
+        Ok(XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst { op })
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst {
+    /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn updated_embedding_table(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+    /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn updated_accumulator(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 1,
+        }
+    }
+    /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn row_pointers(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn sorted_sample_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn sorted_token_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn sorted_gains(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn activation_gradients(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+    /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn learning_rate(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 5,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 6,
+        }
+    }
+    /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn accumulator(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 7,
+        }
+    }
+    /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize' operation.
+    pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 8,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst> for crate::Operation {
+    fn from(inst: XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, num_minibatches_per_physical_sparse_core, scope)`.
+pub fn xla_sparse_dense_matmul_grad_with_adagrad_and_static_buffer_size<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+    O5: ::std::convert::Into<crate::Output>,
+    O6: ::std::convert::Into<crate::Output>,
+    O7: ::std::convert::Into<crate::Output>,
+    O8: ::std::convert::Into<crate::Output>,
+>(
+    row_pointers: O0,
+    sorted_sample_ids: O1,
+    sorted_token_ids: O2,
+    sorted_gains: O3,
+    activation_gradients: O4,
+    learning_rate: O5,
+    embedding_table: O6,
+    accumulator: O7,
+    num_minibatches_per_physical_sparse_core: O8,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize::new().build(
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        num_minibatches_per_physical_sparse_core,
+        scope,
+    )
+}
+
+/// Builder for the `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput {
+    use_nesterov: ::std::option::Option<bool>,
+    exponent: ::std::option::Option<f32>,
+    beta1: ::std::option::Option<f32>,
+    beta2: ::std::option::Option<f32>,
+    epsilon: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst {
+    /// An instance of a fully built XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `use_nesterov` attribute.
+    pub fn use_nesterov<ArgType: ::std::convert::Into<bool>>(mut self, value: ArgType) -> Self {
+        self.use_nesterov = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `exponent` attribute.
+    pub fn exponent<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.exponent = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta1` attribute.
+    pub fn beta1<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta1 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta2` attribute.
+    pub fn beta2<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta2 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `epsilon` attribute.
+    pub fn epsilon<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.epsilon = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput` operation.
+    pub fn build<
+        O0: ::std::convert::Into<crate::Output>,
+        O1: ::std::convert::Into<crate::Output>,
+        O2: ::std::convert::Into<crate::Output>,
+        O3: ::std::convert::Into<crate::Output>,
+        O4: ::std::convert::Into<crate::Output>,
+        O5: ::std::convert::Into<crate::Output>,
+        O6: ::std::convert::Into<crate::Output>,
+        O7: ::std::convert::Into<crate::Output>,
+        O8: ::std::convert::Into<crate::Output>,
+        O9: ::std::convert::Into<crate::Output>,
+    >(
+        &self,
+        row_pointers: O0,
+        sorted_sample_ids: O1,
+        sorted_token_ids: O2,
+        sorted_gains: O3,
+        activation_gradients: O4,
+        learning_rate: O5,
+        embedding_table: O6,
+        accumulator: O7,
+        momenta: O8,
+        num_minibatches_per_physical_sparse_core: O9,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        self.build_impl(
+            row_pointers.into(),
+            sorted_sample_ids.into(),
+            sorted_token_ids.into(),
+            sorted_gains.into(),
+            activation_gradients.into(),
+            learning_rate.into(),
+            embedding_table.into(),
+            accumulator.into(),
+            momenta.into(),
+            num_minibatches_per_physical_sparse_core.into(),
+            scope,
+        )
+    }
+    fn build_impl(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        momenta: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput",
+            |nd| {
+                nd.add_input(row_pointers);
+                nd.add_input(sorted_sample_ids);
+                nd.add_input(sorted_token_ids);
+                nd.add_input(sorted_gains);
+                nd.add_input(activation_gradients);
+                nd.add_input(learning_rate);
+                nd.add_input(embedding_table);
+                nd.add_input(accumulator);
+                nd.add_input(momenta);
+                nd.add_input(num_minibatches_per_physical_sparse_core);
+                for op in &self.control_inputs {
+                    nd.add_control_input(op);
+                }
+                if let ::std::option::Option::Some(value) = &self.use_nesterov {
+                    nd.set_attr_bool("use_nesterov", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.exponent {
+                    nd.set_attr_float("exponent", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta1 {
+                    nd.set_attr_float("beta1", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta2 {
+                    nd.set_attr_float("beta2", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.epsilon {
+                    nd.set_attr_float("epsilon", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    nd.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    nd.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.table_name {
+                    nd.set_attr_string("table_name", value)?;
+                }
+                ::std::result::Result::Ok(())
+            },
+        )
+    }
+
+    /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        momenta: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst> {
+        let op = scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput",
+            |builder| {
+                builder.add_input(row_pointers);
+                builder.add_input(sorted_sample_ids);
+                builder.add_input(sorted_token_ids);
+                builder.add_input(sorted_gains);
+                builder.add_input(activation_gradients);
+                builder.add_input(learning_rate);
+                builder.add_input(embedding_table);
+                builder.add_input(accumulator);
+                builder.add_input(momenta);
+                builder.add_input(num_minibatches_per_physical_sparse_core);
+                if let ::std::option::Option::Some(value) = &self.use_nesterov {
+                    builder.set_attr_bool("use_nesterov", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.exponent {
+                    builder.set_attr_float("exponent", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta1 {
+                    builder.set_attr_float("beta1", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta2 {
+                    builder.set_attr_float("beta2", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.epsilon {
+                    builder.set_attr_float("epsilon", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    builder.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    builder.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.table_name {
+                    builder.set_attr_string("table_name", value)?;
+                }
+                ::std::result::Result::Ok(())
+            },
+        )?;
+        Ok(XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst { op })
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst {
+    /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn updated_embedding_table(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+    /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn updated_accumulator(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 1,
+        }
+    }
+    /// Returns the 'updated_momenta' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn updated_momenta(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 2,
+        }
+    }
+    /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn row_pointers(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn sorted_sample_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn sorted_token_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn sorted_gains(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn activation_gradients(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+    /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn learning_rate(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 5,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 6,
+        }
+    }
+    /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn accumulator(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 7,
+        }
+    }
+    /// Returns the 'momenta' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn momenta(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 8,
+        }
+    }
+    /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput' operation.
+    pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 9,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst> for crate::Operation {
+    fn from(inst: XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, momenta, num_minibatches_per_physical_sparse_core, scope)`.
+pub fn xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_csr_input<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+    O5: ::std::convert::Into<crate::Output>,
+    O6: ::std::convert::Into<crate::Output>,
+    O7: ::std::convert::Into<crate::Output>,
+    O8: ::std::convert::Into<crate::Output>,
+    O9: ::std::convert::Into<crate::Output>,
+>(
+    row_pointers: O0,
+    sorted_sample_ids: O1,
+    sorted_token_ids: O2,
+    sorted_gains: O3,
+    activation_gradients: O4,
+    learning_rate: O5,
+    embedding_table: O6,
+    accumulator: O7,
+    momenta: O8,
+    num_minibatches_per_physical_sparse_core: O9,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput::new().build(
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        momenta,
+        num_minibatches_per_physical_sparse_core,
+        scope,
+    )
+}
+
+/// Builder for the `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize {
+    use_nesterov: ::std::option::Option<bool>,
+    exponent: ::std::option::Option<f32>,
+    beta1: ::std::option::Option<f32>,
+    beta2: ::std::option::Option<f32>,
+    epsilon: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    max_ids_per_sparse_core: ::std::option::Option<i64>,
+    max_unique_ids_per_sparse_core: ::std::option::Option<i64>,
+    table_name: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeInst {
+    /// An instance of a fully built XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `use_nesterov` attribute.
+    pub fn use_nesterov<ArgType: ::std::convert::Into<bool>>(mut self, value: ArgType) -> Self {
+        self.use_nesterov = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `exponent` attribute.
+    pub fn exponent<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.exponent = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta1` attribute.
+    pub fn beta1<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta1 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta2` attribute.
+    pub fn beta2<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta2 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `epsilon` attribute.
+    pub fn epsilon<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.epsilon = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_ids_per_sparse_core` attribute.
+    pub fn max_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `max_unique_ids_per_sparse_core` attribute.
+    pub fn max_unique_ids_per_sparse_core<ArgType: ::std::convert::Into<i64>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize` operation.
+    pub fn build<
+        O0: ::std::convert::Into<crate::Output>,
+        O1: ::std::convert::Into<crate::Output>,
+        O2: ::std::convert::Into<crate::Output>,
+        O3: ::std::convert::Into<crate::Output>,
+        O4: ::std::convert::Into<crate::Output>,
+        O5: ::std::convert::Into<crate::Output>,
+        O6: ::std::convert::Into<crate::Output>,
+        O7: ::std::convert::Into<crate::Output>,
+        O8: ::std::convert::Into<crate::Output>,
+        O9: ::std::convert::Into<crate::Output>,
+    >(
+        &self,
+        row_pointers: O0,
+        sorted_sample_ids: O1,
+        sorted_token_ids: O2,
+        sorted_gains: O3,
+        activation_gradients: O4,
+        learning_rate: O5,
+        embedding_table: O6,
+        accumulator: O7,
+        momenta: O8,
+        num_minibatches_per_physical_sparse_core: O9,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        self.build_impl(
+            row_pointers.into(),
+            sorted_sample_ids.into(),
+            sorted_token_ids.into(),
+            sorted_gains.into(),
+            activation_gradients.into(),
+            learning_rate.into(),
+            embedding_table.into(),
+            accumulator.into(),
+            momenta.into(),
+            num_minibatches_per_physical_sparse_core.into(),
+            scope,
+        )
+    }
+    fn build_impl(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        accumulator: crate::Output,
+        momenta: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        scope.new_operation(
+            "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize",
+            |nd| {
+                nd.add_input(row_pointers);
+                nd.add_input(sorted_sample_ids);
+                nd.add_input(sorted_token_ids);
+                nd.add_input(sorted_gains);
+                nd.add_input(activation_gradients);
+                nd.add_input(learning_rate);
+                nd.add_input(embedding_table);
+                nd.add_input(accumulator);
+                nd.add_input(momenta);
+                nd.add_input(num_minibatches_per_physical_sparse_core);
+                for op in &self.control_inputs {
+                    nd.add_control_input(op);
+                }
+                if let ::std::option::Option::Some(value) = &self.use_nesterov {
+                    nd.set_attr_bool("use_nesterov", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.exponent {
+                    nd.set_attr_float("exponent", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta1 {
+                    nd.set_attr_float("beta1", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.beta2 {
+                    nd.set_attr_float("beta2", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.epsilon {
+                    nd.set_attr_float("epsilon", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                    nd.set_attr_float("clip_weight_min", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                    nd.set_attr_float("clip_weight_max", *value)?;
+                }
+                if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core {
nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + ) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + accumulator: crate::Output, + momenta: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation( + "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize", + |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(accumulator); + builder.add_input(momenta); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.use_nesterov { + builder.set_attr_bool("use_nesterov", *value)?; + } + if let ::std::option::Option::Some(value) = &self.exponent { + builder.set_attr_float("exponent", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta1 { + builder.set_attr_float("beta1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta2 { + builder.set_attr_float("beta2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.epsilon { + builder.set_attr_float("epsilon", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + )?; + Ok(XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation. + pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_momenta' Output of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation. 
+    pub fn updated_momenta(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 2,
+        }
+    }
+    /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn row_pointers(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn sorted_sample_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn sorted_token_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn sorted_gains(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn activation_gradients(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+    /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn learning_rate(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 5,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 6,
+        }
+    }
+    /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn accumulator(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 7,
+        }
+    }
+    /// Returns the 'momenta' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn momenta(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 8,
+        }
+    }
+    /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize' operation.
+    pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 9,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeInst> for crate::Operation {
+    fn from(
+        inst: XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeInst,
+    ) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, momenta, num_minibatches_per_physical_sparse_core, scope)`.
+pub fn xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_static_buffer_size<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+    O5: ::std::convert::Into<crate::Output>,
+    O6: ::std::convert::Into<crate::Output>,
+    O7: ::std::convert::Into<crate::Output>,
+    O8: ::std::convert::Into<crate::Output>,
+    O9: ::std::convert::Into<crate::Output>,
+>(
+    row_pointers: O0,
+    sorted_sample_ids: O1,
+    sorted_token_ids: O2,
+    sorted_gains: O3,
+    activation_gradients: O4,
+    learning_rate: O5,
+    embedding_table: O6,
+    accumulator: O7,
+    momenta: O8,
+    num_minibatches_per_physical_sparse_core: O9,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize::new().build(
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        accumulator,
+        momenta,
+        num_minibatches_per_physical_sparse_core,
+        scope,
+    )
+}
+
+/// Builder for the `XlaSparseDenseMatmulGradWithAdamAndCsrInput` operation.
+#[derive(::std::fmt::Debug, ::std::default::Default)]
+pub struct XlaSparseDenseMatmulGradWithAdamAndCsrInput {
+    use_sum_inside_sqrt: ::std::option::Option<bool>,
+    beta1: ::std::option::Option<f32>,
+    beta2: ::std::option::Option<f32>,
+    epsilon: ::std::option::Option<f32>,
+    clip_weight_min: ::std::option::Option<f32>,
+    clip_weight_max: ::std::option::Option<f32>,
+    table_name: ::std::option::Option<::std::string::String>,
+    control_inputs: ::std::vec::Vec<crate::Operation>,
+}
+/// An instance of 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+#[derive(Debug, Clone)]
+pub struct XlaSparseDenseMatmulGradWithAdamAndCsrInputInst {
+    /// An instance of a fully built XlaSparseDenseMatmulGradWithAdamAndCsrInput Operation in a Tensorflow graph.
+    pub op: crate::Operation,
+}
+
+impl XlaSparseDenseMatmulGradWithAdamAndCsrInput {
+    /// Creates a new `XlaSparseDenseMatmulGradWithAdamAndCsrInput`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the `use_sum_inside_sqrt` attribute.
+    pub fn use_sum_inside_sqrt<ArgType: ::std::convert::Into<bool>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta1` attribute.
+    pub fn beta1<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta1 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `beta2` attribute.
+    pub fn beta2<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.beta2 = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `epsilon` attribute.
+    pub fn epsilon<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.epsilon = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_min` attribute.
+    pub fn clip_weight_min<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_min = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `clip_weight_max` attribute.
+    pub fn clip_weight_max<ArgType: ::std::convert::Into<f32>>(mut self, value: ArgType) -> Self {
+        self.clip_weight_max = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Sets the `table_name` attribute.
+    pub fn table_name<ArgType: ::std::convert::Into<::std::string::String>>(
+        mut self,
+        value: ArgType,
+    ) -> Self {
+        self.table_name = ::std::option::Option::Some(value.into());
+        self
+    }
+
+    /// Adds a control input.
+    pub fn add_control_input(mut self, op: crate::Operation) -> Self {
+        self.control_inputs.push(op);
+        self
+    }
+
+    /// Builds the `XlaSparseDenseMatmulGradWithAdamAndCsrInput` operation.
+    pub fn build<
+        O0: ::std::convert::Into<crate::Output>,
+        O1: ::std::convert::Into<crate::Output>,
+        O2: ::std::convert::Into<crate::Output>,
+        O3: ::std::convert::Into<crate::Output>,
+        O4: ::std::convert::Into<crate::Output>,
+        O5: ::std::convert::Into<crate::Output>,
+        O6: ::std::convert::Into<crate::Output>,
+        O7: ::std::convert::Into<crate::Output>,
+        O8: ::std::convert::Into<crate::Output>,
+        O9: ::std::convert::Into<crate::Output>,
+    >(
+        &self,
+        row_pointers: O0,
+        sorted_sample_ids: O1,
+        sorted_token_ids: O2,
+        sorted_gains: O3,
+        activation_gradients: O4,
+        learning_rate: O5,
+        embedding_table: O6,
+        momenta: O7,
+        velocity: O8,
+        num_minibatches_per_physical_sparse_core: O9,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        self.build_impl(
+            row_pointers.into(),
+            sorted_sample_ids.into(),
+            sorted_token_ids.into(),
+            sorted_gains.into(),
+            activation_gradients.into(),
+            learning_rate.into(),
+            embedding_table.into(),
+            momenta.into(),
+            velocity.into(),
+            num_minibatches_per_physical_sparse_core.into(),
+            scope,
+        )
+    }
+    fn build_impl(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        momenta: crate::Output,
+        velocity: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<crate::Operation> {
+        scope.new_operation("XlaSparseDenseMatmulGradWithAdamAndCsrInput", |nd| {
+            nd.add_input(row_pointers);
+            nd.add_input(sorted_sample_ids);
+            nd.add_input(sorted_token_ids);
+            nd.add_input(sorted_gains);
+            nd.add_input(activation_gradients);
+            nd.add_input(learning_rate);
+            nd.add_input(embedding_table);
+            nd.add_input(momenta);
+            nd.add_input(velocity);
+            nd.add_input(num_minibatches_per_physical_sparse_core);
+            for op in &self.control_inputs {
+                nd.add_control_input(op);
+            }
+            if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt {
+                nd.set_attr_bool("use_sum_inside_sqrt", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.beta1 {
+                nd.set_attr_float("beta1", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.beta2 {
+                nd.set_attr_float("beta2", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.epsilon {
+                nd.set_attr_float("epsilon", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                nd.set_attr_float("clip_weight_min", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                nd.set_attr_float("clip_weight_max", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.table_name {
+                nd.set_attr_string("table_name", value)?;
+            }
+            ::std::result::Result::Ok(())
+        })
+    }
+
+    /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' Operation with its Outputs and Inputs exposed as methods.
+    pub fn build_instance(
+        &self,
+        row_pointers: crate::Output,
+        sorted_sample_ids: crate::Output,
+        sorted_token_ids: crate::Output,
+        sorted_gains: crate::Output,
+        activation_gradients: crate::Output,
+        learning_rate: crate::Output,
+        embedding_table: crate::Output,
+        momenta: crate::Output,
+        velocity: crate::Output,
+        num_minibatches_per_physical_sparse_core: crate::Output,
+        scope: &mut crate::Scope,
+    ) -> crate::Result<XlaSparseDenseMatmulGradWithAdamAndCsrInputInst> {
+        let op = scope.new_operation("XlaSparseDenseMatmulGradWithAdamAndCsrInput", |builder| {
+            builder.add_input(row_pointers);
+            builder.add_input(sorted_sample_ids);
+            builder.add_input(sorted_token_ids);
+            builder.add_input(sorted_gains);
+            builder.add_input(activation_gradients);
+            builder.add_input(learning_rate);
+            builder.add_input(embedding_table);
+            builder.add_input(momenta);
+            builder.add_input(velocity);
+            builder.add_input(num_minibatches_per_physical_sparse_core);
+            if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt {
+                builder.set_attr_bool("use_sum_inside_sqrt", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.beta1 {
+                builder.set_attr_float("beta1", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.beta2 {
+                builder.set_attr_float("beta2", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.epsilon {
+                builder.set_attr_float("epsilon", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_min {
+                builder.set_attr_float("clip_weight_min", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.clip_weight_max {
+                builder.set_attr_float("clip_weight_max", *value)?;
+            }
+            if let ::std::option::Option::Some(value) = &self.table_name {
+                builder.set_attr_string("table_name", value)?;
+            }
+            ::std::result::Result::Ok(())
+        })?;
+        Ok(XlaSparseDenseMatmulGradWithAdamAndCsrInputInst { op })
+    }
+}
+impl XlaSparseDenseMatmulGradWithAdamAndCsrInputInst {
+    /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn updated_embedding_table(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 0,
+        }
+    }
+    /// Returns the 'updated_momenta' Output of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn updated_momenta(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 1,
+        }
+    }
+    /// Returns the 'updated_velocity' Output of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn updated_velocity(&self) -> crate::Output {
+        crate::Output {
+            operation: self.op.clone(),
+            index: 2,
+        }
+    }
+    /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn row_pointers(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 0,
+        }
+    }
+    /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn sorted_sample_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 1,
+        }
+    }
+    /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn sorted_token_ids(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 2,
+        }
+    }
+    /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn sorted_gains(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 3,
+        }
+    }
+    /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn activation_gradients(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 4,
+        }
+    }
+    /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn learning_rate(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 5,
+        }
+    }
+    /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn embedding_table(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 6,
+        }
+    }
+    /// Returns the 'momenta' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn momenta(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 7,
+        }
+    }
+    /// Returns the 'velocity' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn velocity(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 8,
+        }
+    }
+    /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdamAndCsrInput' operation.
+    pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input {
+        crate::Input {
+            operation: &self.op,
+            index: 9,
+        }
+    }
+}
+impl From<XlaSparseDenseMatmulGradWithAdamAndCsrInputInst> for crate::Operation {
+    fn from(inst: XlaSparseDenseMatmulGradWithAdamAndCsrInputInst) -> crate::Operation {
+        inst.op
+    }
+}
+/// Shorthand for `XlaSparseDenseMatmulGradWithAdamAndCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, momenta, velocity, num_minibatches_per_physical_sparse_core, scope)`.
+pub fn xla_sparse_dense_matmul_grad_with_adam_and_csr_input<
+    O0: ::std::convert::Into<crate::Output>,
+    O1: ::std::convert::Into<crate::Output>,
+    O2: ::std::convert::Into<crate::Output>,
+    O3: ::std::convert::Into<crate::Output>,
+    O4: ::std::convert::Into<crate::Output>,
+    O5: ::std::convert::Into<crate::Output>,
+    O6: ::std::convert::Into<crate::Output>,
+    O7: ::std::convert::Into<crate::Output>,
+    O8: ::std::convert::Into<crate::Output>,
+    O9: ::std::convert::Into<crate::Output>,
+>(
+    row_pointers: O0,
+    sorted_sample_ids: O1,
+    sorted_token_ids: O2,
+    sorted_gains: O3,
+    activation_gradients: O4,
+    learning_rate: O5,
+    embedding_table: O6,
+    momenta: O7,
+    velocity: O8,
+    num_minibatches_per_physical_sparse_core: O9,
+    scope: &mut crate::Scope,
+) -> crate::Result<crate::Operation> {
+    XlaSparseDenseMatmulGradWithAdamAndCsrInput::new().build(
+        row_pointers,
+        sorted_sample_ids,
+        sorted_token_ids,
+        sorted_gains,
+        activation_gradients,
+        learning_rate,
+        embedding_table,
+        momenta,
+        velocity,
+        num_minibatches_per_physical_sparse_core,
+        scope,
+    )
+}
+
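+// Editor's note (illustrative sketch, not generated code): control inputs add
+// an ordering edge without a data dependency. `init_op` is an assumed existing
+// `crate::Operation` that must run before the gradient op; the ten `Output`
+// arguments are assumed to exist already:
+//
+//     let grad_op = XlaSparseDenseMatmulGradWithAdamAndCsrInput::new()
+//         .beta1(0.9f32)
+//         .beta2(0.999f32)
+//         .epsilon(1e-8f32)
+//         .add_control_input(init_op)
+//         .build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains,
+//             activation_gradients, learning_rate, embedding_table, momenta,
+//             velocity, num_minibatches, &mut scope)?;
+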
+#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeInst { + /// An instance of a fully built XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `use_sum_inside_sqrt` attribute. + pub fn use_sum_inside_sqrt>( + mut self, + value: ArgType, + ) -> Self { + self.use_sum_inside_sqrt = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta1` attribute. + pub fn beta1>(mut self, value: ArgType) -> Self { + self.beta1 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta2` attribute. + pub fn beta2>(mut self, value: ArgType) -> Self { + self.beta2 = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `epsilon` attribute. + pub fn epsilon>(mut self, value: ArgType) -> Self { + self.epsilon = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + momenta: O7, + velocity: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + activation_gradients.into(), + learning_rate.into(), + embedding_table.into(), + momenta.into(), + velocity.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + momenta: crate::Output, + velocity: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation( + "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize", + |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(activation_gradients); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + nd.add_input(momenta); + nd.add_input(velocity); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt { + nd.set_attr_bool("use_sum_inside_sqrt", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta1 { + nd.set_attr_float("beta1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta2 { + nd.set_attr_float("beta2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.epsilon { + nd.set_attr_float("epsilon", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + nd.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + nd.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + ) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + momenta: crate::Output, + velocity: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation( + "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize", + |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(momenta); + builder.add_input(velocity); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.use_sum_inside_sqrt { + builder.set_attr_bool("use_sum_inside_sqrt", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta1 { + builder.set_attr_float("beta1", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta2 { + builder.set_attr_float("beta2", *value)?; + } + if let ::std::option::Option::Some(value) = &self.epsilon { + builder.set_attr_float("epsilon", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + )?; + Ok(XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_momenta' Output of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn updated_momenta(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_velocity' Output of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn updated_velocity(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. 
+ pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn activation_gradients(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'momenta' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn momenta(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } + /// Returns the 'velocity' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn velocity(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 8, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 9, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, momenta, velocity, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_grad_with_adam_and_static_buffer_size< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + momenta: O7, + velocity: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + momenta, + velocity, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulGradWithFtrlAndCsrInput` operation. 
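+
+// [editorial note] In the FTRL variant defined below, the optimizer
+// hyperparameters become attributes and the slot-variable inputs change
+// from (momenta, velocity) to (accumulator, linear). A sketch under the
+// same hypothetical bindings as the Adam example above:
+//
+//     let inst = XlaSparseDenseMatmulGradWithFtrlAndCsrInput::new()
+//         .learning_rate_power(-0.5f32)
+//         .l1_regularization_strength(0.0f32)
+//         .l2_regularization_strength(0.0f32)
+//         .table_name("table_0")
+//         .build_instance(
+//             row_ptrs, sample_ids, token_ids, gains, grads, lr,
+//             table, accumulator, linear, num_minibatches, &mut scope,
+//         )?;
+//     let updated_linear = inst.updated_linear();
+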
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulGradWithFtrlAndCsrInput { + multiply_linear_by_learning_rate: ::std::option::Option, + beta: ::std::option::Option, + learning_rate_power: ::std::option::Option, + l1_regularization_strength: ::std::option::Option, + l2_regularization_strength: ::std::option::Option, + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulGradWithFtrlAndCsrInputInst { + /// An instance of a fully built XlaSparseDenseMatmulGradWithFtrlAndCsrInput Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulGradWithFtrlAndCsrInput { + /// Creates a new `XlaSparseDenseMatmulGradWithFtrlAndCsrInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `multiply_linear_by_learning_rate` attribute. + pub fn multiply_linear_by_learning_rate>( + mut self, + value: ArgType, + ) -> Self { + self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta` attribute. + pub fn beta>(mut self, value: ArgType) -> Self { + self.beta = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `learning_rate_power` attribute. + pub fn learning_rate_power>( + mut self, + value: ArgType, + ) -> Self { + self.learning_rate_power = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l1_regularization_strength` attribute. + pub fn l1_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l1_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l2_regularization_strength` attribute. + pub fn l2_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l2_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulGradWithFtrlAndCsrInput` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + accumulator: O7, + linear: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + activation_gradients.into(), + learning_rate.into(), + embedding_table.into(), + accumulator.into(), + linear.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmulGradWithFtrlAndCsrInput", |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(activation_gradients); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + nd.add_input(accumulator); + nd.add_input(linear); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + nd.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta { + nd.set_attr_float("beta", *value)?; + } + if let ::std::option::Option::Some(value) = &self.learning_rate_power { + nd.set_attr_float("learning_rate_power", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + nd.set_attr_float("l1_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l2_regularization_strength { + nd.set_attr_float("l2_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + nd.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + nd.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseDenseMatmulGradWithFtrlAndCsrInput", |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(accumulator); + builder.add_input(linear); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + builder.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta { + builder.set_attr_float("beta", *value)?; + } + if let ::std::option::Option::Some(value) = &self.learning_rate_power { + builder.set_attr_float("learning_rate_power", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + builder.set_attr_float("l1_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l2_regularization_strength { + builder.set_attr_float("l2_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseDenseMatmulGradWithFtrlAndCsrInputInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithFtrlAndCsrInputInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_linear' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn updated_linear(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. 
+ pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn activation_gradients(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn accumulator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } + /// Returns the 'linear' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn linear(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 8, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndCsrInput' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 9, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulGradWithFtrlAndCsrInputInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulGradWithFtrlAndCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, linear, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_grad_with_ftrl_and_csr_input< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + accumulator: O7, + linear: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulGradWithFtrlAndCsrInput::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + accumulator, + linear, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize` operation. 
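+
+// [editorial note] The `...AndStaticBufferSize` variants below differ from
+// the `...AndCsrInput` forms only by the `max_ids_per_sparse_core` and
+// `max_unique_ids_per_sparse_core` integer attributes, which presumably let
+// the compiler size the per-SparseCore buffers statically. Hypothetical use
+// (attribute values and bindings are illustrative, not recommendations):
+//
+//     let op = XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize::new()
+//         .max_ids_per_sparse_core(4096)
+//         .max_unique_ids_per_sparse_core(2048)
+//         .build(
+//             row_ptrs, sample_ids, token_ids, gains, grads, lr,
+//             table, accumulator, linear, num_minibatches, &mut scope,
+//         )?;
+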
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize { + multiply_linear_by_learning_rate: ::std::option::Option, + beta: ::std::option::Option, + learning_rate_power: ::std::option::Option, + l1_regularization_strength: ::std::option::Option, + l2_regularization_strength: ::std::option::Option, + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeInst { + /// An instance of a fully built XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `multiply_linear_by_learning_rate` attribute. + pub fn multiply_linear_by_learning_rate>( + mut self, + value: ArgType, + ) -> Self { + self.multiply_linear_by_learning_rate = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `beta` attribute. + pub fn beta>(mut self, value: ArgType) -> Self { + self.beta = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `learning_rate_power` attribute. + pub fn learning_rate_power>( + mut self, + value: ArgType, + ) -> Self { + self.learning_rate_power = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l1_regularization_strength` attribute. + pub fn l1_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l1_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `l2_regularization_strength` attribute. + pub fn l2_regularization_strength>( + mut self, + value: ArgType, + ) -> Self { + self.l2_regularization_strength = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + accumulator: O7, + linear: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + activation_gradients.into(), + learning_rate.into(), + embedding_table.into(), + accumulator.into(), + linear.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation( + "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize", + |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(activation_gradients); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + nd.add_input(accumulator); + nd.add_input(linear); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + nd.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta { + nd.set_attr_float("beta", *value)?; + } + if let ::std::option::Option::Some(value) = &self.learning_rate_power { + nd.set_attr_float("learning_rate_power", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + nd.set_attr_float("l1_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l2_regularization_strength { + nd.set_attr_float("l2_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + nd.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + nd.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + ) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + accumulator: crate::Output, + linear: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation( + "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize", + |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(accumulator); + builder.add_input(linear); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.multiply_linear_by_learning_rate { + builder.set_attr_bool("multiply_linear_by_learning_rate", *value)?; + } + if let ::std::option::Option::Some(value) = &self.beta { + builder.set_attr_float("beta", *value)?; + } + if let ::std::option::Option::Some(value) = &self.learning_rate_power { + builder.set_attr_float("learning_rate_power", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l1_regularization_strength { + builder.set_attr_float("l1_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.l2_regularization_strength { + builder.set_attr_float("l2_regularization_strength", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + )?; + Ok(XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'updated_accumulator' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn updated_accumulator(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 1, + } + } + /// Returns the 'updated_linear' Output of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn updated_linear(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 2, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. 
+ pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn activation_gradients(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'accumulator' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn accumulator(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } + /// Returns the 'linear' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn linear(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 8, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 9, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, accumulator, linear, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_grad_with_ftrl_and_static_buffer_size< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + O8: ::std::convert::Into, + O9: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + accumulator: O7, + linear: O8, + num_minibatches_per_physical_sparse_core: O9, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + accumulator, + linear, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulGradWithSgdAndCsrInput` operation. 
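+
+// [editorial note] The SGD variant below carries no optimizer slot
+// variables, so it takes eight inputs instead of ten and exposes a single
+// `updated_embedding_table` output. With the same hypothetical bindings,
+// the free-function shorthand might read:
+//
+//     let op = xla_sparse_dense_matmul_grad_with_sgd_and_csr_input(
+//         row_ptrs, sample_ids, token_ids, gains, grads, lr,
+//         table, num_minibatches, &mut scope,
+//     )?;
+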
+#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulGradWithSgdAndCsrInput { + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulGradWithSgdAndCsrInputInst { + /// An instance of a fully built XlaSparseDenseMatmulGradWithSgdAndCsrInput Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulGradWithSgdAndCsrInput { + /// Creates a new `XlaSparseDenseMatmulGradWithSgdAndCsrInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulGradWithSgdAndCsrInput` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + num_minibatches_per_physical_sparse_core: O7, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + activation_gradients.into(), + learning_rate.into(), + embedding_table.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmulGradWithSgdAndCsrInput", |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(activation_gradients); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + nd.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + nd.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + 
::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' Operation with it's Outputs and Inputs exposed as methods. + pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseDenseMatmulGradWithSgdAndCsrInput", |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseDenseMatmulGradWithSgdAndCsrInputInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithSgdAndCsrInputInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn activation_gradients(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithSgdAndCsrInput' operation. 
+ pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulGradWithSgdAndCsrInputInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulGradWithSgdAndCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_grad_with_sgd_and_csr_input< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + num_minibatches_per_physical_sparse_core: O7, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulGradWithSgdAndCsrInput::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize { + clip_weight_min: ::std::option::Option, + clip_weight_max: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeInst { + /// An instance of a fully built XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `clip_weight_min` attribute. + pub fn clip_weight_min>(mut self, value: ArgType) -> Self { + self.clip_weight_min = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `clip_weight_max` attribute. + pub fn clip_weight_max>(mut self, value: ArgType) -> Self { + self.clip_weight_max = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. 
+ pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + num_minibatches_per_physical_sparse_core: O7, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + activation_gradients.into(), + learning_rate.into(), + embedding_table.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize", |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(activation_gradients); + nd.add_input(learning_rate); + nd.add_input(embedding_table); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + nd.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + nd.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. 
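+
+// [editorial note] Unlike `build`, `build_instance` takes plain
+// `crate::Output` values rather than `Into<crate::Output>` generics and
+// returns the typed `...Inst` wrapper. A sketch with hypothetical bindings:
+//
+//     let inst = XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize::new()
+//         .clip_weight_min(-1.0f32)
+//         .clip_weight_max(1.0f32)
+//         .build_instance(
+//             row_ptrs, sample_ids, token_ids, gains, grads, lr,
+//             table, num_minibatches, &mut scope,
+//         )?;
+//     let updated_table = inst.updated_embedding_table();
+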
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + activation_gradients: crate::Output, + learning_rate: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation( + "XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize", + |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(activation_gradients); + builder.add_input(learning_rate); + builder.add_input(embedding_table); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.clip_weight_min { + builder.set_attr_float("clip_weight_min", *value)?; + } + if let ::std::option::Option::Some(value) = &self.clip_weight_max { + builder.set_attr_float("clip_weight_max", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }, + )?; + Ok(XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeInst { op }) + } +} +impl XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeInst { + /// Returns the 'updated_embedding_table' Output of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn updated_embedding_table(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'activation_gradients' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn activation_gradients(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'learning_rate' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn learning_rate(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. 
+ pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 6, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 7, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, activation_gradients, learning_rate, embedding_table, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_grad_with_sgd_and_static_buffer_size< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + O6: ::std::convert::Into, + O7: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + activation_gradients: O4, + learning_rate: O5, + embedding_table: O6, + num_minibatches_per_physical_sparse_core: O7, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + activation_gradients, + learning_rate, + embedding_table, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulWithCsrInput` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulWithCsrInput { + input_size: ::std::option::Option, + quantization_config_low: ::std::option::Option, + quantization_config_high: ::std::option::Option, + quantization_config_num_buckets: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulWithCsrInput' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulWithCsrInputInst { + /// An instance of a fully built XlaSparseDenseMatmulWithCsrInput Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulWithCsrInput { + /// Creates a new `XlaSparseDenseMatmulWithCsrInput`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `input_size` attribute. + pub fn input_size>(mut self, value: ArgType) -> Self { + self.input_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_low` attribute. + pub fn quantization_config_low>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_low = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_high` attribute. + pub fn quantization_config_high>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_high = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_num_buckets` attribute. + pub fn quantization_config_num_buckets>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_num_buckets = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. 
+ pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulWithCsrInput` operation. + pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + embedding_table: O4, + num_minibatches_per_physical_sparse_core: O5, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + embedding_table.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmulWithCsrInput", |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(embedding_table); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.input_size { + nd.set_attr_int("input_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_low { + nd.set_attr_float("quantization_config_low", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_high { + nd.set_attr_float("quantization_config_high", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets { + nd.set_attr_int("quantization_config_num_buckets", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulWithCsrInput' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseDenseMatmulWithCsrInput", |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(embedding_table); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.input_size { + builder.set_attr_int("input_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_low { + builder.set_attr_float("quantization_config_low", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_high { + builder.set_attr_float("quantization_config_high", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets { + builder.set_attr_int("quantization_config_num_buckets", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseDenseMatmulWithCsrInputInst { op }) + } +} +impl XlaSparseDenseMatmulWithCsrInputInst { + /// Returns the 'activations' Output of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn activations(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulWithCsrInput' operation. + pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulWithCsrInputInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulWithCsrInput::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, embedding_table, num_minibatches_per_physical_sparse_core, scope)`. 
+pub fn xla_sparse_dense_matmul_with_csr_input< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + embedding_table: O4, + num_minibatches_per_physical_sparse_core: O5, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulWithCsrInput::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + embedding_table, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + +/// Builder for the `XlaSparseDenseMatmulWithStaticBufferSize` operation. +#[derive(::std::fmt::Debug, ::std::default::Default)] +pub struct XlaSparseDenseMatmulWithStaticBufferSize { + input_size: ::std::option::Option, + quantization_config_low: ::std::option::Option, + quantization_config_high: ::std::option::Option, + quantization_config_num_buckets: ::std::option::Option, + max_ids_per_sparse_core: ::std::option::Option, + max_unique_ids_per_sparse_core: ::std::option::Option, + table_name: ::std::option::Option<::std::string::String>, + control_inputs: ::std::vec::Vec, +} +/// An instance of 'XlaSparseDenseMatmulWithStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. +#[derive(Debug, Clone)] +pub struct XlaSparseDenseMatmulWithStaticBufferSizeInst { + /// An instance of a fully built XlaSparseDenseMatmulWithStaticBufferSize Operation in a Tensorflow graph. + pub op: crate::Operation, +} + +impl XlaSparseDenseMatmulWithStaticBufferSize { + /// Creates a new `XlaSparseDenseMatmulWithStaticBufferSize`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the `input_size` attribute. + pub fn input_size>(mut self, value: ArgType) -> Self { + self.input_size = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_low` attribute. + pub fn quantization_config_low>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_low = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_high` attribute. + pub fn quantization_config_high>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_high = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `quantization_config_num_buckets` attribute. + pub fn quantization_config_num_buckets>( + mut self, + value: ArgType, + ) -> Self { + self.quantization_config_num_buckets = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_ids_per_sparse_core` attribute. + pub fn max_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `max_unique_ids_per_sparse_core` attribute. + pub fn max_unique_ids_per_sparse_core>( + mut self, + value: ArgType, + ) -> Self { + self.max_unique_ids_per_sparse_core = ::std::option::Option::Some(value.into()); + self + } + + /// Sets the `table_name` attribute. + pub fn table_name>( + mut self, + value: ArgType, + ) -> Self { + self.table_name = ::std::option::Option::Some(value.into()); + self + } + + /// Adds a control input. + pub fn add_control_input(mut self, op: crate::Operation) -> Self { + self.control_inputs.push(op); + self + } + + /// Builds the `XlaSparseDenseMatmulWithStaticBufferSize` operation. 
+ pub fn build< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, + >( + &self, + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + embedding_table: O4, + num_minibatches_per_physical_sparse_core: O5, + scope: &mut crate::Scope, + ) -> crate::Result { + self.build_impl( + row_pointers.into(), + sorted_sample_ids.into(), + sorted_token_ids.into(), + sorted_gains.into(), + embedding_table.into(), + num_minibatches_per_physical_sparse_core.into(), + scope, + ) + } + fn build_impl( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + scope.new_operation("XlaSparseDenseMatmulWithStaticBufferSize", |nd| { + nd.add_input(row_pointers); + nd.add_input(sorted_sample_ids); + nd.add_input(sorted_token_ids); + nd.add_input(sorted_gains); + nd.add_input(embedding_table); + nd.add_input(num_minibatches_per_physical_sparse_core); + for op in &self.control_inputs { + nd.add_control_input(op); + } + if let ::std::option::Option::Some(value) = &self.input_size { + nd.set_attr_int("input_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_low { + nd.set_attr_float("quantization_config_low", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_high { + nd.set_attr_float("quantization_config_high", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets { + nd.set_attr_int("quantization_config_num_buckets", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + nd.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + nd.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + nd.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + }) + } + + /// Builds a new instance of 'XlaSparseDenseMatmulWithStaticBufferSize' Operation with it's Outputs and Inputs exposed as methods. 
+ pub fn build_instance( + &self, + row_pointers: crate::Output, + sorted_sample_ids: crate::Output, + sorted_token_ids: crate::Output, + sorted_gains: crate::Output, + embedding_table: crate::Output, + num_minibatches_per_physical_sparse_core: crate::Output, + scope: &mut crate::Scope, + ) -> crate::Result { + let op = scope.new_operation("XlaSparseDenseMatmulWithStaticBufferSize", |builder| { + builder.add_input(row_pointers); + builder.add_input(sorted_sample_ids); + builder.add_input(sorted_token_ids); + builder.add_input(sorted_gains); + builder.add_input(embedding_table); + builder.add_input(num_minibatches_per_physical_sparse_core); + if let ::std::option::Option::Some(value) = &self.input_size { + builder.set_attr_int("input_size", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_low { + builder.set_attr_float("quantization_config_low", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_high { + builder.set_attr_float("quantization_config_high", *value)?; + } + if let ::std::option::Option::Some(value) = &self.quantization_config_num_buckets { + builder.set_attr_int("quantization_config_num_buckets", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_ids_per_sparse_core { + builder.set_attr_int("max_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.max_unique_ids_per_sparse_core { + builder.set_attr_int("max_unique_ids_per_sparse_core", *value)?; + } + if let ::std::option::Option::Some(value) = &self.table_name { + builder.set_attr_string("table_name", value)?; + } + ::std::result::Result::Ok(()) + })?; + Ok(XlaSparseDenseMatmulWithStaticBufferSizeInst { op }) + } +} +impl XlaSparseDenseMatmulWithStaticBufferSizeInst { + /// Returns the 'activations' Output of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn activations(&self) -> crate::Output { + crate::Output { + operation: self.op.clone(), + index: 0, + } + } + /// Returns the 'row_pointers' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn row_pointers(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 0, + } + } + /// Returns the 'sorted_sample_ids' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn sorted_sample_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 1, + } + } + /// Returns the 'sorted_token_ids' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn sorted_token_ids(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 2, + } + } + /// Returns the 'sorted_gains' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn sorted_gains(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 3, + } + } + /// Returns the 'embedding_table' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. + pub fn embedding_table(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 4, + } + } + /// Returns the 'num_minibatches_per_physical_sparse_core' Input of this 'XlaSparseDenseMatmulWithStaticBufferSize' operation. 
+ pub fn num_minibatches_per_physical_sparse_core(&self) -> crate::Input { + crate::Input { + operation: &self.op, + index: 5, + } + } +} +impl From for crate::Operation { + fn from(inst: XlaSparseDenseMatmulWithStaticBufferSizeInst) -> crate::Operation { + inst.op + } +} +/// Shorthand for `XlaSparseDenseMatmulWithStaticBufferSize::new().build(row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains, embedding_table, num_minibatches_per_physical_sparse_core, scope)`. +pub fn xla_sparse_dense_matmul_with_static_buffer_size< + O0: ::std::convert::Into, + O1: ::std::convert::Into, + O2: ::std::convert::Into, + O3: ::std::convert::Into, + O4: ::std::convert::Into, + O5: ::std::convert::Into, +>( + row_pointers: O0, + sorted_sample_ids: O1, + sorted_token_ids: O2, + sorted_gains: O3, + embedding_table: O4, + num_minibatches_per_physical_sparse_core: O5, + scope: &mut crate::Scope, +) -> crate::Result { + XlaSparseDenseMatmulWithStaticBufferSize::new().build( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + embedding_table, + num_minibatches_per_physical_sparse_core, + scope, + ) +} + /// Builder for the `XlaSplitND` operation. #[derive(::std::fmt::Debug, ::std::default::Default)] pub struct XlaSplitND { diff --git a/src/protos/config.rs b/src/protos/config.rs index 710f3fafee..ab0688998f 100644 --- a/src/protos/config.rs +++ b/src/protos/config.rs @@ -495,6 +495,7 @@ impl ::protobuf::reflect::ProtobufValue for GPUOptions { pub struct GPUOptions_Experimental { // message fields pub virtual_devices: ::protobuf::RepeatedField, + pub num_virtual_devices_per_gpu: i32, pub use_unified_memory: bool, pub num_dev_to_dev_copy_streams: i32, pub collective_ring_order: ::std::string::String, @@ -507,6 +508,10 @@ pub struct GPUOptions_Experimental { pub disallow_retry_on_allocation_failure: bool, pub gpu_host_mem_limit_in_mb: f32, pub gpu_host_mem_disallow_growth: bool, + pub gpu_system_memory_size_in_mb: i32, + pub populate_pjrt_gpu_client_creation_info: bool, + pub node_id: i32, + pub stream_merge_options: ::protobuf::SingularPtrField, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -548,6 +553,21 @@ impl GPUOptions_Experimental { ::std::mem::replace(&mut self.virtual_devices, ::protobuf::RepeatedField::new()) } + // int32 num_virtual_devices_per_gpu = 15; + + + pub fn get_num_virtual_devices_per_gpu(&self) -> i32 { + self.num_virtual_devices_per_gpu + } + pub fn clear_num_virtual_devices_per_gpu(&mut self) { + self.num_virtual_devices_per_gpu = 0; + } + + // Param is passed by value, moved + pub fn set_num_virtual_devices_per_gpu(&mut self, v: i32) { + self.num_virtual_devices_per_gpu = v; + } + // bool use_unified_memory = 2; @@ -738,6 +758,84 @@ impl GPUOptions_Experimental { pub fn set_gpu_host_mem_disallow_growth(&mut self, v: bool) { self.gpu_host_mem_disallow_growth = v; } + + // int32 gpu_system_memory_size_in_mb = 16; + + + pub fn get_gpu_system_memory_size_in_mb(&self) -> i32 { + self.gpu_system_memory_size_in_mb + } + pub fn clear_gpu_system_memory_size_in_mb(&mut self) { + self.gpu_system_memory_size_in_mb = 0; + } + + // Param is passed by value, moved + pub fn set_gpu_system_memory_size_in_mb(&mut self, v: i32) { + self.gpu_system_memory_size_in_mb = v; + } + + // bool populate_pjrt_gpu_client_creation_info = 17; + + + pub fn get_populate_pjrt_gpu_client_creation_info(&self) -> bool { + self.populate_pjrt_gpu_client_creation_info + } + pub fn clear_populate_pjrt_gpu_client_creation_info(&mut self) { 
+ self.populate_pjrt_gpu_client_creation_info = false; + } + + // Param is passed by value, moved + pub fn set_populate_pjrt_gpu_client_creation_info(&mut self, v: bool) { + self.populate_pjrt_gpu_client_creation_info = v; + } + + // int32 node_id = 18; + + + pub fn get_node_id(&self) -> i32 { + self.node_id + } + pub fn clear_node_id(&mut self) { + self.node_id = 0; + } + + // Param is passed by value, moved + pub fn set_node_id(&mut self, v: i32) { + self.node_id = v; + } + + // .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19; + + + pub fn get_stream_merge_options(&self) -> &GPUOptions_Experimental_StreamMergeOptions { + self.stream_merge_options.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_stream_merge_options(&mut self) { + self.stream_merge_options.clear(); + } + + pub fn has_stream_merge_options(&self) -> bool { + self.stream_merge_options.is_some() + } + + // Param is passed by value, moved + pub fn set_stream_merge_options(&mut self, v: GPUOptions_Experimental_StreamMergeOptions) { + self.stream_merge_options = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_stream_merge_options(&mut self) -> &mut GPUOptions_Experimental_StreamMergeOptions { + if self.stream_merge_options.is_none() { + self.stream_merge_options.set_default(); + } + self.stream_merge_options.as_mut().unwrap() + } + + // Take field + pub fn take_stream_merge_options(&mut self) -> GPUOptions_Experimental_StreamMergeOptions { + self.stream_merge_options.take().unwrap_or_else(|| GPUOptions_Experimental_StreamMergeOptions::new()) + } } impl ::protobuf::Message for GPUOptions_Experimental { @@ -747,6 +845,11 @@ impl ::protobuf::Message for GPUOptions_Experimental { return false; } }; + for v in &self.stream_merge_options { + if !v.is_initialized() { + return false; + } + }; true } @@ -757,6 +860,13 @@ impl ::protobuf::Message for GPUOptions_Experimental { 1 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.virtual_devices)?; }, + 15 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.num_virtual_devices_per_gpu = tmp; + }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); @@ -837,6 +947,30 @@ impl ::protobuf::Message for GPUOptions_Experimental { let tmp = is.read_bool()?; self.gpu_host_mem_disallow_growth = tmp; }, + 16 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.gpu_system_memory_size_in_mb = tmp; + }, + 17 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.populate_pjrt_gpu_client_creation_info = tmp; + }, + 18 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.node_id = tmp; + }, + 19 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.stream_merge_options)?; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, 
self.mut_unknown_fields())?; }, @@ -853,6 +987,9 @@ impl ::protobuf::Message for GPUOptions_Experimental { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; + if self.num_virtual_devices_per_gpu != 0 { + my_size += ::protobuf::rt::value_size(15, self.num_virtual_devices_per_gpu, ::protobuf::wire_format::WireTypeVarint); + } if self.use_unified_memory != false { my_size += 2; } @@ -889,6 +1026,19 @@ impl ::protobuf::Message for GPUOptions_Experimental { if self.gpu_host_mem_disallow_growth != false { my_size += 2; } + if self.gpu_system_memory_size_in_mb != 0 { + my_size += ::protobuf::rt::value_size(16, self.gpu_system_memory_size_in_mb, ::protobuf::wire_format::WireTypeVarint); + } + if self.populate_pjrt_gpu_client_creation_info != false { + my_size += 3; + } + if self.node_id != 0 { + my_size += ::protobuf::rt::value_size(18, self.node_id, ::protobuf::wire_format::WireTypeVarint); + } + if let Some(ref v) = self.stream_merge_options.as_ref() { + let len = v.compute_size(); + my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -900,6 +1050,9 @@ impl ::protobuf::Message for GPUOptions_Experimental { os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; + if self.num_virtual_devices_per_gpu != 0 { + os.write_int32(15, self.num_virtual_devices_per_gpu)?; + } if self.use_unified_memory != false { os.write_bool(2, self.use_unified_memory)?; } @@ -936,6 +1089,20 @@ impl ::protobuf::Message for GPUOptions_Experimental { if self.gpu_host_mem_disallow_growth != false { os.write_bool(14, self.gpu_host_mem_disallow_growth)?; } + if self.gpu_system_memory_size_in_mb != 0 { + os.write_int32(16, self.gpu_system_memory_size_in_mb)?; + } + if self.populate_pjrt_gpu_client_creation_info != false { + os.write_bool(17, self.populate_pjrt_gpu_client_creation_info)?; + } + if self.node_id != 0 { + os.write_int32(18, self.node_id)?; + } + if let Some(ref v) = self.stream_merge_options.as_ref() { + os.write_tag(19, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -979,6 +1146,11 @@ impl ::protobuf::Message for GPUOptions_Experimental { |m: &GPUOptions_Experimental| { &m.virtual_devices }, |m: &mut GPUOptions_Experimental| { &mut m.virtual_devices }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "num_virtual_devices_per_gpu", + |m: &GPUOptions_Experimental| { &m.num_virtual_devices_per_gpu }, + |m: &mut GPUOptions_Experimental| { &mut m.num_virtual_devices_per_gpu }, + )); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( "use_unified_memory", |m: &GPUOptions_Experimental| { &m.use_unified_memory }, @@ -1039,6 +1211,26 @@ impl ::protobuf::Message for GPUOptions_Experimental { |m: &GPUOptions_Experimental| { &m.gpu_host_mem_disallow_growth }, |m: &mut GPUOptions_Experimental| { &mut m.gpu_host_mem_disallow_growth }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "gpu_system_memory_size_in_mb", + |m: &GPUOptions_Experimental| { &m.gpu_system_memory_size_in_mb }, + |m: &mut GPUOptions_Experimental| { &mut 
m.gpu_system_memory_size_in_mb }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "populate_pjrt_gpu_client_creation_info", + |m: &GPUOptions_Experimental| { &m.populate_pjrt_gpu_client_creation_info }, + |m: &mut GPUOptions_Experimental| { &mut m.populate_pjrt_gpu_client_creation_info }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "node_id", + |m: &GPUOptions_Experimental| { &m.node_id }, + |m: &mut GPUOptions_Experimental| { &mut m.node_id }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "stream_merge_options", + |m: &GPUOptions_Experimental| { &m.stream_merge_options }, + |m: &mut GPUOptions_Experimental| { &mut m.stream_merge_options }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "GPUOptions.Experimental", fields, @@ -1056,6 +1248,7 @@ impl ::protobuf::Message for GPUOptions_Experimental { impl ::protobuf::Clear for GPUOptions_Experimental { fn clear(&mut self) { self.virtual_devices.clear(); + self.num_virtual_devices_per_gpu = 0; self.use_unified_memory = false; self.num_dev_to_dev_copy_streams = 0; self.collective_ring_order.clear(); @@ -1068,6 +1261,10 @@ impl ::protobuf::Clear for GPUOptions_Experimental { self.disallow_retry_on_allocation_failure = false; self.gpu_host_mem_limit_in_mb = 0.; self.gpu_host_mem_disallow_growth = false; + self.gpu_system_memory_size_in_mb = 0; + self.populate_pjrt_gpu_client_creation_info = false; + self.node_id = 0; + self.stream_merge_options.clear(); self.unknown_fields.clear(); } } @@ -1322,6 +1519,228 @@ impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental_VirtualDevic } } +#[derive(PartialEq,Clone,Default)] +pub struct GPUOptions_Experimental_StreamMergeOptions { + // message fields + pub merge_host_to_device_stream: bool, + pub merge_device_to_host_stream: bool, + pub merge_device_to_device_stream: bool, + // special fields + pub unknown_fields: ::protobuf::UnknownFields, + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a GPUOptions_Experimental_StreamMergeOptions { + fn default() -> &'a GPUOptions_Experimental_StreamMergeOptions { + ::default_instance() + } +} + +impl GPUOptions_Experimental_StreamMergeOptions { + pub fn new() -> GPUOptions_Experimental_StreamMergeOptions { + ::std::default::Default::default() + } + + // bool merge_host_to_device_stream = 1; + + + pub fn get_merge_host_to_device_stream(&self) -> bool { + self.merge_host_to_device_stream + } + pub fn clear_merge_host_to_device_stream(&mut self) { + self.merge_host_to_device_stream = false; + } + + // Param is passed by value, moved + pub fn set_merge_host_to_device_stream(&mut self, v: bool) { + self.merge_host_to_device_stream = v; + } + + // bool merge_device_to_host_stream = 2; + + + pub fn get_merge_device_to_host_stream(&self) -> bool { + self.merge_device_to_host_stream + } + pub fn clear_merge_device_to_host_stream(&mut self) { + self.merge_device_to_host_stream = false; + } + + // Param is passed by value, moved + pub fn set_merge_device_to_host_stream(&mut self, v: bool) { + self.merge_device_to_host_stream = v; + } + + // bool merge_device_to_device_stream = 3; + + + pub fn get_merge_device_to_device_stream(&self) -> bool { + self.merge_device_to_device_stream + } + pub fn clear_merge_device_to_device_stream(&mut self) { + self.merge_device_to_device_stream = 
false; + } + + // Param is passed by value, moved + pub fn set_merge_device_to_device_stream(&mut self, v: bool) { + self.merge_device_to_device_stream = v; + } +} + +impl ::protobuf::Message for GPUOptions_Experimental_StreamMergeOptions { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.merge_host_to_device_stream = tmp; + }, + 2 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.merge_device_to_host_stream = tmp; + }, + 3 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.merge_device_to_device_stream = tmp; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.merge_host_to_device_stream != false { + my_size += 2; + } + if self.merge_device_to_host_stream != false { + my_size += 2; + } + if self.merge_device_to_device_stream != false { + my_size += 2; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.merge_host_to_device_stream != false { + os.write_bool(1, self.merge_host_to_device_stream)?; + } + if self.merge_device_to_host_stream != false { + os.write_bool(2, self.merge_device_to_host_stream)?; + } + if self.merge_device_to_device_stream != false { + os.write_bool(3, self.merge_device_to_device_stream)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> GPUOptions_Experimental_StreamMergeOptions { + GPUOptions_Experimental_StreamMergeOptions::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "merge_host_to_device_stream", + |m: 
&GPUOptions_Experimental_StreamMergeOptions| { &m.merge_host_to_device_stream }, + |m: &mut GPUOptions_Experimental_StreamMergeOptions| { &mut m.merge_host_to_device_stream }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "merge_device_to_host_stream", + |m: &GPUOptions_Experimental_StreamMergeOptions| { &m.merge_device_to_host_stream }, + |m: &mut GPUOptions_Experimental_StreamMergeOptions| { &mut m.merge_device_to_host_stream }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "merge_device_to_device_stream", + |m: &GPUOptions_Experimental_StreamMergeOptions| { &m.merge_device_to_device_stream }, + |m: &mut GPUOptions_Experimental_StreamMergeOptions| { &mut m.merge_device_to_device_stream }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "GPUOptions.Experimental.StreamMergeOptions", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static GPUOptions_Experimental_StreamMergeOptions { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(GPUOptions_Experimental_StreamMergeOptions::new) + } +} + +impl ::protobuf::Clear for GPUOptions_Experimental_StreamMergeOptions { + fn clear(&mut self) { + self.merge_host_to_device_stream = false; + self.merge_device_to_host_stream = false; + self.merge_device_to_device_stream = false; + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for GPUOptions_Experimental_StreamMergeOptions { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental_StreamMergeOptions { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + #[derive(PartialEq,Clone,Default)] pub struct OptimizerOptions { // message fields @@ -3510,10 +3929,16 @@ pub struct ConfigProto_Experimental { pub disable_output_partition_graphs: bool, pub xla_fusion_autotuner_thresh: i64, pub use_tfrt: bool, + pub enable_multi_host: bool, + pub backend_server_port: i32, + pub target_tpu: bool, + pub target_gpu: bool, + pub stream_merge_threshold: i32, pub disable_functional_ops_lowering: bool, pub xla_prefer_single_graph_cluster: bool, pub coordination_config: ::protobuf::SingularPtrField, pub disable_optimize_for_static_graph: bool, + pub disable_eager_executor_streaming_enqueue: bool, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -3825,6 +4250,81 @@ impl ConfigProto_Experimental { self.use_tfrt = v; } + // bool enable_multi_host = 27; + + + pub fn get_enable_multi_host(&self) -> bool { + self.enable_multi_host + } + pub fn clear_enable_multi_host(&mut self) { + self.enable_multi_host = false; + } + + // Param is passed by value, moved + pub fn set_enable_multi_host(&mut self, v: bool) { + self.enable_multi_host = v; + } + + // int32 backend_server_port = 28; + + + pub fn get_backend_server_port(&self) -> i32 { + self.backend_server_port + } + pub fn clear_backend_server_port(&mut self) { + self.backend_server_port = 0; + } + + // Param is passed by value, moved + pub fn set_backend_server_port(&mut self, v: i32) { + self.backend_server_port = v; + } + + // bool target_tpu = 29; + + + pub fn get_target_tpu(&self) -> bool { + self.target_tpu + } + pub fn clear_target_tpu(&mut self) { + self.target_tpu 
= false; + } + + // Param is passed by value, moved + pub fn set_target_tpu(&mut self, v: bool) { + self.target_tpu = v; + } + + // bool target_gpu = 30; + + + pub fn get_target_gpu(&self) -> bool { + self.target_gpu + } + pub fn clear_target_gpu(&mut self) { + self.target_gpu = false; + } + + // Param is passed by value, moved + pub fn set_target_gpu(&mut self, v: bool) { + self.target_gpu = v; + } + + // int32 stream_merge_threshold = 31; + + + pub fn get_stream_merge_threshold(&self) -> i32 { + self.stream_merge_threshold + } + pub fn clear_stream_merge_threshold(&mut self) { + self.stream_merge_threshold = 0; + } + + // Param is passed by value, moved + pub fn set_stream_merge_threshold(&mut self, v: i32) { + self.stream_merge_threshold = v; + } + // bool disable_functional_ops_lowering = 21; @@ -3902,6 +4402,21 @@ impl ConfigProto_Experimental { pub fn set_disable_optimize_for_static_graph(&mut self, v: bool) { self.disable_optimize_for_static_graph = v; } + + // bool disable_eager_executor_streaming_enqueue = 26; + + + pub fn get_disable_eager_executor_streaming_enqueue(&self) -> bool { + self.disable_eager_executor_streaming_enqueue + } + pub fn clear_disable_eager_executor_streaming_enqueue(&mut self) { + self.disable_eager_executor_streaming_enqueue = false; + } + + // Param is passed by value, moved + pub fn set_disable_eager_executor_streaming_enqueue(&mut self, v: bool) { + self.disable_eager_executor_streaming_enqueue = v; + } } impl ::protobuf::Message for ConfigProto_Experimental { @@ -4026,6 +4541,41 @@ impl ::protobuf::Message for ConfigProto_Experimental { let tmp = is.read_bool()?; self.use_tfrt = tmp; }, + 27 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.enable_multi_host = tmp; + }, + 28 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.backend_server_port = tmp; + }, + 29 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.target_tpu = tmp; + }, + 30 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.target_gpu = tmp; + }, + 31 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int32()?; + self.stream_merge_threshold = tmp; + }, 21 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); @@ -4050,6 +4600,13 @@ impl ::protobuf::Message for ConfigProto_Experimental { let tmp = is.read_bool()?; self.disable_optimize_for_static_graph = tmp; }, + 26 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.disable_eager_executor_streaming_enqueue = tmp; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -4114,6 +4671,21 @@ impl ::protobuf::Message for ConfigProto_Experimental { if self.use_tfrt != false { 
my_size += 3; } + if self.enable_multi_host != false { + my_size += 3; + } + if self.backend_server_port != 0 { + my_size += ::protobuf::rt::value_size(28, self.backend_server_port, ::protobuf::wire_format::WireTypeVarint); + } + if self.target_tpu != false { + my_size += 3; + } + if self.target_gpu != false { + my_size += 3; + } + if self.stream_merge_threshold != 0 { + my_size += ::protobuf::rt::value_size(31, self.stream_merge_threshold, ::protobuf::wire_format::WireTypeVarint); + } if self.disable_functional_ops_lowering != false { my_size += 3; } @@ -4127,6 +4699,9 @@ impl ::protobuf::Message for ConfigProto_Experimental { if self.disable_optimize_for_static_graph != false { my_size += 3; } + if self.disable_eager_executor_streaming_enqueue != false { + my_size += 3; + } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -4186,6 +4761,21 @@ impl ::protobuf::Message for ConfigProto_Experimental { if self.use_tfrt != false { os.write_bool(18, self.use_tfrt)?; } + if self.enable_multi_host != false { + os.write_bool(27, self.enable_multi_host)?; + } + if self.backend_server_port != 0 { + os.write_int32(28, self.backend_server_port)?; + } + if self.target_tpu != false { + os.write_bool(29, self.target_tpu)?; + } + if self.target_gpu != false { + os.write_bool(30, self.target_gpu)?; + } + if self.stream_merge_threshold != 0 { + os.write_int32(31, self.stream_merge_threshold)?; + } if self.disable_functional_ops_lowering != false { os.write_bool(21, self.disable_functional_ops_lowering)?; } @@ -4200,6 +4790,9 @@ impl ::protobuf::Message for ConfigProto_Experimental { if self.disable_optimize_for_static_graph != false { os.write_bool(24, self.disable_optimize_for_static_graph)?; } + if self.disable_eager_executor_streaming_enqueue != false { + os.write_bool(26, self.disable_eager_executor_streaming_enqueue)?; + } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -4323,6 +4916,31 @@ impl ::protobuf::Message for ConfigProto_Experimental { |m: &ConfigProto_Experimental| { &m.use_tfrt }, |m: &mut ConfigProto_Experimental| { &mut m.use_tfrt }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "enable_multi_host", + |m: &ConfigProto_Experimental| { &m.enable_multi_host }, + |m: &mut ConfigProto_Experimental| { &mut m.enable_multi_host }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "backend_server_port", + |m: &ConfigProto_Experimental| { &m.backend_server_port }, + |m: &mut ConfigProto_Experimental| { &mut m.backend_server_port }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "target_tpu", + |m: &ConfigProto_Experimental| { &m.target_tpu }, + |m: &mut ConfigProto_Experimental| { &mut m.target_tpu }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "target_gpu", + |m: &ConfigProto_Experimental| { &m.target_gpu }, + |m: &mut ConfigProto_Experimental| { &mut m.target_gpu }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + "stream_merge_threshold", + |m: &ConfigProto_Experimental| { &m.stream_merge_threshold }, + |m: &mut ConfigProto_Experimental| { &mut m.stream_merge_threshold }, + )); 
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( "disable_functional_ops_lowering", |m: &ConfigProto_Experimental| { &m.disable_functional_ops_lowering }, @@ -4343,6 +4961,11 @@ impl ::protobuf::Message for ConfigProto_Experimental { |m: &ConfigProto_Experimental| { &m.disable_optimize_for_static_graph }, |m: &mut ConfigProto_Experimental| { &mut m.disable_optimize_for_static_graph }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "disable_eager_executor_streaming_enqueue", + |m: &ConfigProto_Experimental| { &m.disable_eager_executor_streaming_enqueue }, + |m: &mut ConfigProto_Experimental| { &mut m.disable_eager_executor_streaming_enqueue }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "ConfigProto.Experimental", fields, @@ -4376,10 +4999,16 @@ impl ::protobuf::Clear for ConfigProto_Experimental { self.disable_output_partition_graphs = false; self.xla_fusion_autotuner_thresh = 0; self.use_tfrt = false; + self.enable_multi_host = false; + self.backend_server_port = 0; + self.target_tpu = false; + self.target_gpu = false; + self.stream_merge_threshold = 0; self.disable_functional_ops_lowering = false; self.xla_prefer_single_graph_cluster = false; self.coordination_config.clear(); self.disable_optimize_for_static_graph = false; + self.disable_eager_executor_streaming_enqueue = false; self.unknown_fields.clear(); } } @@ -6631,164 +7260,180 @@ static file_descriptor_proto_data: &'static [u8] = b"\ o\x1a*tensorflow/core/framework/step_stats.proto\x1a&tensorflow/core/pro\ tobuf/cluster.proto\x1a$tensorflow/core/protobuf/debug.proto\x1a.tensorf\ low/core/protobuf/rewriter_config.proto\x1a*tensorflow/core/protobuf/rpc\ - _options.proto\x1a1tensorflow/tsl/protobuf/coordination_config.proto\"\ - \xb3\x0b\n\nGPUOptions\x12D\n\x1fper_process_gpu_memory_fraction\x18\x01\ - \x20\x01(\x01R\x1bperProcessGpuMemoryFraction\x12!\n\x0callow_growth\x18\ - \x04\x20\x01(\x08R\x0ballowGrowth\x12%\n\x0eallocator_type\x18\x02\x20\ - \x01(\tR\rallocatorType\x126\n\x17deferred_deletion_bytes\x18\x03\x20\ - \x01(\x03R\x15deferredDeletionBytes\x12.\n\x13visible_device_list\x18\ - \x05\x20\x01(\tR\x11visibleDeviceList\x12;\n\x1apolling_active_delay_use\ - cs\x18\x06\x20\x01(\x05R\x17pollingActiveDelayUsecs\x12?\n\x1cpolling_in\ - active_delay_msecs\x18\x07\x20\x01(\x05R\x19pollingInactiveDelayMsecs\ - \x120\n\x14force_gpu_compatible\x18\x08\x20\x01(\x08R\x12forceGpuCompati\ - ble\x12G\n\x0cexperimental\x18\t\x20\x01(\x0b2#.tensorflow.GPUOptions.Ex\ - perimentalR\x0cexperimental\x1a\xb3\x07\n\x0cExperimental\x12[\n\x0fvirt\ - ual_devices\x18\x01\x20\x03(\x0b22.tensorflow.GPUOptions.Experimental.Vi\ - rtualDevicesR\x0evirtualDevices\x12,\n\x12use_unified_memory\x18\x02\x20\ - \x01(\x08R\x10useUnifiedMemory\x12;\n\x1bnum_dev_to_dev_copy_streams\x18\ - \x03\x20\x01(\x05R\x16numDevToDevCopyStreams\x122\n\x15collective_ring_o\ - rder\x18\x04\x20\x01(\tR\x13collectiveRingOrder\x123\n\x15timestamped_al\ - locator\x18\x05\x20\x01(\x08R\x14timestampedAllocator\x12=\n\x1bkernel_t\ - racker_max_interval\x18\x07\x20\x01(\x05R\x18kernelTrackerMaxInterval\ - \x127\n\x18kernel_tracker_max_bytes\x18\x08\x20\x01(\x05R\x15kernelTrack\ - erMaxBytes\x12;\n\x1akernel_tracker_max_pending\x18\t\x20\x01(\x05R\x17k\ - ernelTrackerMaxPending\x12F\n\x1finternal_fragmentation_fraction\x18\n\ - \x20\x01(\x01R\x1dinternalFragmentationFraction\x121\n\x15use_cuda_mallo\ - 
c_async\x18\x0b\x20\x01(\x08R\x12useCudaMallocAsync\x12N\n$disallow_retr\ - y_on_allocation_failure\x18\x0c\x20\x01(\x08R\x20disallowRetryOnAllocati\ - onFailure\x125\n\x18gpu_host_mem_limit_in_mb\x18\r\x20\x01(\x02R\x13gpuH\ - ostMemLimitInMb\x12>\n\x1cgpu_host_mem_disallow_growth\x18\x0e\x20\x01(\ - \x08R\x18gpuHostMemDisallowGrowth\x1a{\n\x0eVirtualDevices\x12&\n\x0fmem\ - ory_limit_mb\x18\x01\x20\x03(\x02R\rmemoryLimitMb\x12\x1a\n\x08priority\ - \x18\x02\x20\x03(\x05R\x08priority\x12%\n\x0edevice_ordinal\x18\x03\x20\ - \x03(\x05R\rdeviceOrdinal\"\xa8\x04\n\x10OptimizerOptions\x12M\n#do_comm\ - on_subexpression_elimination\x18\x01\x20\x01(\x08R\x20doCommonSubexpress\ - ionElimination\x12.\n\x13do_constant_folding\x18\x02\x20\x01(\x08R\x11do\ - ConstantFolding\x12>\n\x1cmax_folded_constant_in_bytes\x18\x06\x20\x01(\ - \x03R\x18maxFoldedConstantInBytes\x120\n\x14do_function_inlining\x18\x04\ - \x20\x01(\x08R\x12doFunctionInlining\x12?\n\topt_level\x18\x03\x20\x01(\ - \x0e2\".tensorflow.OptimizerOptions.LevelR\x08optLevel\x12U\n\x10global_\ - jit_level\x18\x05\x20\x01(\x0e2+.tensorflow.OptimizerOptions.GlobalJitLe\ - velR\x0eglobalJitLevel\x12$\n\x0ecpu_global_jit\x18\x07\x20\x01(\x08R\ - \x0ccpuGlobalJit\"\x20\n\x05Level\x12\x06\n\x02L1\x10\0\x12\x0f\n\x02L0\ - \x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\ - \x0b\n\x07DEFAULT\x10\0\x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\ - \xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\x90\ - \x04\n\x0cGraphOptions\x124\n\x16enable_recv_scheduling\x18\x02\x20\x01(\ - \x08R\x14enableRecvScheduling\x12I\n\x11optimizer_options\x18\x03\x20\ - \x01(\x0b2\x1c.tensorflow.OptimizerOptionsR\x10optimizerOptions\x12(\n\ - \x10build_cost_model\x18\x04\x20\x01(\x03R\x0ebuildCostModel\x123\n\x16b\ - uild_cost_model_after\x18\t\x20\x01(\x03R\x13buildCostModelAfter\x12!\n\ - \x0cinfer_shapes\x18\x05\x20\x01(\x08R\x0binferShapes\x12,\n\x12place_pr\ - uned_graph\x18\x06\x20\x01(\x08R\x10placePrunedGraph\x128\n\x18enable_bf\ - loat16_sendrecv\x18\x07\x20\x01(\x08R\x16enableBfloat16Sendrecv\x12#\n\r\ - timeline_step\x18\x08\x20\x01(\x05R\x0ctimelineStep\x12C\n\x0frewrite_op\ - tions\x18\n\x20\x01(\x0b2\x1a.tensorflow.RewriterConfigR\x0erewriteOptio\ - nsJ\x04\x08\x01\x10\x02R%skip_common_subexpression_elimination\"Y\n\x15T\ - hreadPoolOptionProto\x12\x1f\n\x0bnum_threads\x18\x01\x20\x01(\x05R\nnum\ - Threads\x12\x1f\n\x0bglobal_name\x18\x02\x20\x01(\tR\nglobalName\"?\n\ - \x0fSessionMetadata\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12\ - \x18\n\x07version\x18\x02\x20\x01(\x03R\x07version\"\xb0\x15\n\x0bConfig\ - Proto\x12K\n\x0cdevice_count\x18\x01\x20\x03(\x0b2(.tensorflow.ConfigPro\ - to.DeviceCountEntryR\x0bdeviceCount\x12?\n\x1cintra_op_parallelism_threa\ - ds\x18\x02\x20\x01(\x05R\x19intraOpParallelismThreads\x12?\n\x1cinter_op\ - _parallelism_threads\x18\x05\x20\x01(\x05R\x19interOpParallelismThreads\ - \x125\n\x17use_per_session_threads\x18\t\x20\x01(\x08R\x14usePerSessionT\ - hreads\x12a\n\x1csession_inter_op_thread_pool\x18\x0c\x20\x03(\x0b2!.ten\ - sorflow.ThreadPoolOptionProtoR\x18sessionInterOpThreadPool\x12)\n\x10pla\ - cement_period\x18\x03\x20\x01(\x05R\x0fplacementPeriod\x12%\n\x0edevice_\ - filters\x18\x04\x20\x03(\tR\rdeviceFilters\x127\n\x0bgpu_options\x18\x06\ - \x20\x01(\x0b2\x16.tensorflow.GPUOptionsR\ngpuOptions\x120\n\x14allow_so\ - ft_placement\x18\x07\x20\x01(\x08R\x12allowSoftPlacement\x120\n\x14log_d\ - evice_placement\x18\x08\x20\x01(\x08R\x12logDevicePlacement\x12=\n\rgrap\ - 
h_options\x18\n\x20\x01(\x0b2\x18.tensorflow.GraphOptionsR\x0cgraphOptio\ - ns\x125\n\x17operation_timeout_in_ms\x18\x0b\x20\x01(\x03R\x14operationT\ - imeoutInMs\x127\n\x0brpc_options\x18\r\x20\x01(\x0b2\x16.tensorflow.RPCO\ - ptionsR\nrpcOptions\x127\n\x0bcluster_def\x18\x0e\x20\x01(\x0b2\x16.tens\ - orflow.ClusterDefR\nclusterDef\x122\n\x15isolate_session_state\x18\x0f\ - \x20\x01(\x08R\x13isolateSessionState\x12F\n\x20share_cluster_devices_in\ - _session\x18\x11\x20\x01(\x08R\x1cshareClusterDevicesInSession\x12H\n\ - \x0cexperimental\x18\x10\x20\x01(\x0b2$.tensorflow.ConfigProto.Experimen\ - talR\x0cexperimental\x1a>\n\x10DeviceCountEntry\x12\x10\n\x03key\x18\x01\ - \x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\x05R\x05value:\ - \x028\x01\x1a\xda\x0c\n\x0cExperimental\x126\n\x17collective_group_leade\ - r\x18\x01\x20\x01(\tR\x15collectiveGroupLeader\x12#\n\rexecutor_type\x18\ - \x03\x20\x01(\tR\x0cexecutorType\x12+\n\x12recv_buf_max_chunk\x18\x04\ - \x20\x01(\x05R\x0frecvBufMaxChunk\x12*\n\x11use_numa_affinity\x18\x05\ - \x20\x01(\x08R\x0fuseNumaAffinity\x12a\n-collective_deterministic_sequen\ - tial_execution\x18\x06\x20\x01(\x08R*collectiveDeterministicSequentialEx\ - ecution\x12'\n\x0fcollective_nccl\x18\x07\x20\x01(\x08R\x0ecollectiveNcc\ - l\x12a\n.share_session_state_in_clusterspec_propagation\x18\x08\x20\x01(\ - \x08R)shareSessionStateInClusterspecPropagation\x126\n\x17disable_thread\ - _spinning\x18\t\x20\x01(\x08R\x15disableThreadSpinning\x12F\n\x20share_c\ - luster_devices_in_session\x18\n\x20\x01(\x08R\x1cshareClusterDevicesInSe\ - ssion\x12F\n\x10session_metadata\x18\x0b\x20\x01(\x0b2\x1b.tensorflow.Se\ - ssionMetadataR\x0fsessionMetadata\x129\n\x19optimize_for_static_graph\ - \x18\x0c\x20\x01(\x08R\x16optimizeForStaticGraph\x12,\n\x12enable_mlir_b\ - ridge\x18\r\x20\x01(\x08R\x10enableMlirBridge\x12f\n\x13mlir_bridge_roll\ - out\x18\x11\x20\x01(\x0e26.tensorflow.ConfigProto.Experimental.MlirBridg\ - eRolloutR\x11mlirBridgeRollout\x12C\n\x1eenable_mlir_graph_optimization\ - \x18\x10\x20\x01(\x08R\x1benableMlirGraphOptimization\x12E\n\x1fdisable_\ - output_partition_graphs\x18\x0e\x20\x01(\x08R\x1cdisableOutputPartitionG\ - raphs\x12=\n\x1bxla_fusion_autotuner_thresh\x18\x0f\x20\x01(\x03R\x18xla\ - FusionAutotunerThresh\x12\x19\n\x08use_tfrt\x18\x12\x20\x01(\x08R\x07use\ - Tfrt\x12E\n\x1fdisable_functional_ops_lowering\x18\x15\x20\x01(\x08R\x1c\ - disableFunctionalOpsLowering\x12D\n\x1fxla_prefer_single_graph_cluster\ - \x18\x16\x20\x01(\x08R\x1bxlaPreferSingleGraphCluster\x12V\n\x13coordina\ - tion_config\x18\x17\x20\x01(\x0b2%.tensorflow.CoordinationServiceConfigR\ - \x12coordinationConfig\x12H\n!disable_optimize_for_static_graph\x18\x18\ - \x20\x01(\x08R\x1ddisableOptimizeForStaticGraph\"\xde\x01\n\x11MlirBridg\ - eRollout\x12#\n\x1fMLIR_BRIDGE_ROLLOUT_UNSPECIFIED\x10\0\x12\x1f\n\x1bML\ - IR_BRIDGE_ROLLOUT_ENABLED\x10\x01\x12\x20\n\x1cMLIR_BRIDGE_ROLLOUT_DISAB\ - LED\x10\x02\"\x04\x08\x03\x10\x03\"\x04\x08\x04\x10\x04*%MLIR_BRIDGE_ROL\ - LOUT_SAFE_MODE_ENABLED*.MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLEDJ\ - \x04\x08\x02\x10\x03J\x04\x08\x13\x10\x14J\x04\x08\x14\x10\x15J\x04\x08\ - \x19\x10\x1a\"\xa8\x06\n\nRunOptions\x12B\n\x0btrace_level\x18\x01\x20\ - \x01(\x0e2!.tensorflow.RunOptions.TraceLevelR\ntraceLevel\x12\"\n\rtimeo\ - ut_in_ms\x18\x02\x20\x01(\x03R\x0btimeoutInMs\x12/\n\x14inter_op_thread_\ - pool\x18\x03\x20\x01(\x05R\x11interOpThreadPool\x126\n\x17output_partiti\ - on_graphs\x18\x05\x20\x01(\x08R\x15outputPartitionGraphs\x12=\n\rdebug_o\ - 
ptions\x18\x06\x20\x01(\x0b2\x18.tensorflow.DebugOptionsR\x0cdebugOption\ - s\x12J\n\"report_tensor_allocations_upon_oom\x18\x07\x20\x01(\x08R\x1ere\ - portTensorAllocationsUponOom\x12G\n\x0cexperimental\x18\x08\x20\x01(\x0b\ - 2#.tensorflow.RunOptions.ExperimentalR\x0cexperimental\x1a\x9a\x02\n\x0c\ - Experimental\x120\n\x14collective_graph_key\x18\x01\x20\x01(\x03R\x12col\ - lectiveGraphKey\x12/\n\x14use_run_handler_pool\x18\x02\x20\x01(\x08R\x11\ - useRunHandlerPool\x12r\n\x18run_handler_pool_options\x18\x03\x20\x01(\ - \x0b29.tensorflow.RunOptions.Experimental.RunHandlerPoolOptionsR\x15runH\ - andlerPoolOptions\x1a3\n\x15RunHandlerPoolOptions\x12\x1a\n\x08priority\ - \x18\x01\x20\x01(\x03R\x08priority\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRAC\ - E\x10\0\x12\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\ - \x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\xc4\x04\n\ - \x0bRunMetadata\x124\n\nstep_stats\x18\x01\x20\x01(\x0b2\x15.tensorflow.\ - StepStatsR\tstepStats\x127\n\ncost_graph\x18\x02\x20\x01(\x0b2\x18.tenso\ - rflow.CostGraphDefR\tcostGraph\x12?\n\x10partition_graphs\x18\x03\x20\ - \x03(\x0b2\x14.tensorflow.GraphDefR\x0fpartitionGraphs\x12O\n\x0ffunctio\ - n_graphs\x18\x04\x20\x03(\x0b2&.tensorflow.RunMetadata.FunctionGraphsR\ - \x0efunctionGraphs\x12F\n\x10session_metadata\x18\x05\x20\x01(\x0b2\x1b.\ - tensorflow.SessionMetadataR\x0fsessionMetadata\x1a\xeb\x01\n\x0eFunction\ - Graphs\x12?\n\x10partition_graphs\x18\x01\x20\x03(\x0b2\x14.tensorflow.G\ - raphDefR\x0fpartitionGraphs\x12J\n\x16pre_optimization_graph\x18\x02\x20\ - \x01(\x0b2\x14.tensorflow.GraphDefR\x14preOptimizationGraph\x12L\n\x17po\ - st_optimization_graph\x18\x03\x20\x01(\x0b2\x14.tensorflow.GraphDefR\x15\ - postOptimizationGraph\"P\n\x10TensorConnection\x12\x1f\n\x0bfrom_tensor\ - \x18\x01\x20\x01(\tR\nfromTensor\x12\x1b\n\tto_tensor\x18\x02\x20\x01(\t\ - R\x08toTensor\"\xa5\x04\n\x0fCallableOptions\x12\x12\n\x04feed\x18\x01\ - \x20\x03(\tR\x04feed\x12\x14\n\x05fetch\x18\x02\x20\x03(\tR\x05fetch\x12\ - \x16\n\x06target\x18\x03\x20\x03(\tR\x06target\x127\n\x0brun_options\x18\ - \x04\x20\x01(\x0b2\x16.tensorflow.RunOptionsR\nrunOptions\x12I\n\x11tens\ - or_connection\x18\x05\x20\x03(\x0b2\x1c.tensorflow.TensorConnectionR\x10\ - tensorConnection\x12O\n\x0cfeed_devices\x18\x06\x20\x03(\x0b2,.tensorflo\ - w.CallableOptions.FeedDevicesEntryR\x0bfeedDevices\x12R\n\rfetch_devices\ - \x18\x07\x20\x03(\x0b2-.tensorflow.CallableOptions.FetchDevicesEntryR\ - \x0cfetchDevices\x12&\n\x0ffetch_skip_sync\x18\x08\x20\x01(\x08R\rfetchS\ - kipSync\x1a>\n\x10FeedDevicesEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\ - \x03key\x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\x028\x01\x1a?\n\ - \x11FetchDevicesEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\ - \x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\x028\x01B\x84\x01\n\x18org\ - .tensorflow.frameworkB\x0cConfigProtosP\x01ZUgithub.com/tensorflow/tenso\ - rflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01b\ - \x06proto3\ + _options.proto\x1a&tsl/protobuf/coordination_config.proto\"\xdb\x0f\n\nG\ + PUOptions\x12D\n\x1fper_process_gpu_memory_fraction\x18\x01\x20\x01(\x01\ + R\x1bperProcessGpuMemoryFraction\x12!\n\x0callow_growth\x18\x04\x20\x01(\ + \x08R\x0ballowGrowth\x12%\n\x0eallocator_type\x18\x02\x20\x01(\tR\ralloc\ + atorType\x126\n\x17deferred_deletion_bytes\x18\x03\x20\x01(\x03R\x15defe\ + rredDeletionBytes\x12.\n\x13visible_device_list\x18\x05\x20\x01(\tR\x11v\ + 
isibleDeviceList\x12;\n\x1apolling_active_delay_usecs\x18\x06\x20\x01(\ + \x05R\x17pollingActiveDelayUsecs\x12?\n\x1cpolling_inactive_delay_msecs\ + \x18\x07\x20\x01(\x05R\x19pollingInactiveDelayMsecs\x120\n\x14force_gpu_\ + compatible\x18\x08\x20\x01(\x08R\x12forceGpuCompatible\x12G\n\x0cexperim\ + ental\x18\t\x20\x01(\x0b2#.tensorflow.GPUOptions.ExperimentalR\x0cexperi\ + mental\x1a\xdb\x0b\n\x0cExperimental\x12[\n\x0fvirtual_devices\x18\x01\ + \x20\x03(\x0b22.tensorflow.GPUOptions.Experimental.VirtualDevicesR\x0evi\ + rtualDevices\x12<\n\x1bnum_virtual_devices_per_gpu\x18\x0f\x20\x01(\x05R\ + \x17numVirtualDevicesPerGpu\x12,\n\x12use_unified_memory\x18\x02\x20\x01\ + (\x08R\x10useUnifiedMemory\x12;\n\x1bnum_dev_to_dev_copy_streams\x18\x03\ + \x20\x01(\x05R\x16numDevToDevCopyStreams\x122\n\x15collective_ring_order\ + \x18\x04\x20\x01(\tR\x13collectiveRingOrder\x123\n\x15timestamped_alloca\ + tor\x18\x05\x20\x01(\x08R\x14timestampedAllocator\x12=\n\x1bkernel_track\ + er_max_interval\x18\x07\x20\x01(\x05R\x18kernelTrackerMaxInterval\x127\n\ + \x18kernel_tracker_max_bytes\x18\x08\x20\x01(\x05R\x15kernelTrackerMaxBy\ + tes\x12;\n\x1akernel_tracker_max_pending\x18\t\x20\x01(\x05R\x17kernelTr\ + ackerMaxPending\x12F\n\x1finternal_fragmentation_fraction\x18\n\x20\x01(\ + \x01R\x1dinternalFragmentationFraction\x121\n\x15use_cuda_malloc_async\ + \x18\x0b\x20\x01(\x08R\x12useCudaMallocAsync\x12N\n$disallow_retry_on_al\ + location_failure\x18\x0c\x20\x01(\x08R\x20disallowRetryOnAllocationFailu\ + re\x125\n\x18gpu_host_mem_limit_in_mb\x18\r\x20\x01(\x02R\x13gpuHostMemL\ + imitInMb\x12>\n\x1cgpu_host_mem_disallow_growth\x18\x0e\x20\x01(\x08R\ + \x18gpuHostMemDisallowGrowth\x12=\n\x1cgpu_system_memory_size_in_mb\x18\ + \x10\x20\x01(\x05R\x17gpuSystemMemorySizeInMb\x12Q\n&populate_pjrt_gpu_c\ + lient_creation_info\x18\x11\x20\x01(\x08R!populatePjrtGpuClientCreationI\ + nfo\x12\x17\n\x07node_id\x18\x12\x20\x01(\x05R\x06nodeId\x12h\n\x14strea\ + m_merge_options\x18\x13\x20\x01(\x0b26.tensorflow.GPUOptions.Experimenta\ + l.StreamMergeOptionsR\x12streamMergeOptions\x1a{\n\x0eVirtualDevices\x12\ + &\n\x0fmemory_limit_mb\x18\x01\x20\x03(\x02R\rmemoryLimitMb\x12\x1a\n\ + \x08priority\x18\x02\x20\x03(\x05R\x08priority\x12%\n\x0edevice_ordinal\ + \x18\x03\x20\x03(\x05R\rdeviceOrdinal\x1a\xd2\x01\n\x12StreamMergeOption\ + s\x12<\n\x1bmerge_host_to_device_stream\x18\x01\x20\x01(\x08R\x17mergeHo\ + stToDeviceStream\x12<\n\x1bmerge_device_to_host_stream\x18\x02\x20\x01(\ + \x08R\x17mergeDeviceToHostStream\x12@\n\x1dmerge_device_to_device_stream\ + \x18\x03\x20\x01(\x08R\x19mergeDeviceToDeviceStream\"\xa8\x04\n\x10Optim\ + izerOptions\x12M\n#do_common_subexpression_elimination\x18\x01\x20\x01(\ + \x08R\x20doCommonSubexpressionElimination\x12.\n\x13do_constant_folding\ + \x18\x02\x20\x01(\x08R\x11doConstantFolding\x12>\n\x1cmax_folded_constan\ + t_in_bytes\x18\x06\x20\x01(\x03R\x18maxFoldedConstantInBytes\x120\n\x14d\ + o_function_inlining\x18\x04\x20\x01(\x08R\x12doFunctionInlining\x12?\n\t\ + opt_level\x18\x03\x20\x01(\x0e2\".tensorflow.OptimizerOptions.LevelR\x08\ + optLevel\x12U\n\x10global_jit_level\x18\x05\x20\x01(\x0e2+.tensorflow.Op\ + timizerOptions.GlobalJitLevelR\x0eglobalJitLevel\x12$\n\x0ecpu_global_ji\ + t\x18\x07\x20\x01(\x08R\x0ccpuGlobalJit\"\x20\n\x05Level\x12\x06\n\x02L1\ + \x10\0\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\ + \x0eGlobalJitLevel\x12\x0b\n\x07DEFAULT\x10\0\x12\x10\n\x03OFF\x10\xff\ + \xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\ + 
\x04ON_2\x10\x02\"\x90\x04\n\x0cGraphOptions\x124\n\x16enable_recv_sched\ + uling\x18\x02\x20\x01(\x08R\x14enableRecvScheduling\x12I\n\x11optimizer_\ + options\x18\x03\x20\x01(\x0b2\x1c.tensorflow.OptimizerOptionsR\x10optimi\ + zerOptions\x12(\n\x10build_cost_model\x18\x04\x20\x01(\x03R\x0ebuildCost\ + Model\x123\n\x16build_cost_model_after\x18\t\x20\x01(\x03R\x13buildCostM\ + odelAfter\x12!\n\x0cinfer_shapes\x18\x05\x20\x01(\x08R\x0binferShapes\ + \x12,\n\x12place_pruned_graph\x18\x06\x20\x01(\x08R\x10placePrunedGraph\ + \x128\n\x18enable_bfloat16_sendrecv\x18\x07\x20\x01(\x08R\x16enableBfloa\ + t16Sendrecv\x12#\n\rtimeline_step\x18\x08\x20\x01(\x05R\x0ctimelineStep\ + \x12C\n\x0frewrite_options\x18\n\x20\x01(\x0b2\x1a.tensorflow.RewriterCo\ + nfigR\x0erewriteOptionsJ\x04\x08\x01\x10\x02R%skip_common_subexpression_\ + elimination\"Y\n\x15ThreadPoolOptionProto\x12\x1f\n\x0bnum_threads\x18\ + \x01\x20\x01(\x05R\nnumThreads\x12\x1f\n\x0bglobal_name\x18\x02\x20\x01(\ + \tR\nglobalName\"?\n\x0fSessionMetadata\x12\x12\n\x04name\x18\x01\x20\ + \x01(\tR\x04name\x12\x18\n\x07version\x18\x02\x20\x01(\x03R\x07version\"\ + \xd8\x17\n\x0bConfigProto\x12K\n\x0cdevice_count\x18\x01\x20\x03(\x0b2(.\ + tensorflow.ConfigProto.DeviceCountEntryR\x0bdeviceCount\x12?\n\x1cintra_\ + op_parallelism_threads\x18\x02\x20\x01(\x05R\x19intraOpParallelismThread\ + s\x12?\n\x1cinter_op_parallelism_threads\x18\x05\x20\x01(\x05R\x19interO\ + pParallelismThreads\x125\n\x17use_per_session_threads\x18\t\x20\x01(\x08\ + R\x14usePerSessionThreads\x12a\n\x1csession_inter_op_thread_pool\x18\x0c\ + \x20\x03(\x0b2!.tensorflow.ThreadPoolOptionProtoR\x18sessionInterOpThrea\ + dPool\x12)\n\x10placement_period\x18\x03\x20\x01(\x05R\x0fplacementPerio\ + d\x12%\n\x0edevice_filters\x18\x04\x20\x03(\tR\rdeviceFilters\x127\n\x0b\ + gpu_options\x18\x06\x20\x01(\x0b2\x16.tensorflow.GPUOptionsR\ngpuOptions\ + \x120\n\x14allow_soft_placement\x18\x07\x20\x01(\x08R\x12allowSoftPlacem\ + ent\x120\n\x14log_device_placement\x18\x08\x20\x01(\x08R\x12logDevicePla\ + cement\x12=\n\rgraph_options\x18\n\x20\x01(\x0b2\x18.tensorflow.GraphOpt\ + ionsR\x0cgraphOptions\x125\n\x17operation_timeout_in_ms\x18\x0b\x20\x01(\ + \x03R\x14operationTimeoutInMs\x127\n\x0brpc_options\x18\r\x20\x01(\x0b2\ + \x16.tensorflow.RPCOptionsR\nrpcOptions\x127\n\x0bcluster_def\x18\x0e\ + \x20\x01(\x0b2\x16.tensorflow.ClusterDefR\nclusterDef\x122\n\x15isolate_\ + session_state\x18\x0f\x20\x01(\x08R\x13isolateSessionState\x12F\n\x20sha\ + re_cluster_devices_in_session\x18\x11\x20\x01(\x08R\x1cshareClusterDevic\ + esInSession\x12H\n\x0cexperimental\x18\x10\x20\x01(\x0b2$.tensorflow.Con\ + figProto.ExperimentalR\x0cexperimental\x1a>\n\x10DeviceCountEntry\x12\ + \x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\ + \x01(\x05R\x05value:\x028\x01\x1a\x82\x0f\n\x0cExperimental\x126\n\x17co\ + llective_group_leader\x18\x01\x20\x01(\tR\x15collectiveGroupLeader\x12#\ + \n\rexecutor_type\x18\x03\x20\x01(\tR\x0cexecutorType\x12+\n\x12recv_buf\ + _max_chunk\x18\x04\x20\x01(\x05R\x0frecvBufMaxChunk\x12*\n\x11use_numa_a\ + ffinity\x18\x05\x20\x01(\x08R\x0fuseNumaAffinity\x12a\n-collective_deter\ + ministic_sequential_execution\x18\x06\x20\x01(\x08R*collectiveDeterminis\ + ticSequentialExecution\x12'\n\x0fcollective_nccl\x18\x07\x20\x01(\x08R\ + \x0ecollectiveNccl\x12a\n.share_session_state_in_clusterspec_propagation\ + \x18\x08\x20\x01(\x08R)shareSessionStateInClusterspecPropagation\x126\n\ + \x17disable_thread_spinning\x18\t\x20\x01(\x08R\x15disableThreadSpinning\ + 
\x12F\n\x20share_cluster_devices_in_session\x18\n\x20\x01(\x08R\x1cshare\ + ClusterDevicesInSession\x12F\n\x10session_metadata\x18\x0b\x20\x01(\x0b2\ + \x1b.tensorflow.SessionMetadataR\x0fsessionMetadata\x129\n\x19optimize_f\ + or_static_graph\x18\x0c\x20\x01(\x08R\x16optimizeForStaticGraph\x12,\n\ + \x12enable_mlir_bridge\x18\r\x20\x01(\x08R\x10enableMlirBridge\x12f\n\ + \x13mlir_bridge_rollout\x18\x11\x20\x01(\x0e26.tensorflow.ConfigProto.Ex\ + perimental.MlirBridgeRolloutR\x11mlirBridgeRollout\x12C\n\x1eenable_mlir\ + _graph_optimization\x18\x10\x20\x01(\x08R\x1benableMlirGraphOptimization\ + \x12E\n\x1fdisable_output_partition_graphs\x18\x0e\x20\x01(\x08R\x1cdisa\ + bleOutputPartitionGraphs\x12=\n\x1bxla_fusion_autotuner_thresh\x18\x0f\ + \x20\x01(\x03R\x18xlaFusionAutotunerThresh\x12\x19\n\x08use_tfrt\x18\x12\ + \x20\x01(\x08R\x07useTfrt\x12*\n\x11enable_multi_host\x18\x1b\x20\x01(\ + \x08R\x0fenableMultiHost\x12.\n\x13backend_server_port\x18\x1c\x20\x01(\ + \x05R\x11backendServerPort\x12\x1d\n\ntarget_tpu\x18\x1d\x20\x01(\x08R\t\ + targetTpu\x12\x1d\n\ntarget_gpu\x18\x1e\x20\x01(\x08R\ttargetGpu\x124\n\ + \x16stream_merge_threshold\x18\x1f\x20\x01(\x05R\x14streamMergeThreshold\ + \x12E\n\x1fdisable_functional_ops_lowering\x18\x15\x20\x01(\x08R\x1cdisa\ + bleFunctionalOpsLowering\x12D\n\x1fxla_prefer_single_graph_cluster\x18\ + \x16\x20\x01(\x08R\x1bxlaPreferSingleGraphCluster\x12V\n\x13coordination\ + _config\x18\x17\x20\x01(\x0b2%.tensorflow.CoordinationServiceConfigR\x12\ + coordinationConfig\x12H\n!disable_optimize_for_static_graph\x18\x18\x20\ + \x01(\x08R\x1ddisableOptimizeForStaticGraph\x12V\n(disable_eager_executo\ + r_streaming_enqueue\x18\x1a\x20\x01(\x08R$disableEagerExecutorStreamingE\ + nqueue\"\xde\x01\n\x11MlirBridgeRollout\x12#\n\x1fMLIR_BRIDGE_ROLLOUT_UN\ + SPECIFIED\x10\0\x12\x1f\n\x1bMLIR_BRIDGE_ROLLOUT_ENABLED\x10\x01\x12\x20\ + \n\x1cMLIR_BRIDGE_ROLLOUT_DISABLED\x10\x02\"\x04\x08\x03\x10\x03\"\x04\ + \x08\x04\x10\x04*%MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED*.MLIR_BRIDGE_ROL\ + LOUT_SAFE_MODE_FALLBACK_ENABLEDJ\x04\x08\x02\x10\x03J\x04\x08\x13\x10\ + \x14J\x04\x08\x14\x10\x15J\x04\x08\x19\x10\x1a\"\xa8\x06\n\nRunOptions\ + \x12B\n\x0btrace_level\x18\x01\x20\x01(\x0e2!.tensorflow.RunOptions.Trac\ + eLevelR\ntraceLevel\x12\"\n\rtimeout_in_ms\x18\x02\x20\x01(\x03R\x0btime\ + outInMs\x12/\n\x14inter_op_thread_pool\x18\x03\x20\x01(\x05R\x11interOpT\ + hreadPool\x126\n\x17output_partition_graphs\x18\x05\x20\x01(\x08R\x15out\ + putPartitionGraphs\x12=\n\rdebug_options\x18\x06\x20\x01(\x0b2\x18.tenso\ + rflow.DebugOptionsR\x0cdebugOptions\x12J\n\"report_tensor_allocations_up\ + on_oom\x18\x07\x20\x01(\x08R\x1ereportTensorAllocationsUponOom\x12G\n\ + \x0cexperimental\x18\x08\x20\x01(\x0b2#.tensorflow.RunOptions.Experiment\ + alR\x0cexperimental\x1a\x9a\x02\n\x0cExperimental\x120\n\x14collective_g\ + raph_key\x18\x01\x20\x01(\x03R\x12collectiveGraphKey\x12/\n\x14use_run_h\ + andler_pool\x18\x02\x20\x01(\x08R\x11useRunHandlerPool\x12r\n\x18run_han\ + dler_pool_options\x18\x03\x20\x01(\x0b29.tensorflow.RunOptions.Experimen\ + tal.RunHandlerPoolOptionsR\x15runHandlerPoolOptions\x1a3\n\x15RunHandler\ + PoolOptions\x12\x1a\n\x08priority\x18\x01\x20\x01(\x03R\x08priority\"R\n\ + \nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\0\x12\x12\n\x0eSOFTWARE_TRACE\x10\ + \x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\ + \x04\x08\x04\x10\x05\"\xc4\x04\n\x0bRunMetadata\x124\n\nstep_stats\x18\ + \x01\x20\x01(\x0b2\x15.tensorflow.StepStatsR\tstepStats\x127\n\ncost_gra\ + 
ph\x18\x02\x20\x01(\x0b2\x18.tensorflow.CostGraphDefR\tcostGraph\x12?\n\ + \x10partition_graphs\x18\x03\x20\x03(\x0b2\x14.tensorflow.GraphDefR\x0fp\ + artitionGraphs\x12O\n\x0ffunction_graphs\x18\x04\x20\x03(\x0b2&.tensorfl\ + ow.RunMetadata.FunctionGraphsR\x0efunctionGraphs\x12F\n\x10session_metad\ + ata\x18\x05\x20\x01(\x0b2\x1b.tensorflow.SessionMetadataR\x0fsessionMeta\ + data\x1a\xeb\x01\n\x0eFunctionGraphs\x12?\n\x10partition_graphs\x18\x01\ + \x20\x03(\x0b2\x14.tensorflow.GraphDefR\x0fpartitionGraphs\x12J\n\x16pre\ + _optimization_graph\x18\x02\x20\x01(\x0b2\x14.tensorflow.GraphDefR\x14pr\ + eOptimizationGraph\x12L\n\x17post_optimization_graph\x18\x03\x20\x01(\ + \x0b2\x14.tensorflow.GraphDefR\x15postOptimizationGraph\"P\n\x10TensorCo\ + nnection\x12\x1f\n\x0bfrom_tensor\x18\x01\x20\x01(\tR\nfromTensor\x12\ + \x1b\n\tto_tensor\x18\x02\x20\x01(\tR\x08toTensor\"\xa5\x04\n\x0fCallabl\ + eOptions\x12\x12\n\x04feed\x18\x01\x20\x03(\tR\x04feed\x12\x14\n\x05fetc\ + h\x18\x02\x20\x03(\tR\x05fetch\x12\x16\n\x06target\x18\x03\x20\x03(\tR\ + \x06target\x127\n\x0brun_options\x18\x04\x20\x01(\x0b2\x16.tensorflow.Ru\ + nOptionsR\nrunOptions\x12I\n\x11tensor_connection\x18\x05\x20\x03(\x0b2\ + \x1c.tensorflow.TensorConnectionR\x10tensorConnection\x12O\n\x0cfeed_dev\ + ices\x18\x06\x20\x03(\x0b2,.tensorflow.CallableOptions.FeedDevicesEntryR\ + \x0bfeedDevices\x12R\n\rfetch_devices\x18\x07\x20\x03(\x0b2-.tensorflow.\ + CallableOptions.FetchDevicesEntryR\x0cfetchDevices\x12&\n\x0ffetch_skip_\ + sync\x18\x08\x20\x01(\x08R\rfetchSkipSync\x1a>\n\x10FeedDevicesEntry\x12\ + \x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\ + \x01(\tR\x05value:\x028\x01\x1a?\n\x11FetchDevicesEntry\x12\x10\n\x03key\ + \x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05va\ + lue:\x028\x01B\x84\x01\n\x18org.tensorflow.frameworkB\x0cConfigProtosP\ + \x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_c\ + ore_protos_go_proto\xf8\x01\x01b\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/coordination_config.rs b/src/protos/coordination_config.rs index 9190190d1a..753dfe5217 100644 --- a/src/protos/coordination_config.rs +++ b/src/protos/coordination_config.rs @@ -17,7 +17,7 @@ #![allow(trivial_casts)] #![allow(unused_imports)] #![allow(unused_results)] -//! Generated file from `tensorflow/tsl/protobuf/coordination_config.proto` +//! Generated file from `tsl/protobuf/coordination_config.proto` /// Generated files are compatible only with the same version /// of protobuf runtime. 
@@ -230,6 +230,7 @@ pub struct CoordinationServiceConfig { pub agent_destruction_without_shutdown: bool, pub recoverable_jobs: ::protobuf::RepeatedField<::std::string::String>, pub allow_new_incarnation_to_reconnect: bool, + pub force_disable: bool, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -437,6 +438,21 @@ impl CoordinationServiceConfig { pub fn set_allow_new_incarnation_to_reconnect(&mut self, v: bool) { self.allow_new_incarnation_to_reconnect = v; } + + // bool force_disable = 12; + + + pub fn get_force_disable(&self) -> bool { + self.force_disable + } + pub fn clear_force_disable(&mut self) { + self.force_disable = false; + } + + // Param is passed by value, moved + pub fn set_force_disable(&mut self, v: bool) { + self.force_disable = v; + } } impl ::protobuf::Message for CoordinationServiceConfig { @@ -507,6 +523,13 @@ impl ::protobuf::Message for CoordinationServiceConfig { let tmp = is.read_bool()?; self.allow_new_incarnation_to_reconnect = tmp; }, + 12 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.force_disable = tmp; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -550,6 +573,9 @@ impl ::protobuf::Message for CoordinationServiceConfig { if self.allow_new_incarnation_to_reconnect != false { my_size += 2; } + if self.force_disable != false { + my_size += 2; + } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -588,6 +614,9 @@ impl ::protobuf::Message for CoordinationServiceConfig { if self.allow_new_incarnation_to_reconnect != false { os.write_bool(11, self.allow_new_incarnation_to_reconnect)?; } + if self.force_disable != false { + os.write_bool(12, self.force_disable)?; + } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -676,6 +705,11 @@ impl ::protobuf::Message for CoordinationServiceConfig { |m: &CoordinationServiceConfig| { &m.allow_new_incarnation_to_reconnect }, |m: &mut CoordinationServiceConfig| { &mut m.allow_new_incarnation_to_reconnect }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "force_disable", + |m: &CoordinationServiceConfig| { &m.force_disable }, + |m: &mut CoordinationServiceConfig| { &mut m.force_disable }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "CoordinationServiceConfig", fields, @@ -702,6 +736,7 @@ impl ::protobuf::Clear for CoordinationServiceConfig { self.agent_destruction_without_shutdown = false; self.recoverable_jobs.clear(); self.allow_new_incarnation_to_reconnect = false; + self.force_disable = false; self.unknown_fields.clear(); } } @@ -719,23 +754,24 @@ impl ::protobuf::reflect::ProtobufValue for CoordinationServiceConfig { } static file_descriptor_proto_data: &'static [u8] = b"\ - \n1tensorflow/tsl/protobuf/coordination_config.proto\x12\ntensorflow\"A\ - \n\x0eCoordinatedJob\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12\ - \x1b\n\tnum_tasks\x18\x02\x20\x01(\x05R\x08numTasks\"\xec\x04\n\x19Coord\ - inationServiceConfig\x12!\n\x0cservice_type\x18\x01\x20\x01(\tR\x0bservi\ - ceType\x12%\n\x0eservice_leader\x18\x02\x20\x01(\tR\rserviceLeader\x12.\ - \n\x13enable_health_check\x18\x03\x20\x01(\x08R\x11enableHealthCheck\x12\ - 
B\n\x1ecluster_register_timeout_in_ms\x18\x04\x20\x01(\x03R\x1aclusterRe\ - gisterTimeoutInMs\x125\n\x17heartbeat_timeout_in_ms\x18\x05\x20\x01(\x03\ - R\x14heartbeatTimeoutInMs\x12L\n\x14coordinated_job_list\x18\n\x20\x03(\ - \x0b2\x1a.tensorflow.CoordinatedJobR\x12coordinatedJobList\x12B\n\x1eshu\ - tdown_barrier_timeout_in_ms\x18\x07\x20\x01(\x03R\x1ashutdownBarrierTime\ - outInMs\x12K\n\"agent_destruction_without_shutdown\x18\x08\x20\x01(\x08R\ - \x1fagentDestructionWithoutShutdown\x12)\n\x10recoverable_jobs\x18\t\x20\ - \x03(\tR\x0frecoverableJobs\x12J\n\"allow_new_incarnation_to_reconnect\ - \x18\x0b\x20\x01(\x08R\x1eallowNewIncarnationToReconnectJ\x04\x08\x06\ - \x10\x07BWZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf\ - /for_core_protos_go_protob\x06proto3\ + \n&tsl/protobuf/coordination_config.proto\x12\ntensorflow\"A\n\x0eCoordi\ + natedJob\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12\x1b\n\tnum_ta\ + sks\x18\x02\x20\x01(\x05R\x08numTasks\"\x91\x05\n\x19CoordinationService\ + Config\x12!\n\x0cservice_type\x18\x01\x20\x01(\tR\x0bserviceType\x12%\n\ + \x0eservice_leader\x18\x02\x20\x01(\tR\rserviceLeader\x12.\n\x13enable_h\ + ealth_check\x18\x03\x20\x01(\x08R\x11enableHealthCheck\x12B\n\x1ecluster\ + _register_timeout_in_ms\x18\x04\x20\x01(\x03R\x1aclusterRegisterTimeoutI\ + nMs\x125\n\x17heartbeat_timeout_in_ms\x18\x05\x20\x01(\x03R\x14heartbeat\ + TimeoutInMs\x12L\n\x14coordinated_job_list\x18\n\x20\x03(\x0b2\x1a.tenso\ + rflow.CoordinatedJobR\x12coordinatedJobList\x12B\n\x1eshutdown_barrier_t\ + imeout_in_ms\x18\x07\x20\x01(\x03R\x1ashutdownBarrierTimeoutInMs\x12K\n\ + \"agent_destruction_without_shutdown\x18\x08\x20\x01(\x08R\x1fagentDestr\ + uctionWithoutShutdown\x12)\n\x10recoverable_jobs\x18\t\x20\x03(\tR\x0fre\ + coverableJobs\x12J\n\"allow_new_incarnation_to_reconnect\x18\x0b\x20\x01\ + (\x08R\x1eallowNewIncarnationToReconnect\x12#\n\rforce_disable\x18\x0c\ + \x20\x01(\x08R\x0cforceDisableJ\x04\x08\x06\x10\x07BWZUgithub.com/tensor\ + flow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_protob\ + \x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/graph_debug_info.rs b/src/protos/graph_debug_info.rs index 8c6c383bf9..71b3124d89 100644 --- a/src/protos/graph_debug_info.rs +++ b/src/protos/graph_debug_info.rs @@ -27,7 +27,10 @@ pub struct GraphDebugInfo { // message fields pub files: ::protobuf::RepeatedField<::std::string::String>, + pub frames_by_id: ::std::collections::HashMap, + pub traces_by_id: ::std::collections::HashMap, pub traces: ::std::collections::HashMap<::std::string::String, GraphDebugInfo_StackTrace>, + pub name_to_trace_id: ::std::collections::HashMap<::std::string::String, u64>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -69,6 +72,56 @@ impl GraphDebugInfo { ::std::mem::replace(&mut self.files, ::protobuf::RepeatedField::new()) } + // repeated .tensorflow.GraphDebugInfo.FramesByIdEntry frames_by_id = 4; + + + pub fn get_frames_by_id(&self) -> &::std::collections::HashMap { + &self.frames_by_id + } + pub fn clear_frames_by_id(&mut self) { + self.frames_by_id.clear(); + } + + // Param is passed by value, moved + pub fn set_frames_by_id(&mut self, v: ::std::collections::HashMap) { + self.frames_by_id = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_frames_by_id(&mut self) -> &mut ::std::collections::HashMap { + &mut self.frames_by_id + } + + // Take field + pub fn take_frames_by_id(&mut self) -> ::std::collections::HashMap { + ::std::mem::replace(&mut self.frames_by_id, ::std::collections::HashMap::new()) + } + + // repeated .tensorflow.GraphDebugInfo.TracesByIdEntry traces_by_id = 6; + + + pub fn get_traces_by_id(&self) -> &::std::collections::HashMap { + &self.traces_by_id + } + pub fn clear_traces_by_id(&mut self) { + self.traces_by_id.clear(); + } + + // Param is passed by value, moved + pub fn set_traces_by_id(&mut self, v: ::std::collections::HashMap) { + self.traces_by_id = v; + } + + // Mutable pointer to the field. + pub fn mut_traces_by_id(&mut self) -> &mut ::std::collections::HashMap { + &mut self.traces_by_id + } + + // Take field + pub fn take_traces_by_id(&mut self) -> ::std::collections::HashMap { + ::std::mem::replace(&mut self.traces_by_id, ::std::collections::HashMap::new()) + } + // repeated .tensorflow.GraphDebugInfo.TracesEntry traces = 2; @@ -93,6 +146,31 @@ impl GraphDebugInfo { pub fn take_traces(&mut self) -> ::std::collections::HashMap<::std::string::String, GraphDebugInfo_StackTrace> { ::std::mem::replace(&mut self.traces, ::std::collections::HashMap::new()) } + + // repeated .tensorflow.GraphDebugInfo.NameToTraceIdEntry name_to_trace_id = 5; + + + pub fn get_name_to_trace_id(&self) -> &::std::collections::HashMap<::std::string::String, u64> { + &self.name_to_trace_id + } + pub fn clear_name_to_trace_id(&mut self) { + self.name_to_trace_id.clear(); + } + + // Param is passed by value, moved + pub fn set_name_to_trace_id(&mut self, v: ::std::collections::HashMap<::std::string::String, u64>) { + self.name_to_trace_id = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_name_to_trace_id(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, u64> { + &mut self.name_to_trace_id + } + + // Take field + pub fn take_name_to_trace_id(&mut self) -> ::std::collections::HashMap<::std::string::String, u64> { + ::std::mem::replace(&mut self.name_to_trace_id, ::std::collections::HashMap::new()) + } } impl ::protobuf::Message for GraphDebugInfo { @@ -107,9 +185,18 @@ impl ::protobuf::Message for GraphDebugInfo { 1 => { ::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.files)?; }, + 4 => { + ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(wire_type, is, &mut self.frames_by_id)?; + }, + 6 => { + ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(wire_type, is, &mut self.traces_by_id)?; + }, 2 => { ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(wire_type, is, &mut self.traces)?; }, + 5 => { + ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeFixed64>(wire_type, is, &mut self.name_to_trace_id)?; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -125,7 +212,10 @@ impl ::protobuf::Message for GraphDebugInfo { for value in &self.files { my_size += ::protobuf::rt::string_size(1, &value); }; + my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(4, &self.frames_by_id); + my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(6, &self.traces_by_id); my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(2, &self.traces); + my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeFixed64>(5, &self.name_to_trace_id); my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -135,7 +225,10 @@ impl ::protobuf::Message for GraphDebugInfo { for v in &self.files { os.write_string(1, &v)?; }; + ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(4, &self.frames_by_id, os)?; + ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>(6, &self.traces_by_id, os)?; ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(2, &self.traces, os)?; + ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeFixed64>(5, &self.name_to_trace_id, os)?; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -179,11 +272,26 @@ impl ::protobuf::Message for GraphDebugInfo { |m: &GraphDebugInfo| { &m.files }, |m: &mut GraphDebugInfo| { &mut m.files }, )); + fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeFixed64, ::protobuf::types::ProtobufTypeMessage>( + "frames_by_id", + |m: &GraphDebugInfo| { &m.frames_by_id }, + |m: &mut GraphDebugInfo| { &mut m.frames_by_id }, + )); + fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeFixed64, 
::protobuf::types::ProtobufTypeMessage>( + "traces_by_id", + |m: &GraphDebugInfo| { &m.traces_by_id }, + |m: &mut GraphDebugInfo| { &mut m.traces_by_id }, + )); fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>( "traces", |m: &GraphDebugInfo| { &m.traces }, |m: &mut GraphDebugInfo| { &mut m.traces }, )); + fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeFixed64>( + "name_to_trace_id", + |m: &GraphDebugInfo| { &m.name_to_trace_id }, + |m: &mut GraphDebugInfo| { &mut m.name_to_trace_id }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "GraphDebugInfo", fields, @@ -201,7 +309,10 @@ impl ::protobuf::Message for GraphDebugInfo { impl ::protobuf::Clear for GraphDebugInfo { fn clear(&mut self) { self.files.clear(); + self.frames_by_id.clear(); + self.traces_by_id.clear(); self.traces.clear(); + self.name_to_trace_id.clear(); self.unknown_fields.clear(); } } @@ -221,11 +332,11 @@ impl ::protobuf::reflect::ProtobufValue for GraphDebugInfo { #[derive(PartialEq,Clone,Default)] pub struct GraphDebugInfo_FileLineCol { // message fields - pub file_index: i32, - pub line: i32, - pub col: i32, - pub func: ::std::string::String, - pub code: ::std::string::String, + file_index: ::std::option::Option, + line: ::std::option::Option, + col: ::std::option::Option, + func: ::protobuf::SingularField<::std::string::String>, + code: ::protobuf::SingularField<::std::string::String>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -242,101 +353,133 @@ impl GraphDebugInfo_FileLineCol { ::std::default::Default::default() } - // int32 file_index = 1; + // optional int32 file_index = 1; pub fn get_file_index(&self) -> i32 { - self.file_index + self.file_index.unwrap_or(0) } pub fn clear_file_index(&mut self) { - self.file_index = 0; + self.file_index = ::std::option::Option::None; + } + + pub fn has_file_index(&self) -> bool { + self.file_index.is_some() } // Param is passed by value, moved pub fn set_file_index(&mut self, v: i32) { - self.file_index = v; + self.file_index = ::std::option::Option::Some(v); } - // int32 line = 2; + // optional int32 line = 2; pub fn get_line(&self) -> i32 { - self.line + self.line.unwrap_or(0) } pub fn clear_line(&mut self) { - self.line = 0; + self.line = ::std::option::Option::None; + } + + pub fn has_line(&self) -> bool { + self.line.is_some() } // Param is passed by value, moved pub fn set_line(&mut self, v: i32) { - self.line = v; + self.line = ::std::option::Option::Some(v); } - // int32 col = 3; + // optional int32 col = 3; pub fn get_col(&self) -> i32 { - self.col + self.col.unwrap_or(0) } pub fn clear_col(&mut self) { - self.col = 0; + self.col = ::std::option::Option::None; + } + + pub fn has_col(&self) -> bool { + self.col.is_some() } // Param is passed by value, moved pub fn set_col(&mut self, v: i32) { - self.col = v; + self.col = ::std::option::Option::Some(v); } - // string func = 4; + // optional string func = 4; pub fn get_func(&self) -> &str { - &self.func + match self.func.as_ref() { + Some(v) => &v, + None => "", + } } pub fn clear_func(&mut self) { self.func.clear(); } + pub fn has_func(&self) -> bool { + self.func.is_some() + } + // Param is passed by value, moved pub fn set_func(&mut self, v: ::std::string::String) { - self.func = v; + self.func = ::protobuf::SingularField::some(v); } // Mutable pointer to the 
field. // If field is not initialized, it is initialized with default value first. pub fn mut_func(&mut self) -> &mut ::std::string::String { - &mut self.func + if self.func.is_none() { + self.func.set_default(); + } + self.func.as_mut().unwrap() } // Take field pub fn take_func(&mut self) -> ::std::string::String { - ::std::mem::replace(&mut self.func, ::std::string::String::new()) + self.func.take().unwrap_or_else(|| ::std::string::String::new()) } - // string code = 5; + // optional string code = 5; pub fn get_code(&self) -> &str { - &self.code + match self.code.as_ref() { + Some(v) => &v, + None => "", + } } pub fn clear_code(&mut self) { self.code.clear(); } + pub fn has_code(&self) -> bool { + self.code.is_some() + } + // Param is passed by value, moved pub fn set_code(&mut self, v: ::std::string::String) { - self.code = v; + self.code = ::protobuf::SingularField::some(v); } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_code(&mut self) -> &mut ::std::string::String { - &mut self.code + if self.code.is_none() { + self.code.set_default(); + } + self.code.as_mut().unwrap() } // Take field pub fn take_code(&mut self) -> ::std::string::String { - ::std::mem::replace(&mut self.code, ::std::string::String::new()) + self.code.take().unwrap_or_else(|| ::std::string::String::new()) } } @@ -354,27 +497,27 @@ impl ::protobuf::Message for GraphDebugInfo_FileLineCol { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; - self.file_index = tmp; + self.file_index = ::std::option::Option::Some(tmp); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; - self.line = tmp; + self.line = ::std::option::Option::Some(tmp); }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; - self.col = tmp; + self.col = ::std::option::Option::Some(tmp); }, 4 => { - ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.func)?; + ::protobuf::rt::read_singular_string_into(wire_type, is, &mut self.func)?; }, 5 => { - ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.code)?; + ::protobuf::rt::read_singular_string_into(wire_type, is, &mut self.code)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; @@ -388,20 +531,20 @@ impl ::protobuf::Message for GraphDebugInfo_FileLineCol { #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; - if self.file_index != 0 { - my_size += ::protobuf::rt::value_size(1, self.file_index, ::protobuf::wire_format::WireTypeVarint); + if let Some(v) = self.file_index { + my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } - if self.line != 0 { - my_size += ::protobuf::rt::value_size(2, self.line, ::protobuf::wire_format::WireTypeVarint); + if let Some(v) = self.line { + my_size += ::protobuf::rt::value_size(2, v, ::protobuf::wire_format::WireTypeVarint); } - if self.col != 0 { - my_size += ::protobuf::rt::value_size(3, self.col, ::protobuf::wire_format::WireTypeVarint); + if let Some(v) = self.col { + my_size += ::protobuf::rt::value_size(3, v, ::protobuf::wire_format::WireTypeVarint); } - if !self.func.is_empty() { - my_size += 
::protobuf::rt::string_size(4, &self.func); + if let Some(ref v) = self.func.as_ref() { + my_size += ::protobuf::rt::string_size(4, &v); } - if !self.code.is_empty() { - my_size += ::protobuf::rt::string_size(5, &self.code); + if let Some(ref v) = self.code.as_ref() { + my_size += ::protobuf::rt::string_size(5, &v); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); @@ -409,20 +552,20 @@ impl ::protobuf::Message for GraphDebugInfo_FileLineCol { } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { - if self.file_index != 0 { - os.write_int32(1, self.file_index)?; + if let Some(v) = self.file_index { + os.write_int32(1, v)?; } - if self.line != 0 { - os.write_int32(2, self.line)?; + if let Some(v) = self.line { + os.write_int32(2, v)?; } - if self.col != 0 { - os.write_int32(3, self.col)?; + if let Some(v) = self.col { + os.write_int32(3, v)?; } - if !self.func.is_empty() { - os.write_string(4, &self.func)?; + if let Some(ref v) = self.func.as_ref() { + os.write_string(4, &v)?; } - if !self.code.is_empty() { - os.write_string(5, &self.code)?; + if let Some(ref v) = self.code.as_ref() { + os.write_string(5, &v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) @@ -462,27 +605,27 @@ impl ::protobuf::Message for GraphDebugInfo_FileLineCol { static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "file_index", |m: &GraphDebugInfo_FileLineCol| { &m.file_index }, |m: &mut GraphDebugInfo_FileLineCol| { &mut m.file_index }, )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "line", |m: &GraphDebugInfo_FileLineCol| { &m.line }, |m: &mut GraphDebugInfo_FileLineCol| { &mut m.line }, )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( + fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "col", |m: &GraphDebugInfo_FileLineCol| { &m.col }, |m: &mut GraphDebugInfo_FileLineCol| { &mut m.col }, )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + fields.push(::protobuf::reflect::accessor::make_singular_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( "func", |m: &GraphDebugInfo_FileLineCol| { &m.func }, |m: &mut GraphDebugInfo_FileLineCol| { &mut m.func }, )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + fields.push(::protobuf::reflect::accessor::make_singular_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( "code", |m: &GraphDebugInfo_FileLineCol| { &m.code }, |m: &mut GraphDebugInfo_FileLineCol| { &mut m.code }, @@ -503,9 +646,9 @@ impl ::protobuf::Message for GraphDebugInfo_FileLineCol { impl ::protobuf::Clear for GraphDebugInfo_FileLineCol { fn clear(&mut self) { - self.file_index = 0; - self.line = 0; - self.col = 0; + self.file_index = 
::std::option::Option::None; + self.line = ::std::option::Option::None; + self.col = ::std::option::Option::None; self.func.clear(); self.code.clear(); self.unknown_fields.clear(); @@ -528,6 +671,7 @@ impl ::protobuf::reflect::ProtobufValue for GraphDebugInfo_FileLineCol { pub struct GraphDebugInfo_StackTrace { // message fields pub file_line_cols: ::protobuf::RepeatedField, + pub frame_id: ::std::vec::Vec, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -568,6 +712,31 @@ impl GraphDebugInfo_StackTrace { pub fn take_file_line_cols(&mut self) -> ::protobuf::RepeatedField { ::std::mem::replace(&mut self.file_line_cols, ::protobuf::RepeatedField::new()) } + + // repeated fixed64 frame_id = 2; + + + pub fn get_frame_id(&self) -> &[u64] { + &self.frame_id + } + pub fn clear_frame_id(&mut self) { + self.frame_id.clear(); + } + + // Param is passed by value, moved + pub fn set_frame_id(&mut self, v: ::std::vec::Vec) { + self.frame_id = v; + } + + // Mutable pointer to the field. + pub fn mut_frame_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.frame_id + } + + // Take field + pub fn take_frame_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.frame_id, ::std::vec::Vec::new()) + } } impl ::protobuf::Message for GraphDebugInfo_StackTrace { @@ -587,6 +756,9 @@ impl ::protobuf::Message for GraphDebugInfo_StackTrace { 1 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.file_line_cols)?; }, + 2 => { + ::protobuf::rt::read_repeated_fixed64_into(wire_type, is, &mut self.frame_id)?; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -603,6 +775,9 @@ impl ::protobuf::Message for GraphDebugInfo_StackTrace { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; + if !self.frame_id.is_empty() { + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size((self.frame_id.len() * 8) as u32) + (self.frame_id.len() * 8) as u32; + } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -614,6 +789,14 @@ impl ::protobuf::Message for GraphDebugInfo_StackTrace { os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; + if !self.frame_id.is_empty() { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + // TODO: Data size is computed again, it should be cached + os.write_raw_varint32((self.frame_id.len() * 8) as u32)?; + for v in &self.frame_id { + os.write_fixed64_no_tag(*v)?; + }; + } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -657,6 +840,11 @@ impl ::protobuf::Message for GraphDebugInfo_StackTrace { |m: &GraphDebugInfo_StackTrace| { &m.file_line_cols }, |m: &mut GraphDebugInfo_StackTrace| { &mut m.file_line_cols }, )); + fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>( + "frame_id", + |m: &GraphDebugInfo_StackTrace| { &m.frame_id }, + |m: &mut GraphDebugInfo_StackTrace| { &mut m.frame_id }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "GraphDebugInfo.StackTrace", fields, @@ -674,6 +862,7 @@ impl ::protobuf::Message for GraphDebugInfo_StackTrace { impl ::protobuf::Clear for GraphDebugInfo_StackTrace { fn clear(&mut self) { self.file_line_cols.clear(); + self.frame_id.clear(); self.unknown_fields.clear(); } } @@ -692,19 +881,31 @@ impl ::protobuf::reflect::ProtobufValue for 
GraphDebugInfo_StackTrace { static file_descriptor_proto_data: &'static [u8] = b"\ \n0tensorflow/core/framework/graph_debug_info.proto\x12\ntensorflow\"\ - \xa0\x03\n\x0eGraphDebugInfo\x12\x14\n\x05files\x18\x01\x20\x03(\tR\x05f\ - iles\x12>\n\x06traces\x18\x02\x20\x03(\x0b2&.tensorflow.GraphDebugInfo.T\ - racesEntryR\x06traces\x1az\n\x0bFileLineCol\x12\x1d\n\nfile_index\x18\ - \x01\x20\x01(\x05R\tfileIndex\x12\x12\n\x04line\x18\x02\x20\x01(\x05R\ - \x04line\x12\x10\n\x03col\x18\x03\x20\x01(\x05R\x03col\x12\x12\n\x04func\ - \x18\x04\x20\x01(\tR\x04func\x12\x12\n\x04code\x18\x05\x20\x01(\tR\x04co\ - de\x1aZ\n\nStackTrace\x12L\n\x0efile_line_cols\x18\x01\x20\x03(\x0b2&.te\ - nsorflow.GraphDebugInfo.FileLineColR\x0cfileLineCols\x1a`\n\x0bTracesEnt\ - ry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12;\n\x05value\x18\x02\ - \x20\x01(\x0b2%.tensorflow.GraphDebugInfo.StackTraceR\x05value:\x028\x01\ - B\x8c\x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUg\ - ithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_pro\ - tos_go_proto\xf8\x01\x01b\x06proto3\ + \xc2\x07\n\x0eGraphDebugInfo\x12\x14\n\x05files\x18\x01\x20\x03(\tR\x05f\ + iles\x12L\n\x0cframes_by_id\x18\x04\x20\x03(\x0b2*.tensorflow.GraphDebug\ + Info.FramesByIdEntryR\nframesById\x12L\n\x0ctraces_by_id\x18\x06\x20\x03\ + (\x0b2*.tensorflow.GraphDebugInfo.TracesByIdEntryR\ntracesById\x12>\n\ + \x06traces\x18\x02\x20\x03(\x0b2&.tensorflow.GraphDebugInfo.TracesEntryR\ + \x06traces\x12V\n\x10name_to_trace_id\x18\x05\x20\x03(\x0b2-.tensorflow.\ + GraphDebugInfo.NameToTraceIdEntryR\rnameToTraceId\x1az\n\x0bFileLineCol\ + \x12\x1d\n\nfile_index\x18\x01\x20\x01(\x05R\tfileIndex\x12\x12\n\x04lin\ + e\x18\x02\x20\x01(\x05R\x04line\x12\x10\n\x03col\x18\x03\x20\x01(\x05R\ + \x03col\x12\x12\n\x04func\x18\x04\x20\x01(\tR\x04func\x12\x12\n\x04code\ + \x18\x05\x20\x01(\tR\x04code\x1ay\n\nStackTrace\x12L\n\x0efile_line_cols\ + \x18\x01\x20\x03(\x0b2&.tensorflow.GraphDebugInfo.FileLineColR\x0cfileLi\ + neCols\x12\x1d\n\x08frame_id\x18\x02\x20\x03(\x06R\x07frameIdB\x02\x10\ + \x01\x1ae\n\x0fFramesByIdEntry\x12\x10\n\x03key\x18\x01\x20\x01(\x06R\ + \x03key\x12<\n\x05value\x18\x02\x20\x01(\x0b2&.tensorflow.GraphDebugInfo\ + .FileLineColR\x05value:\x028\x01\x1ad\n\x0fTracesByIdEntry\x12\x10\n\x03\ + key\x18\x01\x20\x01(\x06R\x03key\x12;\n\x05value\x18\x02\x20\x01(\x0b2%.\ + tensorflow.GraphDebugInfo.StackTraceR\x05value:\x028\x01\x1a`\n\x0bTrace\ + sEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12;\n\x05value\x18\ + \x02\x20\x01(\x0b2%.tensorflow.GraphDebugInfo.StackTraceR\x05value:\x028\ + \x01\x1a@\n\x12NameToTraceIdEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\ + \x03key\x12\x14\n\x05value\x18\x02\x20\x01(\x06R\x05value:\x028\x01B\x8c\ + \x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUgithub\ + .com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_g\ + o_proto\xf8\x01\x01\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/meta_graph.rs b/src/protos/meta_graph.rs index 79106b577e..4a3ede5a12 100644 --- a/src/protos/meta_graph.rs +++ b/src/protos/meta_graph.rs @@ -3190,6 +3190,7 @@ pub struct SignatureDef { pub inputs: ::std::collections::HashMap<::std::string::String, TensorInfo>, pub outputs: ::std::collections::HashMap<::std::string::String, TensorInfo>, pub method_name: ::std::string::String, + pub defaults: ::std::collections::HashMap<::std::string::String, 
super::tensor::TensorProto>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -3281,6 +3282,31 @@ impl SignatureDef { pub fn take_method_name(&mut self) -> ::std::string::String { ::std::mem::replace(&mut self.method_name, ::std::string::String::new()) } + + // repeated .tensorflow.SignatureDef.DefaultsEntry defaults = 4; + + + pub fn get_defaults(&self) -> &::std::collections::HashMap<::std::string::String, super::tensor::TensorProto> { + &self.defaults + } + pub fn clear_defaults(&mut self) { + self.defaults.clear(); + } + + // Param is passed by value, moved + pub fn set_defaults(&mut self, v: ::std::collections::HashMap<::std::string::String, super::tensor::TensorProto>) { + self.defaults = v; + } + + // Mutable pointer to the field. + pub fn mut_defaults(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, super::tensor::TensorProto> { + &mut self.defaults + } + + // Take field + pub fn take_defaults(&mut self) -> ::std::collections::HashMap<::std::string::String, super::tensor::TensorProto> { + ::std::mem::replace(&mut self.defaults, ::std::collections::HashMap::new()) + } } impl ::protobuf::Message for SignatureDef { @@ -3301,6 +3327,9 @@ impl ::protobuf::Message for SignatureDef { 3 => { ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.method_name)?; }, + 4 => { + ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(wire_type, is, &mut self.defaults)?; + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -3318,6 +3347,7 @@ impl ::protobuf::Message for SignatureDef { if !self.method_name.is_empty() { my_size += ::protobuf::rt::string_size(3, &self.method_name); } + my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(4, &self.defaults); my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size @@ -3329,6 +3359,7 @@ impl ::protobuf::Message for SignatureDef { if !self.method_name.is_empty() { os.write_string(3, &self.method_name)?; } + ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>(4, &self.defaults, os)?; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } @@ -3382,6 +3413,11 @@ impl ::protobuf::Message for SignatureDef { |m: &SignatureDef| { &m.method_name }, |m: &mut SignatureDef| { &mut m.method_name }, )); + fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage>( + "defaults", + |m: &SignatureDef| { &m.defaults }, + |m: &mut SignatureDef| { &mut m.defaults }, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "SignatureDef", fields, @@ -3401,6 +3437,7 @@ impl ::protobuf::Clear for SignatureDef { self.inputs.clear(); self.outputs.clear(); self.method_name.clear(); + self.defaults.clear(); self.unknown_fields.clear(); } } @@ -3636,73 +3673,77 @@ impl ::protobuf::reflect::ProtobufValue for AssetFileDef { static file_descriptor_proto_data: &'static [u8] = b"\ \n)tensorflow/core/protobuf/meta_graph.proto\x12\ntensorflow\x1a\x19goog\ le/protobuf/any.proto\x1a%tensorflow/core/framework/graph.proto\x1a&tens\ - orflow/core/framework/op_def.proto\x1a,tensorflow/core/framework/tensor_\ - 
shape.proto\x1a%tensorflow/core/framework/types.proto\x1a1tensorflow/cor\ - e/protobuf/saved_object_graph.proto\x1a$tensorflow/core/protobuf/saver.p\ - roto\x1a%tensorflow/core/protobuf/struct.proto\"\xa9\t\n\x0cMetaGraphDef\ - \x12H\n\rmeta_info_def\x18\x01\x20\x01(\x0b2$.tensorflow.MetaGraphDef.Me\ - taInfoDefR\x0bmetaInfoDef\x121\n\tgraph_def\x18\x02\x20\x01(\x0b2\x14.te\ - nsorflow.GraphDefR\x08graphDef\x121\n\tsaver_def\x18\x03\x20\x01(\x0b2\ - \x14.tensorflow.SaverDefR\x08saverDef\x12R\n\x0ecollection_def\x18\x04\ - \x20\x03(\x0b2+.tensorflow.MetaGraphDef.CollectionDefEntryR\rcollectionD\ - ef\x12O\n\rsignature_def\x18\x05\x20\x03(\x0b2*.tensorflow.MetaGraphDef.\ - SignatureDefEntryR\x0csignatureDef\x12>\n\x0easset_file_def\x18\x06\x20\ - \x03(\x0b2\x18.tensorflow.AssetFileDefR\x0cassetFileDef\x12F\n\x10object\ - _graph_def\x18\x07\x20\x01(\x0b2\x1c.tensorflow.SavedObjectGraphR\x0eobj\ - ectGraphDef\x1a\x83\x04\n\x0bMetaInfoDef\x12,\n\x12meta_graph_version\ - \x18\x01\x20\x01(\tR\x10metaGraphVersion\x12<\n\x10stripped_op_list\x18\ - \x02\x20\x01(\x0b2\x12.tensorflow.OpListR\x0estrippedOpList\x12/\n\x08an\ - y_info\x18\x03\x20\x01(\x0b2\x14.google.protobuf.AnyR\x07anyInfo\x12\x12\ - \n\x04tags\x18\x04\x20\x03(\tR\x04tags\x12-\n\x12tensorflow_version\x18\ - \x05\x20\x01(\tR\x11tensorflowVersion\x124\n\x16tensorflow_git_version\ - \x18\x06\x20\x01(\tR\x14tensorflowGitVersion\x124\n\x16stripped_default_\ - attrs\x18\x07\x20\x01(\x08R\x14strippedDefaultAttrs\x12d\n\x10function_a\ - liases\x18\x08\x20\x03(\x0b29.tensorflow.MetaGraphDef.MetaInfoDef.Functi\ - onAliasesEntryR\x0ffunctionAliases\x1aB\n\x14FunctionAliasesEntry\x12\ - \x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\ - \x01(\tR\x05value:\x028\x01\x1a[\n\x12CollectionDefEntry\x12\x10\n\x03ke\ - y\x18\x01\x20\x01(\tR\x03key\x12/\n\x05value\x18\x02\x20\x01(\x0b2\x19.t\ - ensorflow.CollectionDefR\x05value:\x028\x01\x1aY\n\x11SignatureDefEntry\ - \x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12.\n\x05value\x18\x02\x20\ - \x01(\x0b2\x18.tensorflow.SignatureDefR\x05value:\x028\x01\"\xb6\x04\n\r\ - CollectionDef\x12A\n\tnode_list\x18\x01\x20\x01(\x0b2\".tensorflow.Colle\ - ctionDef.NodeListH\0R\x08nodeList\x12D\n\nbytes_list\x18\x02\x20\x01(\ - \x0b2#.tensorflow.CollectionDef.BytesListH\0R\tbytesList\x12D\n\nint64_l\ - ist\x18\x03\x20\x01(\x0b2#.tensorflow.CollectionDef.Int64ListH\0R\tint64\ - List\x12D\n\nfloat_list\x18\x04\x20\x01(\x0b2#.tensorflow.CollectionDef.\ - FloatListH\0R\tfloatList\x12>\n\x08any_list\x18\x05\x20\x01(\x0b2!.tenso\ - rflow.CollectionDef.AnyListH\0R\x07anyList\x1a\x20\n\x08NodeList\x12\x14\ - \n\x05value\x18\x01\x20\x03(\tR\x05value\x1a!\n\tBytesList\x12\x14\n\x05\ - value\x18\x01\x20\x03(\x0cR\x05value\x1a%\n\tInt64List\x12\x18\n\x05valu\ - e\x18\x01\x20\x03(\x03R\x05valueB\x02\x10\x01\x1a%\n\tFloatList\x12\x18\ - \n\x05value\x18\x01\x20\x03(\x02R\x05valueB\x02\x10\x01\x1a5\n\x07AnyLis\ - t\x12*\n\x05value\x18\x01\x20\x03(\x0b2\x14.google.protobuf.AnyR\x05valu\ - eB\x06\n\x04kind\"\xda\x04\n\nTensorInfo\x12\x14\n\x04name\x18\x01\x20\ - \x01(\tH\0R\x04name\x12A\n\ncoo_sparse\x18\x04\x20\x01(\x0b2\x20.tensorf\ - low.TensorInfo.CooSparseH\0R\tcooSparse\x12S\n\x10composite_tensor\x18\ - \x05\x20\x01(\x0b2&.tensorflow.TensorInfo.CompositeTensorH\0R\x0fcomposi\ - teTensor\x12*\n\x05dtype\x18\x02\x20\x01(\x0e2\x14.tensorflow.DataTypeR\ - \x05dtype\x12?\n\x0ctensor_shape\x18\x03\x20\x01(\x0b2\x1c.tensorflow.Te\ - nsorShapeProtoR\x0btensorShape\x1a\xa0\x01\n\tCooSparse\x12,\n\x12values\ - 
_tensor_name\x18\x01\x20\x01(\tR\x10valuesTensorName\x12.\n\x13indices_t\ - ensor_name\x18\x02\x20\x01(\tR\x11indicesTensorName\x125\n\x17dense_shap\ - e_tensor_name\x18\x03\x20\x01(\tR\x14denseShapeTensorName\x1a\x81\x01\n\ - \x0fCompositeTensor\x126\n\ttype_spec\x18\x01\x20\x01(\x0b2\x19.tensorfl\ - ow.TypeSpecProtoR\x08typeSpec\x126\n\ncomponents\x18\x02\x20\x03(\x0b2\ - \x16.tensorflow.TensorInfoR\ncomponentsB\n\n\x08encoding\"\xd5\x02\n\x0c\ - SignatureDef\x12<\n\x06inputs\x18\x01\x20\x03(\x0b2$.tensorflow.Signatur\ - eDef.InputsEntryR\x06inputs\x12?\n\x07outputs\x18\x02\x20\x03(\x0b2%.ten\ - sorflow.SignatureDef.OutputsEntryR\x07outputs\x12\x1f\n\x0bmethod_name\ - \x18\x03\x20\x01(\tR\nmethodName\x1aQ\n\x0bInputsEntry\x12\x10\n\x03key\ - \x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\x0b2\x16.te\ - nsorflow.TensorInfoR\x05value:\x028\x01\x1aR\n\x0cOutputsEntry\x12\x10\n\ - \x03key\x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\x0b2\ - \x16.tensorflow.TensorInfoR\x05value:\x028\x01\"c\n\x0cAssetFileDef\x127\ - \n\x0btensor_info\x18\x01\x20\x01(\x0b2\x16.tensorflow.TensorInfoR\ntens\ - orInfo\x12\x1a\n\x08filename\x18\x02\x20\x01(\tR\x08filenameB\x87\x01\n\ - \x18org.tensorflow.frameworkB\x0fMetaGraphProtosP\x01ZUgithub.com/tensor\ - flow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\ - \x01\x01b\x06proto3\ + orflow/core/framework/op_def.proto\x1a&tensorflow/core/framework/tensor.\ + proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/co\ + re/framework/types.proto\x1a1tensorflow/core/protobuf/saved_object_graph\ + .proto\x1a$tensorflow/core/protobuf/saver.proto\x1a%tensorflow/core/prot\ + obuf/struct.proto\"\xa9\t\n\x0cMetaGraphDef\x12H\n\rmeta_info_def\x18\ + \x01\x20\x01(\x0b2$.tensorflow.MetaGraphDef.MetaInfoDefR\x0bmetaInfoDef\ + \x121\n\tgraph_def\x18\x02\x20\x01(\x0b2\x14.tensorflow.GraphDefR\x08gra\ + phDef\x121\n\tsaver_def\x18\x03\x20\x01(\x0b2\x14.tensorflow.SaverDefR\ + \x08saverDef\x12R\n\x0ecollection_def\x18\x04\x20\x03(\x0b2+.tensorflow.\ + MetaGraphDef.CollectionDefEntryR\rcollectionDef\x12O\n\rsignature_def\ + \x18\x05\x20\x03(\x0b2*.tensorflow.MetaGraphDef.SignatureDefEntryR\x0csi\ + gnatureDef\x12>\n\x0easset_file_def\x18\x06\x20\x03(\x0b2\x18.tensorflow\ + .AssetFileDefR\x0cassetFileDef\x12F\n\x10object_graph_def\x18\x07\x20\ + \x01(\x0b2\x1c.tensorflow.SavedObjectGraphR\x0eobjectGraphDef\x1a\x83\ + \x04\n\x0bMetaInfoDef\x12,\n\x12meta_graph_version\x18\x01\x20\x01(\tR\ + \x10metaGraphVersion\x12<\n\x10stripped_op_list\x18\x02\x20\x01(\x0b2\ + \x12.tensorflow.OpListR\x0estrippedOpList\x12/\n\x08any_info\x18\x03\x20\ + \x01(\x0b2\x14.google.protobuf.AnyR\x07anyInfo\x12\x12\n\x04tags\x18\x04\ + \x20\x03(\tR\x04tags\x12-\n\x12tensorflow_version\x18\x05\x20\x01(\tR\ + \x11tensorflowVersion\x124\n\x16tensorflow_git_version\x18\x06\x20\x01(\ + \tR\x14tensorflowGitVersion\x124\n\x16stripped_default_attrs\x18\x07\x20\ + \x01(\x08R\x14strippedDefaultAttrs\x12d\n\x10function_aliases\x18\x08\ + \x20\x03(\x0b29.tensorflow.MetaGraphDef.MetaInfoDef.FunctionAliasesEntry\ + R\x0ffunctionAliases\x1aB\n\x14FunctionAliasesEntry\x12\x10\n\x03key\x18\ + \x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\ + \x028\x01\x1a[\n\x12CollectionDefEntry\x12\x10\n\x03key\x18\x01\x20\x01(\ + \tR\x03key\x12/\n\x05value\x18\x02\x20\x01(\x0b2\x19.tensorflow.Collecti\ + onDefR\x05value:\x028\x01\x1aY\n\x11SignatureDefEntry\x12\x10\n\x03key\ + 
\x18\x01\x20\x01(\tR\x03key\x12.\n\x05value\x18\x02\x20\x01(\x0b2\x18.te\ + nsorflow.SignatureDefR\x05value:\x028\x01\"\xb6\x04\n\rCollectionDef\x12\ + A\n\tnode_list\x18\x01\x20\x01(\x0b2\".tensorflow.CollectionDef.NodeList\ + H\0R\x08nodeList\x12D\n\nbytes_list\x18\x02\x20\x01(\x0b2#.tensorflow.Co\ + llectionDef.BytesListH\0R\tbytesList\x12D\n\nint64_list\x18\x03\x20\x01(\ + \x0b2#.tensorflow.CollectionDef.Int64ListH\0R\tint64List\x12D\n\nfloat_l\ + ist\x18\x04\x20\x01(\x0b2#.tensorflow.CollectionDef.FloatListH\0R\tfloat\ + List\x12>\n\x08any_list\x18\x05\x20\x01(\x0b2!.tensorflow.CollectionDef.\ + AnyListH\0R\x07anyList\x1a\x20\n\x08NodeList\x12\x14\n\x05value\x18\x01\ + \x20\x03(\tR\x05value\x1a!\n\tBytesList\x12\x14\n\x05value\x18\x01\x20\ + \x03(\x0cR\x05value\x1a%\n\tInt64List\x12\x18\n\x05value\x18\x01\x20\x03\ + (\x03R\x05valueB\x02\x10\x01\x1a%\n\tFloatList\x12\x18\n\x05value\x18\ + \x01\x20\x03(\x02R\x05valueB\x02\x10\x01\x1a5\n\x07AnyList\x12*\n\x05val\ + ue\x18\x01\x20\x03(\x0b2\x14.google.protobuf.AnyR\x05valueB\x06\n\x04kin\ + d\"\xda\x04\n\nTensorInfo\x12\x14\n\x04name\x18\x01\x20\x01(\tH\0R\x04na\ + me\x12A\n\ncoo_sparse\x18\x04\x20\x01(\x0b2\x20.tensorflow.TensorInfo.Co\ + oSparseH\0R\tcooSparse\x12S\n\x10composite_tensor\x18\x05\x20\x01(\x0b2&\ + .tensorflow.TensorInfo.CompositeTensorH\0R\x0fcompositeTensor\x12*\n\x05\ + dtype\x18\x02\x20\x01(\x0e2\x14.tensorflow.DataTypeR\x05dtype\x12?\n\x0c\ + tensor_shape\x18\x03\x20\x01(\x0b2\x1c.tensorflow.TensorShapeProtoR\x0bt\ + ensorShape\x1a\xa0\x01\n\tCooSparse\x12,\n\x12values_tensor_name\x18\x01\ + \x20\x01(\tR\x10valuesTensorName\x12.\n\x13indices_tensor_name\x18\x02\ + \x20\x01(\tR\x11indicesTensorName\x125\n\x17dense_shape_tensor_name\x18\ + \x03\x20\x01(\tR\x14denseShapeTensorName\x1a\x81\x01\n\x0fCompositeTenso\ + r\x126\n\ttype_spec\x18\x01\x20\x01(\x0b2\x19.tensorflow.TypeSpecProtoR\ + \x08typeSpec\x126\n\ncomponents\x18\x02\x20\x03(\x0b2\x16.tensorflow.Ten\ + sorInfoR\ncomponentsB\n\n\x08encoding\"\xef\x03\n\x0cSignatureDef\x12<\n\ + \x06inputs\x18\x01\x20\x03(\x0b2$.tensorflow.SignatureDef.InputsEntryR\ + \x06inputs\x12?\n\x07outputs\x18\x02\x20\x03(\x0b2%.tensorflow.Signature\ + Def.OutputsEntryR\x07outputs\x12\x1f\n\x0bmethod_name\x18\x03\x20\x01(\t\ + R\nmethodName\x12B\n\x08defaults\x18\x04\x20\x03(\x0b2&.tensorflow.Signa\ + tureDef.DefaultsEntryR\x08defaults\x1aQ\n\x0bInputsEntry\x12\x10\n\x03ke\ + y\x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\x0b2\x16.t\ + ensorflow.TensorInfoR\x05value:\x028\x01\x1aR\n\x0cOutputsEntry\x12\x10\ + \n\x03key\x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\ + \x0b2\x16.tensorflow.TensorInfoR\x05value:\x028\x01\x1aT\n\rDefaultsEntr\ + y\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12-\n\x05value\x18\x02\ + \x20\x01(\x0b2\x17.tensorflow.TensorProtoR\x05value:\x028\x01\"c\n\x0cAs\ + setFileDef\x127\n\x0btensor_info\x18\x01\x20\x01(\x0b2\x16.tensorflow.Te\ + nsorInfoR\ntensorInfo\x12\x1a\n\x08filename\x18\x02\x20\x01(\tR\x08filen\ + ameB\x87\x01\n\x18org.tensorflow.frameworkB\x0fMetaGraphProtosP\x01ZUgit\ + hub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_proto\ + s_go_proto\xf8\x01\x01b\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/rewriter_config.rs b/src/protos/rewriter_config.rs index 60716e5e39..25cb6228af 100644 --- a/src/protos/rewriter_config.rs +++ b/src/protos/rewriter_config.rs @@ -391,6 +391,7 @@ pub 
struct RewriterConfig { pub auto_mixed_precision_onednn_bfloat16: RewriterConfig_Toggle, pub auto_mixed_precision_cpu: RewriterConfig_Toggle, pub disable_meta_optimizer: bool, + pub disable_tfg_optimizer: bool, pub use_plugin_optimizers: RewriterConfig_Toggle, pub experimental_conditional_code_motion: RewriterConfig_Toggle, pub meta_optimizer_iterations: RewriterConfig_NumIterationsType, @@ -723,6 +724,21 @@ impl RewriterConfig { self.disable_meta_optimizer = v; } + // bool disable_tfg_optimizer = 32; + + + pub fn get_disable_tfg_optimizer(&self) -> bool { + self.disable_tfg_optimizer + } + pub fn clear_disable_tfg_optimizer(&mut self) { + self.disable_tfg_optimizer = false; + } + + // Param is passed by value, moved + pub fn set_disable_tfg_optimizer(&mut self, v: bool) { + self.disable_tfg_optimizer = v; + } + // .tensorflow.RewriterConfig.Toggle use_plugin_optimizers = 28; @@ -1169,6 +1185,13 @@ impl ::protobuf::Message for RewriterConfig { let tmp = is.read_bool()?; self.disable_meta_optimizer = tmp; }, + 32 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_bool()?; + self.disable_tfg_optimizer = tmp; + }, 28 => { ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.use_plugin_optimizers, 28, &mut self.unknown_fields)? }, @@ -1309,6 +1332,9 @@ impl ::protobuf::Message for RewriterConfig { if self.disable_meta_optimizer != false { my_size += 3; } + if self.disable_tfg_optimizer != false { + my_size += 3; + } if self.use_plugin_optimizers != RewriterConfig_Toggle::DEFAULT { my_size += ::protobuf::rt::enum_size(28, self.use_plugin_optimizers); } @@ -1428,6 +1454,9 @@ impl ::protobuf::Message for RewriterConfig { if self.disable_meta_optimizer != false { os.write_bool(19, self.disable_meta_optimizer)?; } + if self.disable_tfg_optimizer != false { + os.write_bool(32, self.disable_tfg_optimizer)?; + } if self.use_plugin_optimizers != RewriterConfig_Toggle::DEFAULT { os.write_enum(28, ::protobuf::ProtobufEnum::value(&self.use_plugin_optimizers))?; } @@ -1624,6 +1653,11 @@ impl ::protobuf::Message for RewriterConfig { |m: &RewriterConfig| { &m.disable_meta_optimizer }, |m: &mut RewriterConfig| { &mut m.disable_meta_optimizer }, )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>( + "disable_tfg_optimizer", + |m: &RewriterConfig| { &m.disable_tfg_optimizer }, + |m: &mut RewriterConfig| { &mut m.disable_tfg_optimizer }, + )); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum>( "use_plugin_optimizers", |m: &RewriterConfig| { &m.use_plugin_optimizers }, @@ -1740,6 +1774,7 @@ impl ::protobuf::Clear for RewriterConfig { self.auto_mixed_precision_onednn_bfloat16 = RewriterConfig_Toggle::DEFAULT; self.auto_mixed_precision_cpu = RewriterConfig_Toggle::DEFAULT; self.disable_meta_optimizer = false; + self.disable_tfg_optimizer = false; self.use_plugin_optimizers = RewriterConfig_Toggle::DEFAULT; self.experimental_conditional_code_motion = RewriterConfig_Toggle::DEFAULT; self.meta_optimizer_iterations = RewriterConfig_NumIterationsType::DEFAULT_NUM_ITERS; @@ -2207,7 +2242,7 @@ static file_descriptor_proto_data: &'static [u8] = b"\ rifier_config.proto\"P\n\x13AutoParallelOptions\x12\x16\n\x06enable\x18\ \x01\x20\x01(\x08R\x06enable\x12!\n\x0cnum_replicas\x18\x02\x20\x01(\x05\ 
R\x0bnumReplicas\"5\n\x16ScopedAllocatorOptions\x12\x1b\n\tenable_op\x18\ - \x01\x20\x03(\tR\x08enableOp\"\xf0\x1c\n\x0eRewriterConfig\x12X\n\x15cpu\ + \x01\x20\x03(\tR\x08enableOp\"\xa4\x1d\n\x0eRewriterConfig\x12X\n\x15cpu\ _layout_conversion\x182\x20\x01(\x0e2$.tensorflow.RewriterConfig.CpuLayo\ utR\x13cpuLayoutConversion\x12L\n\x10layout_optimizer\x18\x01\x20\x01(\ \x0e2!.tensorflow.RewriterConfig.ToggleR\x0flayoutOptimizer\x12L\n\x10co\ @@ -2238,32 +2273,33 @@ static file_descriptor_proto_data: &'static [u8] = b"\ \x20autoMixedPrecisionOnednnBfloat16\x12Z\n\x18auto_mixed_precision_cpu\ \x18\x1d\x20\x01(\x0e2!.tensorflow.RewriterConfig.ToggleR\x15autoMixedPr\ ecisionCpu\x124\n\x16disable_meta_optimizer\x18\x13\x20\x01(\x08R\x14dis\ - ableMetaOptimizer\x12U\n\x15use_plugin_optimizers\x18\x1c\x20\x01(\x0e2!\ - .tensorflow.RewriterConfig.ToggleR\x13usePluginOptimizers\x12r\n$experim\ - ental_conditional_code_motion\x18\x1e\x20\x01(\x0e2!.tensorflow.Rewriter\ - Config.ToggleR!experimentalConditionalCodeMotion\x12h\n\x19meta_optimize\ - r_iterations\x18\x0c\x20\x01(\x0e2,.tensorflow.RewriterConfig.NumIterati\ - onsTypeR\x17metaOptimizerIterations\x12&\n\x0fmin_graph_nodes\x18\x11\ - \x20\x01(\x05R\rminGraphNodes\x12l\n3experimental_disable_compressed_ten\ - sor_optimization\x18\x1a\x20\x01(\x08R/experimentalDisableCompressedTens\ - orOptimization\x12l\n3experimental_disable_folding_quantization_emulatio\ - n\x18\x1b\x20\x01(\x08R/experimentalDisableFoldingQuantizationEmulation\ - \x12V\n\x13memory_optimization\x18\x04\x20\x01(\x0e2%.tensorflow.Rewrite\ - rConfig.MemOptTypeR\x12memoryOptimization\x12S\n'memory_optimizer_target\ - _node_name_scope\x18\x06\x20\x01(\tR\"memoryOptimizerTargetNodeNameScope\ - \x129\n\x19meta_optimizer_timeout_ms\x18\x14\x20\x01(\x03R\x16metaOptimi\ - zerTimeoutMs\x12D\n\rauto_parallel\x18\x05\x20\x01(\x0b2\x1f.tensorflow.\ - AutoParallelOptionsR\x0cautoParallel\x127\n\x18fail_on_optimizer_errors\ - \x18\x15\x20\x01(\x08R\x15failOnOptimizerErrors\x12V\n\x15scoped_allocat\ - or_opts\x18\x10\x20\x01(\x0b2\".tensorflow.ScopedAllocatorOptionsR\x13sc\ - opedAllocatorOpts\x12\x1e\n\noptimizers\x18d\x20\x03(\tR\noptimizers\x12\ - ]\n\x11custom_optimizers\x18\xc8\x01\x20\x03(\x0b2/.tensorflow.RewriterC\ - onfig.CustomGraphOptimizerR\x10customOptimizers\x12b\n\x1finter_optimize\ - r_verifier_config\x18\xac\x02\x20\x01(\x0b2\x1a.tensorflow.VerifierConfi\ - gR\x1cinterOptimizerVerifierConfig\x12f\n!post_optimization_verifier_con\ - fig\x18\xad\x02\x20\x01(\x0b2\x1a.tensorflow.VerifierConfigR\x1epostOpti\ - mizationVerifierConfig\x1a\xea\x01\n\x14CustomGraphOptimizer\x12\x12\n\ - \x04name\x18\x01\x20\x01(\tR\x04name\x12f\n\rparameter_map\x18\x02\x20\ + ableMetaOptimizer\x122\n\x15disable_tfg_optimizer\x18\x20\x20\x01(\x08R\ + \x13disableTfgOptimizer\x12U\n\x15use_plugin_optimizers\x18\x1c\x20\x01(\ + \x0e2!.tensorflow.RewriterConfig.ToggleR\x13usePluginOptimizers\x12r\n$e\ + xperimental_conditional_code_motion\x18\x1e\x20\x01(\x0e2!.tensorflow.Re\ + writerConfig.ToggleR!experimentalConditionalCodeMotion\x12h\n\x19meta_op\ + timizer_iterations\x18\x0c\x20\x01(\x0e2,.tensorflow.RewriterConfig.NumI\ + terationsTypeR\x17metaOptimizerIterations\x12&\n\x0fmin_graph_nodes\x18\ + \x11\x20\x01(\x05R\rminGraphNodes\x12l\n3experimental_disable_compressed\ + _tensor_optimization\x18\x1a\x20\x01(\x08R/experimentalDisableCompressed\ + TensorOptimization\x12l\n3experimental_disable_folding_quantization_emul\ + ation\x18\x1b\x20\x01(\x08R/experimentalDisableFoldingQuantizationEmulat\ + 
ion\x12V\n\x13memory_optimization\x18\x04\x20\x01(\x0e2%.tensorflow.Rewr\ + iterConfig.MemOptTypeR\x12memoryOptimization\x12S\n'memory_optimizer_tar\ + get_node_name_scope\x18\x06\x20\x01(\tR\"memoryOptimizerTargetNodeNameSc\ + ope\x129\n\x19meta_optimizer_timeout_ms\x18\x14\x20\x01(\x03R\x16metaOpt\ + imizerTimeoutMs\x12D\n\rauto_parallel\x18\x05\x20\x01(\x0b2\x1f.tensorfl\ + ow.AutoParallelOptionsR\x0cautoParallel\x127\n\x18fail_on_optimizer_erro\ + rs\x18\x15\x20\x01(\x08R\x15failOnOptimizerErrors\x12V\n\x15scoped_alloc\ + ator_opts\x18\x10\x20\x01(\x0b2\".tensorflow.ScopedAllocatorOptionsR\x13\ + scopedAllocatorOpts\x12\x1e\n\noptimizers\x18d\x20\x03(\tR\noptimizers\ + \x12]\n\x11custom_optimizers\x18\xc8\x01\x20\x03(\x0b2/.tensorflow.Rewri\ + terConfig.CustomGraphOptimizerR\x10customOptimizers\x12b\n\x1finter_opti\ + mizer_verifier_config\x18\xac\x02\x20\x01(\x0b2\x1a.tensorflow.VerifierC\ + onfigR\x1cinterOptimizerVerifierConfig\x12f\n!post_optimization_verifier\ + _config\x18\xad\x02\x20\x01(\x0b2\x1a.tensorflow.VerifierConfigR\x1epost\ + OptimizationVerifierConfig\x1a\xea\x01\n\x14CustomGraphOptimizer\x12\x12\ + \n\x04name\x18\x01\x20\x01(\tR\x04name\x12f\n\rparameter_map\x18\x02\x20\ \x03(\x0b2A.tensorflow.RewriterConfig.CustomGraphOptimizer.ParameterMapE\ ntryR\x0cparameterMap\x1aV\n\x11ParameterMapEntry\x12\x10\n\x03key\x18\ \x01\x20\x01(\tR\x03key\x12+\n\x05value\x18\x02\x20\x01(\x0b2\x15.tensor\ diff --git a/src/protos/rpc_options.rs b/src/protos/rpc_options.rs index bf957247f1..9ad0d63c99 100644 --- a/src/protos/rpc_options.rs +++ b/src/protos/rpc_options.rs @@ -17,7 +17,7 @@ #![allow(trivial_casts)] #![allow(unused_imports)] #![allow(unused_results)] -//! Generated file from `tensorflow/tsl/protobuf/rpc_options.proto` +//! Generated file from `tsl/protobuf/rpc_options.proto` /// Generated files are compatible only with the same version /// of protobuf runtime. 
@@ -358,16 +358,15 @@ impl ::protobuf::reflect::ProtobufValue for RPCOptions { } static file_descriptor_proto_data: &'static [u8] = b"\ - \n)tensorflow/tsl/protobuf/rpc_options.proto\x12\ntensorflow\"\xe0\x02\n\ - \nRPCOptions\x12>\n\x1cuse_rpc_for_inprocess_master\x18\x01\x20\x01(\x08\ - R\x18useRpcForInprocessMaster\x123\n\x15compression_algorithm\x18\x02\ - \x20\x01(\tR\x14compressionAlgorithm\x12+\n\x11compression_level\x18\x03\ - \x20\x01(\x05R\x10compressionLevel\x12,\n\x12cache_rpc_response\x18\x04\ - \x20\x01(\x08R\x10cacheRpcResponse\x12K\n\"disable_session_connection_sh\ - aring\x18\x05\x20\x01(\x08R\x1fdisableSessionConnectionSharing\x125\n\ - \x17num_channels_per_target\x18\x06\x20\x01(\x05R\x14numChannelsPerTarge\ - tB@Z>github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_protob\x06\ - proto3\ + \n\x1etsl/protobuf/rpc_options.proto\x12\ntensorflow\"\xe0\x02\n\nRPCOpt\ + ions\x12>\n\x1cuse_rpc_for_inprocess_master\x18\x01\x20\x01(\x08R\x18use\ + RpcForInprocessMaster\x123\n\x15compression_algorithm\x18\x02\x20\x01(\t\ + R\x14compressionAlgorithm\x12+\n\x11compression_level\x18\x03\x20\x01(\ + \x05R\x10compressionLevel\x12,\n\x12cache_rpc_response\x18\x04\x20\x01(\ + \x08R\x10cacheRpcResponse\x12K\n\"disable_session_connection_sharing\x18\ + \x05\x20\x01(\x08R\x1fdisableSessionConnectionSharing\x125\n\x17num_chan\ + nels_per_target\x18\x06\x20\x01(\x05R\x14numChannelsPerTargetB@Z>github.\ + com/google/tsl/tsl/go/protobuf/for_core_protos_go_protob\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/struct_pb.rs b/src/protos/struct_pb.rs index bbda60dc16..efe7c65cd0 100644 --- a/src/protos/struct_pb.rs +++ b/src/protos/struct_pb.rs @@ -55,6 +55,7 @@ pub enum StructuredValue_oneof_kind { dict_value(DictValue), named_tuple_value(NamedTupleValue), tensor_value(super::tensor::TensorProto), + numpy_value(super::tensor::TensorProto), } impl StructuredValue { @@ -700,6 +701,55 @@ impl StructuredValue { super::tensor::TensorProto::new() } } + + // .tensorflow.TensorProto numpy_value = 56; + + + pub fn get_numpy_value(&self) -> &super::tensor::TensorProto { + match self.kind { + ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(ref v)) => v, + _ => ::default_instance(), + } + } + pub fn clear_numpy_value(&mut self) { + self.kind = ::std::option::Option::None; + } + + pub fn has_numpy_value(&self) -> bool { + match self.kind { + ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(..)) => true, + _ => false, + } + } + + // Param is passed by value, moved + pub fn set_numpy_value(&mut self, v: super::tensor::TensorProto) { + self.kind = ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(v)) + } + + // Mutable pointer to the field. 
+ pub fn mut_numpy_value(&mut self) -> &mut super::tensor::TensorProto { + if let ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(_)) = self.kind { + } else { + self.kind = ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(super::tensor::TensorProto::new())); + } + match self.kind { + ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(ref mut v)) => v, + _ => panic!(), + } + } + + // Take field + pub fn take_numpy_value(&mut self) -> super::tensor::TensorProto { + if self.has_numpy_value() { + match self.kind.take() { + ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(v)) => v, + _ => panic!(), + } + } else { + super::tensor::TensorProto::new() + } + } } impl ::protobuf::Message for StructuredValue { @@ -754,6 +804,11 @@ impl ::protobuf::Message for StructuredValue { return false; } } + if let Some(StructuredValue_oneof_kind::numpy_value(ref v)) = self.kind { + if !v.is_initialized() { + return false; + } + } true } @@ -851,6 +906,12 @@ impl ::protobuf::Message for StructuredValue { } self.kind = ::std::option::Option::Some(StructuredValue_oneof_kind::tensor_value(is.read_message()?)); }, + 56 => { + if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + self.kind = ::std::option::Option::Some(StructuredValue_oneof_kind::numpy_value(is.read_message()?)); + }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, @@ -920,6 +981,10 @@ impl ::protobuf::Message for StructuredValue { let len = v.compute_size(); my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, + &StructuredValue_oneof_kind::numpy_value(ref v) => { + let len = v.compute_size(); + my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }, }; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); @@ -995,6 +1060,11 @@ impl ::protobuf::Message for StructuredValue { os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, + &StructuredValue_oneof_kind::numpy_value(ref v) => { + os.write_tag(56, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }, }; } os.write_unknown_fields(self.get_unknown_fields())?; @@ -1110,6 +1180,11 @@ impl ::protobuf::Message for StructuredValue { StructuredValue::has_tensor_value, StructuredValue::get_tensor_value, )); + fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, super::tensor::TensorProto>( + "numpy_value", + StructuredValue::has_numpy_value, + StructuredValue::get_numpy_value, + )); ::protobuf::reflect::MessageDescriptor::new_pb_name::( "StructuredValue", fields, @@ -1141,6 +1216,7 @@ impl ::protobuf::Clear for StructuredValue { self.kind = ::std::option::Option::None; self.kind = ::std::option::Option::None; self.kind = ::std::option::Option::None; + self.kind = ::std::option::Option::None; self.unknown_fields.clear(); } } @@ -3159,7 +3235,7 @@ impl ::protobuf::reflect::ProtobufValue for TypeSpecProto_TypeSpecClass { static file_descriptor_proto_data: &'static [u8] = b"\ \n%tensorflow/core/protobuf/struct.proto\x12\ntensorflow\x1a&tensorflow/\ core/framework/tensor.proto\x1a,tensorflow/core/framework/tensor_shape.p\ - roto\x1a%tensorflow/core/framework/types.proto\"\x9a\x07\n\x0fStructured\ + roto\x1a%tensorflow/core/framework/types.proto\"\xd6\x07\n\x0fStructured\ 
Value\x126\n\nnone_value\x18\x01\x20\x01(\x0b2\x15.tensorflow.NoneValueH\ \0R\tnoneValue\x12%\n\rfloat64_value\x18\x0b\x20\x01(\x01H\0R\x0cfloat64\ Value\x12!\n\x0bint64_value\x18\x0c\x20\x01(\x12H\0R\nint64Value\x12#\n\ @@ -3177,41 +3253,42 @@ static file_descriptor_proto_data: &'static [u8] = b"\ alue\x126\n\ndict_value\x185\x20\x01(\x0b2\x15.tensorflow.DictValueH\0R\ \tdictValue\x12I\n\x11named_tuple_value\x186\x20\x01(\x0b2\x1b.tensorflo\ w.NamedTupleValueH\0R\x0fnamedTupleValue\x12<\n\x0ctensor_value\x187\x20\ - \x01(\x0b2\x17.tensorflow.TensorProtoH\0R\x0btensorValueB\x06\n\x04kind\ - \"\x0b\n\tNoneValue\"@\n\tListValue\x123\n\x06values\x18\x01\x20\x03(\ - \x0b2\x1b.tensorflow.StructuredValueR\x06values\"A\n\nTupleValue\x123\n\ - \x06values\x18\x01\x20\x03(\x0b2\x1b.tensorflow.StructuredValueR\x06valu\ - es\"\x9e\x01\n\tDictValue\x129\n\x06fields\x18\x01\x20\x03(\x0b2!.tensor\ - flow.DictValue.FieldsEntryR\x06fields\x1aV\n\x0bFieldsEntry\x12\x10\n\ - \x03key\x18\x01\x20\x01(\tR\x03key\x121\n\x05value\x18\x02\x20\x01(\x0b2\ - \x1b.tensorflow.StructuredValueR\x05value:\x028\x01\"P\n\tPairValue\x12\ - \x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x121\n\x05value\x18\x02\x20\x01\ - (\x0b2\x1b.tensorflow.StructuredValueR\x05value\"T\n\x0fNamedTupleValue\ - \x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12-\n\x06values\x18\x02\ - \x20\x03(\x0b2\x15.tensorflow.PairValueR\x06values\"\x85\x01\n\x0fTensor\ - SpecProto\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x122\n\x05shape\ - \x18\x02\x20\x01(\x0b2\x1c.tensorflow.TensorShapeProtoR\x05shape\x12*\n\ - \x05dtype\x18\x03\x20\x01(\x0e2\x14.tensorflow.DataTypeR\x05dtype\"\xf2\ - \x01\n\x16BoundedTensorSpecProto\x12\x12\n\x04name\x18\x01\x20\x01(\tR\ - \x04name\x122\n\x05shape\x18\x02\x20\x01(\x0b2\x1c.tensorflow.TensorShap\ - eProtoR\x05shape\x12*\n\x05dtype\x18\x03\x20\x01(\x0e2\x14.tensorflow.Da\ - taTypeR\x05dtype\x121\n\x07minimum\x18\x04\x20\x01(\x0b2\x17.tensorflow.\ - TensorProtoR\x07minimum\x121\n\x07maximum\x18\x05\x20\x01(\x0b2\x17.tens\ - orflow.TensorProtoR\x07maximum\"\xb8\x04\n\rTypeSpecProto\x12O\n\x0ftype\ - _spec_class\x18\x01\x20\x01(\x0e2'.tensorflow.TypeSpecProto.TypeSpecClas\ - sR\rtypeSpecClass\x12:\n\ntype_state\x18\x02\x20\x01(\x0b2\x1b.tensorflo\ - w.StructuredValueR\ttypeState\x12/\n\x14type_spec_class_name\x18\x03\x20\ - \x01(\tR\x11typeSpecClassName\x12.\n\x13num_flat_components\x18\x04\x20\ - \x01(\x05R\x11numFlatComponents\"\xb8\x02\n\rTypeSpecClass\x12\x0b\n\x07\ - UNKNOWN\x10\0\x12\x16\n\x12SPARSE_TENSOR_SPEC\x10\x01\x12\x17\n\x13INDEX\ - ED_SLICES_SPEC\x10\x02\x12\x16\n\x12RAGGED_TENSOR_SPEC\x10\x03\x12\x15\n\ - \x11TENSOR_ARRAY_SPEC\x10\x04\x12\x15\n\x11DATA_DATASET_SPEC\x10\x05\x12\ - \x16\n\x12DATA_ITERATOR_SPEC\x10\x06\x12\x11\n\rOPTIONAL_SPEC\x10\x07\ - \x12\x14\n\x10PER_REPLICA_SPEC\x10\x08\x12\x11\n\rVARIABLE_SPEC\x10\t\ - \x12\x16\n\x12ROW_PARTITION_SPEC\x10\n\x12\x18\n\x14REGISTERED_TYPE_SPEC\ - \x10\x0c\x12\x17\n\x13EXTENSION_TYPE_SPEC\x10\r\"\x04\x08\x0b\x10\x0bBWZ\ - Ugithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_p\ - rotos_go_protob\x06proto3\ + \x01(\x0b2\x17.tensorflow.TensorProtoH\0R\x0btensorValue\x12:\n\x0bnumpy\ + _value\x188\x20\x01(\x0b2\x17.tensorflow.TensorProtoH\0R\nnumpyValueB\ + \x06\n\x04kind\"\x0b\n\tNoneValue\"@\n\tListValue\x123\n\x06values\x18\ + \x01\x20\x03(\x0b2\x1b.tensorflow.StructuredValueR\x06values\"A\n\nTuple\ + Value\x123\n\x06values\x18\x01\x20\x03(\x0b2\x1b.tensorflow.StructuredVa\ + 
lueR\x06values\"\x9e\x01\n\tDictValue\x129\n\x06fields\x18\x01\x20\x03(\ + \x0b2!.tensorflow.DictValue.FieldsEntryR\x06fields\x1aV\n\x0bFieldsEntry\ + \x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x121\n\x05value\x18\x02\x20\ + \x01(\x0b2\x1b.tensorflow.StructuredValueR\x05value:\x028\x01\"P\n\tPair\ + Value\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x121\n\x05value\x18\ + \x02\x20\x01(\x0b2\x1b.tensorflow.StructuredValueR\x05value\"T\n\x0fName\ + dTupleValue\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12-\n\x06valu\ + es\x18\x02\x20\x03(\x0b2\x15.tensorflow.PairValueR\x06values\"\x85\x01\n\ + \x0fTensorSpecProto\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x122\n\ + \x05shape\x18\x02\x20\x01(\x0b2\x1c.tensorflow.TensorShapeProtoR\x05shap\ + e\x12*\n\x05dtype\x18\x03\x20\x01(\x0e2\x14.tensorflow.DataTypeR\x05dtyp\ + e\"\xf2\x01\n\x16BoundedTensorSpecProto\x12\x12\n\x04name\x18\x01\x20\ + \x01(\tR\x04name\x122\n\x05shape\x18\x02\x20\x01(\x0b2\x1c.tensorflow.Te\ + nsorShapeProtoR\x05shape\x12*\n\x05dtype\x18\x03\x20\x01(\x0e2\x14.tenso\ + rflow.DataTypeR\x05dtype\x121\n\x07minimum\x18\x04\x20\x01(\x0b2\x17.ten\ + sorflow.TensorProtoR\x07minimum\x121\n\x07maximum\x18\x05\x20\x01(\x0b2\ + \x17.tensorflow.TensorProtoR\x07maximum\"\xb8\x04\n\rTypeSpecProto\x12O\ + \n\x0ftype_spec_class\x18\x01\x20\x01(\x0e2'.tensorflow.TypeSpecProto.Ty\ + peSpecClassR\rtypeSpecClass\x12:\n\ntype_state\x18\x02\x20\x01(\x0b2\x1b\ + .tensorflow.StructuredValueR\ttypeState\x12/\n\x14type_spec_class_name\ + \x18\x03\x20\x01(\tR\x11typeSpecClassName\x12.\n\x13num_flat_components\ + \x18\x04\x20\x01(\x05R\x11numFlatComponents\"\xb8\x02\n\rTypeSpecClass\ + \x12\x0b\n\x07UNKNOWN\x10\0\x12\x16\n\x12SPARSE_TENSOR_SPEC\x10\x01\x12\ + \x17\n\x13INDEXED_SLICES_SPEC\x10\x02\x12\x16\n\x12RAGGED_TENSOR_SPEC\ + \x10\x03\x12\x15\n\x11TENSOR_ARRAY_SPEC\x10\x04\x12\x15\n\x11DATA_DATASE\ + T_SPEC\x10\x05\x12\x16\n\x12DATA_ITERATOR_SPEC\x10\x06\x12\x11\n\rOPTION\ + AL_SPEC\x10\x07\x12\x14\n\x10PER_REPLICA_SPEC\x10\x08\x12\x11\n\rVARIABL\ + E_SPEC\x10\t\x12\x16\n\x12ROW_PARTITION_SPEC\x10\n\x12\x18\n\x14REGISTER\ + ED_TYPE_SPEC\x10\x0c\x12\x17\n\x13EXTENSION_TYPE_SPEC\x10\r\"\x04\x08\ + \x0b\x10\x0bBWZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/prot\ + obuf/for_core_protos_go_protob\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/src/protos/types.rs b/src/protos/types.rs index eb55cddf09..19282f1a37 100644 --- a/src/protos/types.rs +++ b/src/protos/types.rs @@ -199,6 +199,8 @@ pub enum DataType { DT_UINT64 = 23, DT_FLOAT8_E5M2 = 24, DT_FLOAT8_E4M3FN = 25, + DT_INT4 = 29, + DT_UINT4 = 30, DT_FLOAT_REF = 101, DT_DOUBLE_REF = 102, DT_INT32_REF = 103, @@ -224,6 +226,8 @@ pub enum DataType { DT_UINT64_REF = 123, DT_FLOAT8_E5M2_REF = 124, DT_FLOAT8_E4M3FN_REF = 125, + DT_INT4_REF = 129, + DT_UINT4_REF = 130, } impl ::protobuf::ProtobufEnum for DataType { @@ -259,6 +263,8 @@ impl ::protobuf::ProtobufEnum for DataType { 23 => ::std::option::Option::Some(DataType::DT_UINT64), 24 => ::std::option::Option::Some(DataType::DT_FLOAT8_E5M2), 25 => ::std::option::Option::Some(DataType::DT_FLOAT8_E4M3FN), + 29 => ::std::option::Option::Some(DataType::DT_INT4), + 30 => ::std::option::Option::Some(DataType::DT_UINT4), 101 => ::std::option::Option::Some(DataType::DT_FLOAT_REF), 102 => ::std::option::Option::Some(DataType::DT_DOUBLE_REF), 103 => ::std::option::Option::Some(DataType::DT_INT32_REF), @@ -284,6 +290,8 @@ impl 
::protobuf::ProtobufEnum for DataType { 123 => ::std::option::Option::Some(DataType::DT_UINT64_REF), 124 => ::std::option::Option::Some(DataType::DT_FLOAT8_E5M2_REF), 125 => ::std::option::Option::Some(DataType::DT_FLOAT8_E4M3FN_REF), + 129 => ::std::option::Option::Some(DataType::DT_INT4_REF), + 130 => ::std::option::Option::Some(DataType::DT_UINT4_REF), _ => ::std::option::Option::None } } @@ -316,6 +324,8 @@ impl ::protobuf::ProtobufEnum for DataType { DataType::DT_UINT64, DataType::DT_FLOAT8_E5M2, DataType::DT_FLOAT8_E4M3FN, + DataType::DT_INT4, + DataType::DT_UINT4, DataType::DT_FLOAT_REF, DataType::DT_DOUBLE_REF, DataType::DT_INT32_REF, @@ -341,6 +351,8 @@ impl ::protobuf::ProtobufEnum for DataType { DataType::DT_UINT64_REF, DataType::DT_FLOAT8_E5M2_REF, DataType::DT_FLOAT8_E4M3FN_REF, + DataType::DT_INT4_REF, + DataType::DT_UINT4_REF, ]; values } @@ -371,7 +383,7 @@ impl ::protobuf::reflect::ProtobufValue for DataType { static file_descriptor_proto_data: &'static [u8] = b"\ \n%tensorflow/core/framework/types.proto\x12\ntensorflow\"C\n\x0fSeriali\ zedDType\x120\n\x08datatype\x18\x01\x20\x01(\x0e2\x14.tensorflow.DataTyp\ - eR\x08datatype*\x86\x07\n\x08DataType\x12\x0e\n\nDT_INVALID\x10\0\x12\ + eR\x08datatype*\xc6\x07\n\x08DataType\x12\x0e\n\nDT_INVALID\x10\0\x12\ \x0c\n\x08DT_FLOAT\x10\x01\x12\r\n\tDT_DOUBLE\x10\x02\x12\x0c\n\x08DT_IN\ T32\x10\x03\x12\x0c\n\x08DT_UINT8\x10\x04\x12\x0c\n\x08DT_INT16\x10\x05\ \x12\x0b\n\x07DT_INT8\x10\x06\x12\r\n\tDT_STRING\x10\x07\x12\x10\n\x0cDT\ @@ -382,20 +394,22 @@ static file_descriptor_proto_data: &'static [u8] = b"\ COMPLEX128\x10\x12\x12\x0b\n\x07DT_HALF\x10\x13\x12\x0f\n\x0bDT_RESOURCE\ \x10\x14\x12\x0e\n\nDT_VARIANT\x10\x15\x12\r\n\tDT_UINT32\x10\x16\x12\r\ \n\tDT_UINT64\x10\x17\x12\x12\n\x0eDT_FLOAT8_E5M2\x10\x18\x12\x14\n\x10D\ - T_FLOAT8_E4M3FN\x10\x19\x12\x10\n\x0cDT_FLOAT_REF\x10e\x12\x11\n\rDT_DOU\ - BLE_REF\x10f\x12\x10\n\x0cDT_INT32_REF\x10g\x12\x10\n\x0cDT_UINT8_REF\ - \x10h\x12\x10\n\x0cDT_INT16_REF\x10i\x12\x0f\n\x0bDT_INT8_REF\x10j\x12\ - \x11\n\rDT_STRING_REF\x10k\x12\x14\n\x10DT_COMPLEX64_REF\x10l\x12\x10\n\ - \x0cDT_INT64_REF\x10m\x12\x0f\n\x0bDT_BOOL_REF\x10n\x12\x10\n\x0cDT_QINT\ - 8_REF\x10o\x12\x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\ - \x12\x13\n\x0fDT_BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\ - \n\x0eDT_QUINT16_REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11DT_\ - COMPLEX128_REF\x10v\x12\x0f\n\x0bDT_HALF_REF\x10w\x12\x13\n\x0fDT_RESOUR\ - CE_REF\x10x\x12\x12\n\x0eDT_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\ - \x10z\x12\x11\n\rDT_UINT64_REF\x10{\x12\x16\n\x12DT_FLOAT8_E5M2_REF\x10|\ - \x12\x18\n\x14DT_FLOAT8_E4M3FN_REF\x10}Bz\n\x18org.tensorflow.frameworkB\ - \x0bTypesProtosP\x01ZLgithub.com/tensorflow/tensorflow/tensorflow/go/cor\ - e/framework/types_go_proto\xf8\x01\x01b\x06proto3\ + T_FLOAT8_E4M3FN\x10\x19\x12\x0b\n\x07DT_INT4\x10\x1d\x12\x0c\n\x08DT_UIN\ + T4\x10\x1e\x12\x10\n\x0cDT_FLOAT_REF\x10e\x12\x11\n\rDT_DOUBLE_REF\x10f\ + \x12\x10\n\x0cDT_INT32_REF\x10g\x12\x10\n\x0cDT_UINT8_REF\x10h\x12\x10\n\ + \x0cDT_INT16_REF\x10i\x12\x0f\n\x0bDT_INT8_REF\x10j\x12\x11\n\rDT_STRING\ + _REF\x10k\x12\x14\n\x10DT_COMPLEX64_REF\x10l\x12\x10\n\x0cDT_INT64_REF\ + \x10m\x12\x0f\n\x0bDT_BOOL_REF\x10n\x12\x10\n\x0cDT_QINT8_REF\x10o\x12\ + \x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\x12\x13\n\x0fDT\ + _BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\n\x0eDT_QUINT16\ + _REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11DT_COMPLEX128_REF\ + 
\x10v\x12\x0f\n\x0bDT_HALF_REF\x10w\x12\x13\n\x0fDT_RESOURCE_REF\x10x\ + \x12\x12\n\x0eDT_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\x10z\x12\x11\ + \n\rDT_UINT64_REF\x10{\x12\x16\n\x12DT_FLOAT8_E5M2_REF\x10|\x12\x18\n\ + \x14DT_FLOAT8_E4M3FN_REF\x10}\x12\x10\n\x0bDT_INT4_REF\x10\x81\x01\x12\ + \x11\n\x0cDT_UINT4_REF\x10\x82\x01Bz\n\x18org.tensorflow.frameworkB\x0bT\ + ypesProtosP\x01ZLgithub.com/tensorflow/tensorflow/tensorflow/go/core/fra\ + mework/types_go_proto\xf8\x01\x01b\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/tensorflow-op-codegen/src/protos/types.rs b/tensorflow-op-codegen/src/protos/types.rs index eb55cddf09..19282f1a37 100644 --- a/tensorflow-op-codegen/src/protos/types.rs +++ b/tensorflow-op-codegen/src/protos/types.rs @@ -199,6 +199,8 @@ pub enum DataType { DT_UINT64 = 23, DT_FLOAT8_E5M2 = 24, DT_FLOAT8_E4M3FN = 25, + DT_INT4 = 29, + DT_UINT4 = 30, DT_FLOAT_REF = 101, DT_DOUBLE_REF = 102, DT_INT32_REF = 103, @@ -224,6 +226,8 @@ pub enum DataType { DT_UINT64_REF = 123, DT_FLOAT8_E5M2_REF = 124, DT_FLOAT8_E4M3FN_REF = 125, + DT_INT4_REF = 129, + DT_UINT4_REF = 130, } impl ::protobuf::ProtobufEnum for DataType { @@ -259,6 +263,8 @@ impl ::protobuf::ProtobufEnum for DataType { 23 => ::std::option::Option::Some(DataType::DT_UINT64), 24 => ::std::option::Option::Some(DataType::DT_FLOAT8_E5M2), 25 => ::std::option::Option::Some(DataType::DT_FLOAT8_E4M3FN), + 29 => ::std::option::Option::Some(DataType::DT_INT4), + 30 => ::std::option::Option::Some(DataType::DT_UINT4), 101 => ::std::option::Option::Some(DataType::DT_FLOAT_REF), 102 => ::std::option::Option::Some(DataType::DT_DOUBLE_REF), 103 => ::std::option::Option::Some(DataType::DT_INT32_REF), @@ -284,6 +290,8 @@ impl ::protobuf::ProtobufEnum for DataType { 123 => ::std::option::Option::Some(DataType::DT_UINT64_REF), 124 => ::std::option::Option::Some(DataType::DT_FLOAT8_E5M2_REF), 125 => ::std::option::Option::Some(DataType::DT_FLOAT8_E4M3FN_REF), + 129 => ::std::option::Option::Some(DataType::DT_INT4_REF), + 130 => ::std::option::Option::Some(DataType::DT_UINT4_REF), _ => ::std::option::Option::None } } @@ -316,6 +324,8 @@ impl ::protobuf::ProtobufEnum for DataType { DataType::DT_UINT64, DataType::DT_FLOAT8_E5M2, DataType::DT_FLOAT8_E4M3FN, + DataType::DT_INT4, + DataType::DT_UINT4, DataType::DT_FLOAT_REF, DataType::DT_DOUBLE_REF, DataType::DT_INT32_REF, @@ -341,6 +351,8 @@ impl ::protobuf::ProtobufEnum for DataType { DataType::DT_UINT64_REF, DataType::DT_FLOAT8_E5M2_REF, DataType::DT_FLOAT8_E4M3FN_REF, + DataType::DT_INT4_REF, + DataType::DT_UINT4_REF, ]; values } @@ -371,7 +383,7 @@ impl ::protobuf::reflect::ProtobufValue for DataType { static file_descriptor_proto_data: &'static [u8] = b"\ \n%tensorflow/core/framework/types.proto\x12\ntensorflow\"C\n\x0fSeriali\ zedDType\x120\n\x08datatype\x18\x01\x20\x01(\x0e2\x14.tensorflow.DataTyp\ - eR\x08datatype*\x86\x07\n\x08DataType\x12\x0e\n\nDT_INVALID\x10\0\x12\ + eR\x08datatype*\xc6\x07\n\x08DataType\x12\x0e\n\nDT_INVALID\x10\0\x12\ \x0c\n\x08DT_FLOAT\x10\x01\x12\r\n\tDT_DOUBLE\x10\x02\x12\x0c\n\x08DT_IN\ T32\x10\x03\x12\x0c\n\x08DT_UINT8\x10\x04\x12\x0c\n\x08DT_INT16\x10\x05\ \x12\x0b\n\x07DT_INT8\x10\x06\x12\r\n\tDT_STRING\x10\x07\x12\x10\n\x0cDT\ @@ -382,20 +394,22 @@ static file_descriptor_proto_data: &'static [u8] = b"\ COMPLEX128\x10\x12\x12\x0b\n\x07DT_HALF\x10\x13\x12\x0f\n\x0bDT_RESOURCE\ 
\x10\x14\x12\x0e\n\nDT_VARIANT\x10\x15\x12\r\n\tDT_UINT32\x10\x16\x12\r\ \n\tDT_UINT64\x10\x17\x12\x12\n\x0eDT_FLOAT8_E5M2\x10\x18\x12\x14\n\x10D\ - T_FLOAT8_E4M3FN\x10\x19\x12\x10\n\x0cDT_FLOAT_REF\x10e\x12\x11\n\rDT_DOU\ - BLE_REF\x10f\x12\x10\n\x0cDT_INT32_REF\x10g\x12\x10\n\x0cDT_UINT8_REF\ - \x10h\x12\x10\n\x0cDT_INT16_REF\x10i\x12\x0f\n\x0bDT_INT8_REF\x10j\x12\ - \x11\n\rDT_STRING_REF\x10k\x12\x14\n\x10DT_COMPLEX64_REF\x10l\x12\x10\n\ - \x0cDT_INT64_REF\x10m\x12\x0f\n\x0bDT_BOOL_REF\x10n\x12\x10\n\x0cDT_QINT\ - 8_REF\x10o\x12\x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\ - \x12\x13\n\x0fDT_BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\ - \n\x0eDT_QUINT16_REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11DT_\ - COMPLEX128_REF\x10v\x12\x0f\n\x0bDT_HALF_REF\x10w\x12\x13\n\x0fDT_RESOUR\ - CE_REF\x10x\x12\x12\n\x0eDT_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\ - \x10z\x12\x11\n\rDT_UINT64_REF\x10{\x12\x16\n\x12DT_FLOAT8_E5M2_REF\x10|\ - \x12\x18\n\x14DT_FLOAT8_E4M3FN_REF\x10}Bz\n\x18org.tensorflow.frameworkB\ - \x0bTypesProtosP\x01ZLgithub.com/tensorflow/tensorflow/tensorflow/go/cor\ - e/framework/types_go_proto\xf8\x01\x01b\x06proto3\ + T_FLOAT8_E4M3FN\x10\x19\x12\x0b\n\x07DT_INT4\x10\x1d\x12\x0c\n\x08DT_UIN\ + T4\x10\x1e\x12\x10\n\x0cDT_FLOAT_REF\x10e\x12\x11\n\rDT_DOUBLE_REF\x10f\ + \x12\x10\n\x0cDT_INT32_REF\x10g\x12\x10\n\x0cDT_UINT8_REF\x10h\x12\x10\n\ + \x0cDT_INT16_REF\x10i\x12\x0f\n\x0bDT_INT8_REF\x10j\x12\x11\n\rDT_STRING\ + _REF\x10k\x12\x14\n\x10DT_COMPLEX64_REF\x10l\x12\x10\n\x0cDT_INT64_REF\ + \x10m\x12\x0f\n\x0bDT_BOOL_REF\x10n\x12\x10\n\x0cDT_QINT8_REF\x10o\x12\ + \x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\x12\x13\n\x0fDT\ + _BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\n\x0eDT_QUINT16\ + _REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11DT_COMPLEX128_REF\ + \x10v\x12\x0f\n\x0bDT_HALF_REF\x10w\x12\x13\n\x0fDT_RESOURCE_REF\x10x\ + \x12\x12\n\x0eDT_VARIANT_REF\x10y\x12\x11\n\rDT_UINT32_REF\x10z\x12\x11\ + \n\rDT_UINT64_REF\x10{\x12\x16\n\x12DT_FLOAT8_E5M2_REF\x10|\x12\x18\n\ + \x14DT_FLOAT8_E4M3FN_REF\x10}\x12\x10\n\x0bDT_INT4_REF\x10\x81\x01\x12\ + \x11\n\x0cDT_UINT4_REF\x10\x82\x01Bz\n\x18org.tensorflow.frameworkB\x0bT\ + ypesProtosP\x01ZLgithub.com/tensorflow/tensorflow/tensorflow/go/core/fra\ + mework/types_go_proto\xf8\x01\x01b\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; diff --git a/tensorflow-proto-codegen/src/main.rs b/tensorflow-proto-codegen/src/main.rs index 9022cce8de..217a4d6b57 100644 --- a/tensorflow-proto-codegen/src/main.rs +++ b/tensorflow-proto-codegen/src/main.rs @@ -9,6 +9,24 @@ fn main() -> Result<(), Box> { let args: Vec = env::args().collect(); let tensorflow_folder = &args[1]; let output_folder = Path::new(&args[2]); + protoc_rust::Codegen::new() + .out_dir( + output_folder + .join("src/protos") + .to_str() + .ok_or("Unable to format output path for main crate")?, + ) + .inputs( + [ + "third_party/xla/third_party/tsl/tsl/protobuf/coordination_config.proto", + "third_party/xla/third_party/tsl/tsl/protobuf/rpc_options.proto", + ] + .iter() + .map(|p| format!("{}/{}", tensorflow_folder, p)) + .collect::>(), + ) + .include(Path::new(tensorflow_folder).join("third_party/xla/third_party/tsl")) + .run()?; protoc_rust::Codegen::new() .out_dir( output_folder @@ -46,14 +64,13 @@ fn main() -> Result<(), Box> { "tensorflow/core/protobuf/struct.proto", "tensorflow/core/protobuf/trackable_object_graph.proto", 
"tensorflow/core/protobuf/verifier_config.proto", - "third_party/xla/third_party/tsl/tsl/protobuf/coordination_config.proto", - "third_party/xla/third_party/tsl/tsl/protobuf/rpc_options.proto", ] .iter() .map(|p| format!("{}/{}", tensorflow_folder, p)) .collect::>(), ) .include(tensorflow_folder) + .include(Path::new(tensorflow_folder).join("third_party/xla/third_party/tsl")) .run()?; protoc_rust::Codegen::new() .out_dir( From cd16d819b7f89dcd7cff7d5482e24fd4ccfadb18 Mon Sep 17 00:00:00 2001 From: Adam Crume Date: Sat, 2 Dec 2023 18:05:58 -0800 Subject: [PATCH 06/12] Remove deprecated c_enum! patterns --- src/lib.rs | 50 ++++++++++++++++++++------------------------------ 1 file changed, 20 insertions(+), 30 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index dbb1f233bf..de8ba4e0e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -168,18 +168,6 @@ macro_rules! c_enum { $name:ident = $num:expr,)* }) => { c_enum!($c_name, $(#[$enum_attr])* $enum_name { $( $(#[$attr])* $name = $num),* }); }; - // Deprecated pattern. - ($doc:expr, $c_name:ident, $(#[$enum_attr:meta])* $enum_name:ident { $( $(#[$attr:meta])* value - $name:ident = $num:expr),* }) => { - c_enum!($c_name, #[doc = $doc] $(#[$enum_attr])* - $enum_name { $( $(#[$attr])* $name = $num),* }); - }; - // Deprecated pattern. - ($doc:expr, $c_name:ident, $(#[$enum_attr:meta])* $enum_name:ident { $( $(#[$attr:meta])* value - $name:ident = $num:expr,)* }) => { - c_enum!($c_name, #[doc = $doc] $(#[$enum_attr])* - $enum_name { $( $(#[$attr])* $name = $num),* }); - } } //////////////////////// @@ -225,41 +213,43 @@ pub use tf::library; //////////////////////// -c_enum!("Error values that can be returned.", TF_Code, Code { +c_enum!(TF_Code, +/// Error values that can be returned. +Code { /// Not an error; returned on success. - value Ok = 0, + Ok = 0, /// The operation was cancelled (typically by the caller). - value Cancelled = 1, + Cancelled = 1, /// Unknown error. An example of where this error may be returned is /// if a Status value received from another address space belongs to /// an error-space that is not known in this address space. Also /// errors raised by APIs that do not return enough error information /// may be converted to this error. - value Unknown = 2, + Unknown = 2, /// Client specified an invalid argument. Note that this differs /// from FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments /// that are problematic regardless of the state of the system /// (e.g., a malformed file name). - value InvalidArgument = 3, + InvalidArgument = 3, /// Deadline expired before operation could complete. For operations /// that change the state of the system, this error may be returned /// even if the operation has completed successfully. For example, a /// successful response from a server could have been delayed long /// enough for the deadline to expire. - value DeadlineExceeded = 4, + DeadlineExceeded = 4, /// Some requested entity (e.g., file or directory) was not found. /// For privacy reasons, this code *may* be returned when the client /// does not have the access right to the entity. - value NotFound = 5, + NotFound = 5, /// Some entity that we attempted to create (e.g., file or directory) /// already exists. - value AlreadyExists = 6, + AlreadyExists = 6, /// The caller does not have permission to execute the specified /// operation. PERMISSION_DENIED must not be used for rejections @@ -267,11 +257,11 @@ c_enum!("Error values that can be returned.", TF_Code, Code { /// instead for those errors). 
PERMISSION_DENIED must not be /// used if the caller can not be identified (use UNAUTHENTICATED /// instead for those errors). - value PermissionDenied = 7, + PermissionDenied = 7, /// Some resource has been exhausted, perhaps a per-user quota, or /// perhaps the entire file system is out of space. - value ResourceExhausted = 8, + ResourceExhausted = 8, /// Operation was rejected because the system is not in a state /// required for the operation's execution. For example, directory @@ -292,14 +282,14 @@ c_enum!("Error values that can be returned.", TF_Code, Code { /// REST Get/Update/Delete on a resource and the resource on the /// server does not match the condition. E.g., conflicting /// read-modify-write on the same resource. - value FailedPrecondition = 9, + FailedPrecondition = 9, /// The operation was aborted, typically due to a concurrency issue /// like sequencer check failures, transaction aborts, etc. /// /// See litmus test above for deciding between FAILED_PRECONDITION, /// ABORTED, and UNAVAILABLE. - value Aborted = 10, + Aborted = 10, /// Operation tried to iterate past the valid input range. E.g., seeking or /// reading past end of file. @@ -316,15 +306,15 @@ c_enum!("Error values that can be returned.", TF_Code, Code { /// error) when it applies so that callers who are iterating through /// a space can easily look for an OUT_OF_RANGE error to detect when /// they are done. - value OutOfRange = 11, + OutOfRange = 11, /// Operation is not implemented or not supported/enabled in this service. - value Unimplemented = 12, + Unimplemented = 12, /// Internal errors. Means some invariants expected by underlying /// system has been broken. If you see one of these errors, /// something is very broken. - value Internal = 13, + Internal = 13, /// The service is currently unavailable. This is a most likely a /// transient condition and may be corrected by retrying with @@ -332,14 +322,14 @@ c_enum!("Error values that can be returned.", TF_Code, Code { /// /// See litmus test above for deciding between FAILED_PRECONDITION, /// ABORTED, and UNAVAILABLE. - value Unavailable = 14, + Unavailable = 14, /// Unrecoverable data loss or corruption. - value DataLoss = 15, + DataLoss = 15, /// The request does not have valid authentication credentials for the /// operation. 
-    value Unauthenticated = 16,
+    Unauthenticated = 16,
 });

 ////////////////////////

From ce62770f0c72490bbdbef72684d07a2f37e7bd62 Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Mon, 12 Aug 2024 19:47:39 -0700
Subject: [PATCH 07/12] Bump required Python version and add tf_keras
 requirement

---
 .github/workflows/ci.yml           | 2 +-
 .github/workflows/requirements.txt | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2d1808b393..2afe422758 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,7 +31,7 @@ jobs:
       - name: Setup Python # Set Python version
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: "3.10"
       # Install pip and pytest
       - name: Install dependencies
         run: |
diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt
index 0d17710148..e998807252 100644
--- a/.github/workflows/requirements.txt
+++ b/.github/workflows/requirements.txt
@@ -1 +1,2 @@
 tensorflow == 2.17.0
+tf_keras == 2.17.0

From f0b486da0447893de5ab14a79e9c29b1281100d5 Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Mon, 2 Sep 2024 16:07:06 -0700
Subject: [PATCH 08/12] Suppress most warnings in test-all

---
 test-all | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/test-all b/test-all
index ada9c9e29f..dde417c59e 100755
--- a/test-all
+++ b/test-all
@@ -44,6 +44,9 @@ if [[ "${version_tensorflow_sys_crate}" != "${version_tensorflow_sys_readme}" ]]
   exit 1
 fi

+# GitHub seems to choke on the large number of warnings from dependencies.
+export RUSTFLAGS="-Awarnings"
+
 # Legacy Keras required for now because Keras 3 requires exporting models as
 # Keras format, which the C API can't read:
 # https://github.com/tensorflow/tensorflow/issues/70514
@@ -58,8 +61,8 @@ run cargo run --example regression
 run cargo run --example xor
 run cargo run --features tensorflow_unstable --example expressions
 run cargo run --features eager --example mobilenetv3
-run cargo doc -vv --features experimental,tensorflow_unstable,ndarray,eager
-run cargo doc -vv --features experimental,tensorflow_unstable,ndarray,eager,private-docs-rs
+run cargo doc --features experimental,tensorflow_unstable,ndarray,eager
+run cargo doc --features experimental,tensorflow_unstable,ndarray,eager,private-docs-rs
 # TODO(#66): Re-enable: (cd tensorflow-sys && cargo test -vv -j 1)
 (cd tensorflow-sys && run cargo run --example multiplication)
 (cd tensorflow-sys && run cargo run --example tf_version)

From f77f2d90e86f7abe864f34ec2f1da8822d582ef9 Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Sat, 28 Sep 2024 21:02:06 -0700
Subject: [PATCH 09/12] Debug GitHub low disk space error

---
 check-disk-space |  7 +++++++
 test-all         | 40 ++++++++++++++++++++++++++++++++--------
 2 files changed, 39 insertions(+), 8 deletions(-)
 create mode 100755 check-disk-space

diff --git a/check-disk-space b/check-disk-space
new file mode 100755
index 0000000000..98ff4e756c
--- /dev/null
+++ b/check-disk-space
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -euo pipefail
+
+df -h || true
+du -sh || true
+du -sh /* || true
diff --git a/test-all b/test-all
index dde417c59e..549b7a1351 100755
--- a/test-all
+++ b/test-all
@@ -9,6 +9,15 @@ function run {
   echo
 }

+function clean {
+  if [ "$CI" = "true" ]; then
+    rm -rf target/debug/examples
+    rm -rf target/debug/incremental
+  fi
+  df -h || true
+  du -sh target/debug/build/* || true
+}
+
 # Make sure the Tensorflow version in the -sys build script matches the one in
 # the run-valgrind script.
 version_build_script=`grep "const VERSION" tensorflow-sys/build.rs | sed 's|.*"\([^"]*\)";|\1|g'`
@@ -51,28 +60,40 @@ export RUSTFLAGS="-Awarnings"
 # Keras format, which the C API can't read:
 # https://github.com/tensorflow/tensorflow/issues/70514
 TF_USE_LEGACY_KERAS=1 run python3 examples/mobilenetv3/create_model.py
+run df -h

 # TODO(#391): Re-enable: (cd test_resources/library && ./build-test-op)

 run cargo fmt --all -- --check
+clean
 run cargo test -vv -j 2
+clean
 run cargo test -vv -j 2 --features eager
+clean
 run cargo test -vv -j 2 --features tensorflow_unstable
+clean
 run cargo test -vv -j 2 --features ndarray
+clean
 run cargo run --example regression
+clean
 run cargo run --example xor
+clean
 run cargo run --features tensorflow_unstable --example expressions
+clean
 run cargo run --features eager --example mobilenetv3
+clean
 run cargo doc --features experimental,tensorflow_unstable,ndarray,eager
+clean
 run cargo doc --features experimental,tensorflow_unstable,ndarray,eager,private-docs-rs
+clean
 # TODO(#66): Re-enable: (cd tensorflow-sys && cargo test -vv -j 1)
-(cd tensorflow-sys && run cargo run --example multiplication)
-(cd tensorflow-sys && run cargo run --example tf_version)
-(cd tensorflow-sys && run cargo doc -vv)
+(cd tensorflow-sys && run cargo run --example multiplication && clean)
+(cd tensorflow-sys && run cargo run --example tf_version && clean)
+(cd tensorflow-sys && run cargo doc -vv && clean)

-run cargo clippy
-(cd tensorflow-sys && run cargo clippy)
-(cd tensorflow-op-codegen && run cargo clippy)
-(cd tensorflow-proto-codegen && run cargo clippy)
-(cd tensorflow-internal-macros && run cargo clippy)
+# run cargo clippy
+# (cd tensorflow-sys && run cargo clippy)
+# (cd tensorflow-op-codegen && run cargo clippy)
+# (cd tensorflow-proto-codegen && run cargo clippy)
+# (cd tensorflow-internal-macros && run cargo clippy)

 for file in $(find . -name target -prune -o -name '*.rs' -print); do
   bad_deprecations="$(rustfmt --emit stdout --config max_width=1000 "$file" | grep '#\[deprecated' | grep -E -v '([^"\\]|\\.|"([^"\\]|\\.)*")*since' || true)"
@@ -88,3 +109,6 @@ for file in $(find . -name target -prune -o -name '*.rs' -print); do
     exit 1
   fi
 done
+
+df -h
+du -sh target/debug/build/*

From b5bdfffb629243fc9b44d17a23629ec09f21e59e Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Tue, 5 Nov 2024 20:18:43 -0800
Subject: [PATCH 10/12] Upgrade to TensorFlow 2.17.1

---
 .github/workflows/requirements.txt | 4 ++--
 tensorflow-sys/build.rs            | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt
index e998807252..8ee319a93f 100644
--- a/.github/workflows/requirements.txt
+++ b/.github/workflows/requirements.txt
@@ -1,2 +1,2 @@
-tensorflow == 2.17.0
-tf_keras == 2.17.0
+tensorflow == 2.17.1
+tf_keras == 2.17.1
diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs
index ad0b9a248a..5b29318771 100644
--- a/tensorflow-sys/build.rs
+++ b/tensorflow-sys/build.rs
@@ -24,8 +24,8 @@ const REPOSITORY: &str = "https://github.com/tensorflow/tensorflow.git";
 const FRAMEWORK_TARGET: &str = "tensorflow:libtensorflow_framework";
 const TARGET: &str = "tensorflow:libtensorflow";
 // `VERSION` and `TAG` are separate because the tag is not always `'v' + VERSION`.
-const VERSION: &str = "2.17.0";
-const TAG: &str = "v2.17.0";
+const VERSION: &str = "2.17.1";
+const TAG: &str = "v2.17.1";
 const MIN_BAZEL: &str = "3.7.2";

 macro_rules! get(($name:expr) => (ok!(env::var($name))));

From 508a5d15980147954b0b3a0314df003d0913ce22 Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Sat, 23 Nov 2024 15:54:17 -0800
Subject: [PATCH 11/12] Upgrade to TensorFlow 2.18.0

---
 .github/workflows/requirements.txt | 4 ++--
 tensorflow-sys/build.rs            | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt
index 8ee319a93f..9440aa347d 100644
--- a/.github/workflows/requirements.txt
+++ b/.github/workflows/requirements.txt
@@ -1,2 +1,2 @@
-tensorflow == 2.17.1
-tf_keras == 2.17.1
+tensorflow == 2.18.0
+tf_keras == 2.18.0
diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs
index 5b29318771..1fda27a3b5 100644
--- a/tensorflow-sys/build.rs
+++ b/tensorflow-sys/build.rs
@@ -24,8 +24,8 @@ const REPOSITORY: &str = "https://github.com/tensorflow/tensorflow.git";
 const FRAMEWORK_TARGET: &str = "tensorflow:libtensorflow_framework";
 const TARGET: &str = "tensorflow:libtensorflow";
 // `VERSION` and `TAG` are separate because the tag is not always `'v' + VERSION`.
-const VERSION: &str = "2.17.1";
-const TAG: &str = "v2.17.1";
+const VERSION: &str = "2.18.0";
+const TAG: &str = "v2.18.0";
 const MIN_BAZEL: &str = "3.7.2";

 macro_rules! get(($name:expr) => (ok!(env::var($name))));

From 391f36fbe1775c1955225d1e739ce5d0d066ff56 Mon Sep 17 00:00:00 2001
From: Adam Crume
Date: Tue, 26 Nov 2024 19:25:23 -0800
Subject: [PATCH 12/12] Drop -march=native for macOS

The cause is unclear, but it may be related to this error:

external/boringssl/src/crypto/cpu_aarch64_apple.c:56:2: error: "NEON and
crypto extensions should be statically available."
---
 tensorflow-sys/build.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tensorflow-sys/build.rs b/tensorflow-sys/build.rs
index 1fda27a3b5..0d977c6c2b 100644
--- a/tensorflow-sys/build.rs
+++ b/tensorflow-sys/build.rs
@@ -381,14 +381,15 @@ fn build_from_src() {
         "".to_string()
     };
     run("bazel", |command| {
-        command
+        let mut cmd = command
             .current_dir(&source)
             .arg("build")
             .arg(format!("--jobs={}", get!("NUM_JOBS")))
-            .arg("--compilation_mode=opt")
-            .arg("--copt=-march=native")
-            .args(bazel_args_string.split_whitespace())
-            .arg(&target)
+            .arg("--compilation_mode=opt");
+        if target_os() != "macos" {
+            cmd = cmd.arg("--copt=-march=native");
+        }
+        cmd.args(bazel_args_string.split_whitespace()).arg(&target)
     });
     let framework_target_bazel_bin = source.join("bazel-bin").join(framework_target_path);
     log!(