diff --git a/torch/csrc/autograd/variable.cpp b/torch/csrc/autograd/variable.cpp
index 2102665a9bb0c..2de0ddf0bec80 100644
--- a/torch/csrc/autograd/variable.cpp
+++ b/torch/csrc/autograd/variable.cpp
@@ -22,7 +22,7 @@ namespace torch {
 namespace autograd {
 
 Variable::Impl::Impl(at::Tensor data, bool requires_grad, Edge gradient_edge)
-    : TensorImpl(data.type().type_id(), data.type().typeMeta(), data.type().allocator(), /* is variable */ true),
+    : TensorImpl(data.type_id(), data.dtype(), /*allocator=*/nullptr, /* is variable */ true),
       data_(std::move(data)),
       grad_fn_(std::move(gradient_edge.function)),
       requires_grad_(false),
diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h
index 1fb63c8bead2e..75d6d815e315b 100644
--- a/torch/csrc/autograd/variable.h
+++ b/torch/csrc/autograd/variable.h
@@ -319,7 +319,7 @@ struct TORCH_API Variable::Impl : public at::TensorImpl {
   /// variables.
   void set_requires_grad(bool requires_grad) override {
     AT_CHECK(
-        !requires_grad || at::isFloatingType(type().scalarType()),
+        !requires_grad || at::isFloatingType(at::typeMetaToScalarType(dtype())),
         "Only Tensors of floating point dtype can require gradients");
     requires_grad_ = requires_grad;
   }
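
For context, a minimal standalone sketch of the pattern this diff switches to: instead of routing through the Type object (type().scalarType(), type().typeMeta(), etc.), the scalar type is recovered directly from the TypeMeta returned by dtype() via at::typeMetaToScalarType, and then checked with at::isFloatingType. This is only an illustration assuming a C++ program linked against ATen/libtorch, not part of the patch itself.

// sketch.cpp -- illustrative only; assumes ATen headers and libs are available
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::ones({2, 2}, at::kFloat);

  // Old pattern (the one the diff removes): dispatch through the Type object.
  //   at::ScalarType st = t.type().scalarType();

  // New pattern (the one the diff adopts): convert the TypeMeta from dtype().
  at::ScalarType st = at::typeMetaToScalarType(t.dtype());

  // Same predicate the diff uses to guard set_requires_grad.
  std::cout << "is floating point: " << at::isFloatingType(st) << std::endl;
  return 0;
}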