diff --git a/onnxruntime/python/onnxruntime_pybind_mlvalue.cc b/onnxruntime/python/onnxruntime_pybind_mlvalue.cc
index 92396bb09bd4c..5742b4db42512 100644
--- a/onnxruntime/python/onnxruntime_pybind_mlvalue.cc
+++ b/onnxruntime/python/onnxruntime_pybind_mlvalue.cc
@@ -280,7 +280,7 @@ void DmlToCpuMemCpy(void* dst, const void* src, size_t num_bytes) {
   uint32_t readback_heap_size = gsl::narrow_cast<uint32_t>(sizeof(readback_heap));
   ORT_THROW_IF_FAILED(d3d12_device->GetPrivateData(dml_readback_heap_guid, &readback_heap_size, &readback_heap));
 
-  // ReadbackFromGpu already syncs with the CPU and waits for the copy to be completed, so we don’t need to sync after
+  // ReadbackFromGpu already syncs with the CPU and waits for the copy to be completed, so we dont need to sync after
   // this call
   readback_heap->ReadbackFromGpu(
       gsl::make_span(static_cast<std::byte*>(dst), num_bytes),
@@ -428,7 +428,7 @@ MLDataType NumpyTypeToOnnxRuntimeTensorType(int numpy_type) {
       // Special, not a C type expands to enum value of 16
       {NPY_FLOAT16, DataTypeImpl::GetType<MLFloat16>()},
       {NPY_DOUBLE, DataTypeImpl::GetType<double>()},
-      // We don’t want to use size specific types such
+      // We dont want to use size specific types such
       // as NPY_INT32 bc they are not enums but hash defines
       // which may map into other enums and may conflict with other entries here
       // also NPY docs define these sizes as platform specific, thus we
@@ -581,6 +581,7 @@ static void CopyDataToTensor(PyArrayObject* darray, int npy_type, Tensor& tensor
     for (int i = 0; i < total_items; ++i, src += item_size) {
       // Python unicode strings are assumed to be USC-4. Strings are stored as UTF-8.
       PyObject* item = PyArray_GETITEM(darray, src);
+      UniqueDecRefPtr itemGuard(item, DecRefFn());
       PyObject* pStr = PyObject_Str(item);
       UniqueDecRefPtr strGuard(pStr, DecRefFn());
       dst[i] = py::reinterpret_borrow<py::str>(pStr);
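
The first two hunks only touch comments (dropping a non-ASCII apostrophe). The substantive change is the third hunk: PyArray_GETITEM returns a new Python reference, and the copy loop never released it, leaking one object per string element; wrapping it in the same UniqueDecRefPtr guard already used for pStr releases it when the iteration ends. Below is a minimal, self-contained sketch of that guard pattern. The DecRefFn / UniqueDecRefPtr definitions are assumptions modeled on the aliases this file already uses, and PyLong_FromLong stands in for PyArray_GETITEM (both return a new reference); this is an illustration, not the file's actual declarations.

  #include <Python.h>
  #include <memory>

  // Hypothetical re-creation of the guard types used in the diff; the real
  // definitions live elsewhere in onnxruntime and may differ in detail.
  struct DecRefFn {
    void operator()(PyObject* p) const { Py_XDECREF(p); }  // no-op on nullptr
  };
  using UniqueDecRefPtr = std::unique_ptr<PyObject, DecRefFn>;

  int main() {
    Py_Initialize();
    for (int i = 0; i < 3; ++i) {
      // Stand-in for PyArray_GETITEM: returns a *new* reference. Before the
      // fix, nothing decremented it, so every iteration leaked one object.
      PyObject* item = PyLong_FromLong(i);
      UniqueDecRefPtr itemGuard(item, DecRefFn());  // the guard the diff adds

      PyObject* pStr = PyObject_Str(item);          // also a new reference
      UniqueDecRefPtr strGuard(pStr, DecRefFn());   // was already guarded

      // ... use pStr (e.g. encode to UTF-8 and copy into the tensor) ...
    }  // guards Py_XDECREF both objects here, even if an exception unwinds
    Py_FinalizeEx();
    return 0;
  }

Compile against the Python headers (e.g. c++ sketch.cc $(python3-config --cflags --embed --ldflags)). The point of the pattern is that tying Py_XDECREF to scope exit makes the loop leak-free and exception-safe without any manual cleanup paths.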