Correct the wrong indentation of parameters
NALLEIN committed Jul 16, 2020
1 parent 9a64e09 commit 6580754
Showing 14 changed files with 33 additions and 35 deletions.
2 changes: 1 addition & 1 deletion modules/dnn/src/op_webgpu.cpp
@@ -20,7 +20,7 @@ void copyToMat(Mat &dst, webgpu::Tensor &src)
     CV_Assert(dst.type() == CV_32F);
 
     std::vector<int> shape = src.getShape();
-    void *data = const_cast<void *>(src.map() );
+    void *data = const_cast<void *>(src.mapRead() );
     Mat tmp(shape, CV_32F, data);
     tmp.copyTo(dst);
     src.unMap();
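The rename above swaps map() for mapRead(), the read-only accessor declared in tensor.hpp later in this commit. A minimal sketch of the readback pattern copyToMat() implements, assuming the Tensor API shown below (getShape(), mapRead(), unMap()); the free-standing function name is illustrative:

    // Copy a webgpu::Tensor back into a cv::Mat.
    void copyTensorToMat(cv::Mat& dst, cv::dnn::webgpu::Tensor& src)
    {
        CV_Assert(dst.type() == CV_32F);
        std::vector<int> shape = src.getShape();
        // mapRead() hands out a const pointer; cv::Mat wants void*, hence the cast.
        void* data = const_cast<void*>(src.mapRead());
        cv::Mat tmp(shape, CV_32F, data);  // wraps the mapped memory, no copy yet
        tmp.copyTo(dst);                   // deep copy into the caller's Mat
        src.unMap();                       // release the mapping afterwards
    }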
2 changes: 1 addition & 1 deletion modules/dnn/src/webgpu/dawnAPITest/helloCompute.cpp
@@ -183,7 +183,7 @@ wgpu::Device createCppDawnDevice() {
     return wgpu::Device::Acquire(backendDevice);
 }
 
-wgpu::CreateBufferMappedResult createBufferMappedFromData( wgpu::Device& device,
+wgpu::CreateBufferMappedResult createBufferMappedFromData(wgpu::Device& device,
                                                           const void* data,
                                                           size_t size,
                                                           wgpu::BufferUsage usage){
2 changes: 1 addition & 1 deletion modules/dnn/src/webgpu/dawnWrapperTest/CMakeLists.txt
@@ -20,7 +20,7 @@ file(GLOB DNN
     ${OPENCV_SOURCE_DIR}/modules/dnn/src/webgpu/dawn/*.cpp
 )
 
-add_executable( softmaxTest op_softmax_test.cpp
+add_executable(softmaxTest op_softmax_test.cpp
     ${SOURCES}
     ${Dawn}
     ${DNN}
3 changes: 1 addition & 2 deletions modules/dnn/src/webgpu/dawnWrapperTest/op_softmax_test.cpp
@@ -18,8 +18,7 @@ void printData(const void * data, int num) {
 }
 int main(int argc, char** argv )
 {
-    webgpu::wDevice = std::make_shared<wgpu::Device>(webgpu::createCppDawnDevice());
-    webgpu::wQueue = std::make_shared<wgpu::Queue>(webgpu::wDevice->GetDefaultQueue());
+    webgpu::isAvailable();
     float inputData1[] = {1, 2, 3, 4, 5, 6, 7, 8};
     std::vector<int> shape = {2,4,1}; // outer_size * channels * channel_size
 
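The test no longer constructs the Dawn device and queue by hand; a single call to webgpu::isAvailable() (defined in context.cpp, below) creates the shared context on first use. A sketch of the resulting setup path; the early-exit check is added here for illustration and is not part of the test:

    int main(int argc, char** argv)
    {
        if (!cv::dnn::webgpu::isAvailable())  // first call creates wDevice / wQueue
            return 1;                         // no usable WebGPU device
        float inputData1[] = {1, 2, 3, 4, 5, 6, 7, 8};
        std::vector<int> shape = {2, 4, 1};   // outer_size * channels * channel_size
        // ... run the softmax op against inputData1 as the rest of the test does ...
        return 0;
    }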
6 changes: 3 additions & 3 deletions modules/dnn/src/webgpu/include/buffer.hpp
@@ -27,9 +27,9 @@ class Buffer
     wgpu::BufferUsage getBufferUsage() { return usage_;}
 
     static void BufferMapReadCallback(WGPUBufferMapAsyncStatus status,
-        const void* data,
-        uint64_t dataLength,
-        void* userdata)
+                                      const void* data,
+                                      uint64_t dataLength,
+                                      void* userdata)
     {
         static_cast<Buffer*>(userdata)->mappedData = data;
     }
2 changes: 1 addition & 1 deletion modules/dnn/src/webgpu/include/op_base.hpp
@@ -36,7 +36,7 @@ class OpBase
     void createCommandBuffer();
     void runCommandBuffer();
     wgpu::FenceCompletionStatus WaitForCompletedValue(wgpu::Fence fence,
-        uint64_t completedValue);
+                                                      uint64_t completedValue);
 
     std::shared_ptr<wgpu::Device> device_;
     wgpu::ComputePipeline pipeline_;
9 changes: 5 additions & 4 deletions modules/dnn/src/webgpu/include/tensor.hpp
@@ -13,7 +13,7 @@ class Tensor{
 public:
     Tensor(Format fmt = wFormatFp32);
     Tensor(const void* data, std::vector<int>& shape,
-        Format fmt = wFormatFp32);
+           Format fmt = wFormatFp32);
     const void* mapRead();
     void unMap();
     Shape getShape() const;
@@ -24,8 +24,8 @@ class Tensor{
     // Copy data if data != NULL
     // Allocate new internal buffer if new size > old size or alloc flag is true
     Tensor reshape(const void* data, const std::vector<int>& shape,
-        bool alloc = false,
-        Format fmt = wFormatInvalid);
+                   bool alloc = false,
+                   Format fmt = wFormatInvalid);
     Tensor fillData(const void * data);
     int getFormat() const;
     size_t size() const { return size_in_byte_; }
@@ -38,7 +38,8 @@ class Tensor{
     size_t size_in_byte_;
     std::shared_ptr<Buffer> buffer_;
     Format format_;
-    wgpu::BufferUsage usage_;
+    wgpu::BufferUsage usage_ = wgpu::BufferUsage::Storage |
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
 };
 
 // #endif //HAVE_WEBGPU
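The last hunk turns usage_ into an in-class default (a C++11 non-static data member initializer), so any Tensor constructor that never assigns usage_ still gets a buffer usable as a storage binding and as both copy source and copy destination. A minimal illustration with a hypothetical struct:

    struct Demo {
        wgpu::BufferUsage usage_ = wgpu::BufferUsage::Storage |
                                   wgpu::BufferUsage::CopySrc |
                                   wgpu::BufferUsage::CopyDst;
    };
    // Demo{}.usage_ already carries all three flags; no constructor code needed.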
15 changes: 8 additions & 7 deletions modules/dnn/src/webgpu/src/buffer.cpp
@@ -7,13 +7,14 @@ namespace cv { namespace dnn { namespace webgpu {

 Buffer::Buffer(std::shared_ptr<wgpu::Device> device)
 {
-        device_ = device;
-        usage_ = wgpu::BufferUsage::Storage;
+    device_ = device;
+    usage_ = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst |
+             wgpu::BufferUsage::CopySrc;
 }
 
 Buffer::Buffer(std::shared_ptr<wgpu::Device> device,
-    const void* data, size_t size,
-    wgpu::BufferUsage usage)
+               const void* data, size_t size,
+               wgpu::BufferUsage usage)
 {
     device_ = device;
     usage_ = usage;
@@ -26,7 +27,7 @@ Buffer::Buffer(std::shared_ptr<wgpu::Device> device,
 }
 
 Buffer::Buffer(const void* data, size_t size,
-    wgpu::BufferUsage usage)
+               wgpu::BufferUsage usage)
 {
     createContext();
     device_ = wDevice;
@@ -55,8 +56,8 @@ const void* Buffer::MapReadAsyncAndWait()
         gpuReadBuffer_ = device_->CreateBuffer(& desc);
     }
     wgpu::CommandEncoder encoder = device_->CreateCommandEncoder();
-    encoder.CopyBufferToBuffer( buffer_, 0,
-        gpuReadBuffer_, 0, size_);
+    encoder.CopyBufferToBuffer(buffer_, 0,
+                               gpuReadBuffer_, 0, size_);
     wgpu::CommandBuffer cmdBuffer = encoder.Finish();
     encoder.Release();
     wQueue->Submit(1, &cmdBuffer);
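The CopySrc bit added to the constructor's default usage is what the last hunk relies on: a WebGPU storage buffer cannot be mapped for reading directly, so MapReadAsyncAndWait() copies it into a separate MapRead staging buffer first. A sketch of that path, assuming the mid-2020 Dawn C++ API used throughout this commit; anything not visible in the diff is reconstructed:

    wgpu::BufferDescriptor desc = {};
    desc.size = size_;
    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
    gpuReadBuffer_ = device_->CreateBuffer(&desc);   // host-mappable staging buffer

    wgpu::CommandEncoder encoder = device_->CreateCommandEncoder();
    encoder.CopyBufferToBuffer(buffer_, 0,           // requires CopySrc on buffer_
                               gpuReadBuffer_, 0, size_);
    wgpu::CommandBuffer cmdBuffer = encoder.Finish();
    wQueue->Submit(1, &cmdBuffer);
    // gpuReadBuffer_.MapReadAsync(BufferMapReadCallback, this) then stores the
    // pointer in mappedData (see buffer.hpp above); the "AndWait" part ticks
    // the device until the callback has fired.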
2 changes: 0 additions & 2 deletions modules/dnn/src/webgpu/src/context.cpp
@@ -39,13 +39,11 @@ bool isAvailable()
 }
 Context::Context()
 {
-    // create wgpu::Device
     wDevice = std::make_shared<wgpu::Device>(createCppDawnDevice());
     wQueue = std::make_shared<wgpu::Queue>(wDevice->GetDefaultQueue());
 }
 Context::~Context()
 {
-    // how to release object
     wDevice->Release();
     wQueue->Release();
 }
4 changes: 0 additions & 4 deletions modules/dnn/src/webgpu/src/context.hpp
@@ -9,10 +9,6 @@ class Context
 public:
     Context();
     ~Context();
-    std::shared_ptr<Context> wCtx;
-    std::shared_ptr<wgpu::Device> wDevice = nullptr;
-    std::shared_ptr<wgpu::Queue> wQueue = nullptr;
-    cv::Mutex wContextMtx;
 };
 
 void createContext();
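The deleted members do not disappear from the design: context.cpp above assigns wDevice and wQueue at namespace scope, so the commit effectively demotes them from per-Context fields to module-wide globals whose lifetime the Context constructor and destructor manage. A hedged guess at the declarations this implies (not shown in the diff):

    extern std::shared_ptr<wgpu::Device> wDevice;  // set by Context::Context()
    extern std::shared_ptr<wgpu::Queue>  wQueue;   // released by ~Context()

    void createContext();  // builds the shared Context on first use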
12 changes: 6 additions & 6 deletions modules/dnn/src/webgpu/src/internal.cpp
@@ -58,7 +58,7 @@ void bindTensor(Tensor& tensor, uint32_t binding,
 }
 
 void bindUniform(Buffer& buffer, uint32_t binding,
-    std::vector<wgpu::BindGroupEntry>& bgEntries)
+                 std::vector<wgpu::BindGroupEntry>& bgEntries)
 {
     wgpu::BindGroupEntry bgEntry = {};
     bgEntry.binding = binding;
@@ -106,11 +106,11 @@ void computeConvOutputShapeAndPadding(const PaddingMode& padding_mode,
 }
 
 void computePoolOutputShape(const PaddingMode& padding_mode,
-    const int& padding_top, const int& padding_left,
-    const int& in_h, const int& in_w,
-    const int& filter_h, const int& filter_w,
-    const int& stride_h, const int& stride_w,
-    int& out_h, int& out_w)
+                            const int& padding_top, const int& padding_left,
+                            const int& in_h, const int& in_w,
+                            const int& filter_h, const int& filter_w,
+                            const int& stride_h, const int& stride_w,
+                            int& out_h, int& out_w)
 {
     if (padding_mode == wPaddingModeValid)
     {
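For reference, the wPaddingModeValid branch visible at the end of the hunk computes the standard no-padding output size; assuming the usual floor (integer) division convention, the arithmetic is:

    // Valid padding: the pooling window must fit entirely inside the input.
    out_h = (in_h - filter_h) / stride_h + 1;
    out_w = (in_w - filter_w) / stride_w + 1;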
2 changes: 1 addition & 1 deletion modules/dnn/src/webgpu/src/internal.hpp
@@ -21,7 +21,7 @@ std::vector<uint32_t> compile(const std::string& name,
 void bindTensor(Tensor& tensor, uint32_t binding,
                 std::vector<wgpu::BindGroupEntry>& bgEntries);
 void bindUniform(Buffer& buffer, uint32_t binding,
-    std::vector<wgpu::BindGroupEntry>& bgEntries);
+                 std::vector<wgpu::BindGroupEntry>& bgEntries);
 void computeConvOutputShapeAndPadding(const PaddingMode& padding_mode,
                                       int& padding_top, int& padding_left,
                                       const int& in_h, const int& in_w,
5 changes: 3 additions & 2 deletions modules/dnn/src/webgpu/src/tensor.cpp
@@ -15,7 +15,7 @@ Tensor::Tensor(const void* data, std::vector<int>& shape, Format fmt)
     createContext();
     device_ = wDevice;
     size_in_byte_ = 0;
-    usage_ = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    usage_ = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
     format_ = fmt;
     reshape(data, shape);
 }
@@ -54,7 +54,7 @@ int Tensor::dimNum() const
 }
 
 Tensor Tensor::reshape(const void* data, const std::vector<int>& shape,
-    bool alloc, Format fmt)
+                       bool alloc, Format fmt)
 {
     if (device_ == nullptr)
     {
@@ -76,6 +76,7 @@ Tensor Tensor::reshape(const void* data, const std::vector<int>& shape,
         return * this;
     }
     fillData(data);
+    return * this;
 }
 
 Tensor Tensor::fillData(const void * data)
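The one-line addition in the last hunk is a genuine bug fix rather than an indent fix: reshape() returns a Tensor, and the path through fillData(data) previously fell off the end of the function, which is undefined behavior for a non-void return type. In outline (conditions elided):

    Tensor Tensor::reshape(const void* data, const std::vector<int>& shape,
                           bool alloc, Format fmt)
    {
        // ... validate arguments, reallocate buffer_ if needed ...
        if (data == nullptr)  // illustrative condition for the early-out path
            return * this;    // already present before this commit
        fillData(data);
        return * this;        // the line this commit adds
    }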
2 changes: 2 additions & 0 deletions modules/dnn/test/test_common.impl.hpp
@@ -25,6 +25,7 @@ void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
     case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
     case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
     case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
+    case DNN_BACKEND_WGPU: *os << "WGPU"; return;
     case DNN_BACKEND_OPENCV: *os << "OCV"; return;
     case DNN_BACKEND_CUDA: *os << "CUDA"; return;
     case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
@@ -41,6 +42,7 @@ void PrintTo(const cv::dnn::Target& v, std::ostream* os)
     case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
     case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
     case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
+    case DNN_TARGET_WGPU: *os << "WGPU"; return;
     case DNN_TARGET_FPGA: *os << "FPGA"; return;
     case DNN_TARGET_CUDA: *os << "CUDA"; return;
     case DNN_TARGET_CUDA_FP16: *os << "CUDA_FP16"; return;
