remove native conv cpp impl in cpp operation/; rename github workflow stages
nudles committed Aug 30, 2020
1 parent e5445a5 commit d5cce78
Showing 9 changed files with 43 additions and 6,362 deletions.
2 changes: 2 additions & 0 deletions .asf.yaml
@@ -24,3 +24,5 @@ github:
   wiki: true
   # Enable issues on github
   issues: true
+  # Enable settings on github
+  settings: true
4 changes: 2 additions & 2 deletions .github/workflows/conda.yaml
@@ -27,7 +27,7 @@ on:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
-  build-ubuntu-conda:
+  build-pytest-on-cpu:
runs-on: ubuntu-latest

steps:
@@ -36,5 +36,5 @@ jobs:
run: conda install conda-build
- name: conda-config
run: conda config --add channels conda-forge && conda config --add channels nusdbsystem
-    - name: build-singa-conda
+    - name: build-pytest
run: conda build tool/conda/singa
4 changes: 2 additions & 2 deletions .github/workflows/macOS.yaml
@@ -15,14 +15,14 @@
# limitations under the License.
#

-name: macOS
+name: Native-MacOS

on:
push:
pull_request:

jobs:
-  build-macOS-cpu:
+  build-cpptest-cpu:
runs-on: macos-latest

steps:
4 changes: 2 additions & 2 deletions .github/workflows/cpp.yaml → .github/workflows/ubuntu.yaml
@@ -17,7 +17,7 @@

# This is a basic workflow to help you get started with Actions

-name: C++
+name: Native-Ubuntu

# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
@@ -41,7 +41,7 @@ jobs:
# - name: C++ test
# run: build/bin/test_singa

-  build-ubuntu-oneDNN:
+  build-cpptest-on-cpu:
runs-on: ubuntu-latest

steps:
6 changes: 2 additions & 4 deletions examples/onnx/utils.py
@@ -21,7 +21,6 @@
import tarfile
import glob
import onnx
-from onnx import numpy_helper
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')

@@ -41,7 +40,7 @@ def load_dataset(test_data_dir):
onnx_tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
onnx_tensor.ParseFromString(f.read())
-        inputs.append(numpy_helper.to_array(onnx_tensor))
+        inputs.append(onnx.numpy_helper.to_array(onnx_tensor))

# load reference outputs
ref_outputs = []
@@ -51,7 +50,7 @@
onnx_tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
onnx_tensor.ParseFromString(f.read())
-        ref_outputs.append(numpy_helper.to_array(onnx_tensor))
+        ref_outputs.append(onnx.numpy_helper.to_array(onnx_tensor))
return inputs, ref_outputs


@@ -63,4 +62,3 @@ def check_exist_or_download(url):
        logging.info("Downloading %s" % url)
        urllib.request.urlretrieve(url, filename)
    return filename
-
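
For reference, the qualified onnx.numpy_helper calls above round-trip between serialized TensorProto files and NumPy arrays. A minimal standalone sketch of that pattern (the file name is invented for illustration):

import numpy as np
import onnx
import onnx.numpy_helper

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = onnx.numpy_helper.from_array(arr, name="input_0")
with open("input_0.pb", "wb") as f:  # same .pb layout as the test_data_dir files
    f.write(tensor.SerializeToString())

loaded = onnx.TensorProto()
with open("input_0.pb", "rb") as f:
    loaded.ParseFromString(f.read())
assert (onnx.numpy_helper.to_array(loaded) == arr).all()
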
30 changes: 16 additions & 14 deletions python/singa/sonnx.py
@@ -21,18 +21,21 @@

import numpy as np
import onnx.utils
import onnx
from onnx.backend.base import Backend, BackendRep
from onnx import (checker, helper, numpy_helper, GraphProto, NodeProto,
-                  TensorProto, OperatorSetIdProto, optimizer, mapping)
+                  TensorProto, OperatorSetIdProto, optimizer, mapping,
+                  shape_inference)
import warnings

-import singa_wrap as singa
-import device
-import autograd
-import layer
-import tensor
-import model
-import utils
+from singa import device
+from . import singa_wrap as singa
+from . import autograd, layer
+from . import tensor
+from . import model
+from singa import utils

import collections
OrderedDict = collections.OrderedDict
@@ -390,10 +393,9 @@ def _create_constantOfShape(cls, op, op_t):
the onnx node
"""
node = cls._common_singa_tensor_to_onnx_node(op, op_t)
-        tensor_type = onnx.TensorProto.FLOAT if isinstance(
-            op.value, float) else onnx.TensorProto.INT32
-        tensor_value = onnx.helper.make_tensor("value", tensor_type, [1],
-                                               [op.value])
+        tensor_type = TensorProto.FLOAT if isinstance(
+            op.value, float) else TensorProto.INT32
+        tensor_value = helper.make_tensor("value", tensor_type, [1], [op.value])
node.attribute.extend([
helper.make_attribute('value', tensor_value),
])
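
For context, a ConstantOfShape node like the one built above can be assembled with the bare helper/TensorProto names this commit switches to. A hedged sketch with invented input/output names, not SINGA's exact call:

from onnx import helper, TensorProto

value = helper.make_tensor("value", TensorProto.FLOAT, [1], [1.0])
node = helper.make_node(
    "ConstantOfShape",
    inputs=["shape"],  # a 1-D INT64 tensor holding the output shape
    outputs=["y"],
    value=value,  # attached as the 'value' attribute, as in the code above
)
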
@@ -1895,7 +1897,7 @@ def prepare(cls, model, device='CPU', **kwargs):
try:
model = onnx.utils.polish_model(model)
except IndexError as err:
-                model = onnx.shape_inference.infer_shapes(model)
+                model = shape_inference.infer_shapes(model)

# check the opset version and ir version
# SINGA supports opset version(11), ir version(1.6.0 -> 6)
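
The pattern in prepare(): onnx.utils.polish_model runs the checker, optimizer, and shape inference together, and some models raise IndexError inside it, so the code falls back to shape inference alone, using the shape_inference name imported above. A standalone sketch, assuming an onnx release that still ships polish_model:

import onnx
import onnx.utils
from onnx import shape_inference

def polish_or_infer(model):
    try:
        return onnx.utils.polish_model(model)  # check + optimize + infer shapes
    except IndexError:
        return shape_inference.infer_shapes(model)  # shape inference only
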
@@ -2199,4 +2201,4 @@ def forward(self, *input, aux_output=(), **kwargs):
get_op = SingaBackend._onnx_node_to_singa_op
to_onnx = SingaFrontend.singa_to_onnx_model
save = onnx.save
-load = onnx.load
\ No newline at end of file
+load = onnx.load
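
The module-level save and load are plain re-exports of onnx.save and onnx.load, so a converted model round-trips like any ONNX file. A usage sketch with an invented path:

import onnx

model = onnx.load("model.onnx")      # sonnx.load is this same function
onnx.save(model, "model_copy.onnx")  # likewise sonnx.save
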
4 changes: 0 additions & 4 deletions setup.py
@@ -55,8 +55,6 @@
import io
import sys
import subprocess
-import textwrap
-import traceback
import shutil
import shlex
from pathlib import Path
@@ -192,10 +190,8 @@ def generate_singa_config(with_cuda, with_nccl):


def get_cpp_flags():
-    last_err = None
default_flags = ['-std=c++11', '-fPIC', '-g', '-O2', '-Wall', '-pthread']
# avx_flags = [ '-mavx'] #'-mf16c',
-    flags_to_try = []
if sys.platform == 'darwin':
# Darwin most likely will have Clang, which has libc++.
return default_flags + ['-stdlib=libc++']
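
With last_err and flags_to_try gone, get_cpp_flags reduces to a platform switch. A sketch of the simplified shape, abridged from the lines shown here (the non-Darwin branch below the fold may add more flags):

import sys

def get_cpp_flags():
    default_flags = ['-std=c++11', '-fPIC', '-g', '-O2', '-Wall', '-pthread']
    if sys.platform == 'darwin':
        # Darwin most likely has Clang, which pairs with libc++.
        return default_flags + ['-stdlib=libc++']
    return default_flags
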
24 changes: 17 additions & 7 deletions src/model/operation/convolution.cc
@@ -20,10 +20,10 @@
************************************************************/
// #include "../layer/convolution.h"

-#include <cctype>
-
#include "convolution.h"

+#include <cctype>
+
namespace singa {

ConvHandle::ConvHandle(const Tensor &input,
@@ -188,7 +188,8 @@ Tensor CpuConvForward(const Tensor &x, Tensor &W, Tensor &b,
{x.block(), W.block(), b.block()}, {output.block()}, "CpuConvForward");

return output;
-#else  // cpp naive
+#else  // cpp naive, error due to Im2col importing
+  /*
Shape w_shape = W.shape();
Shape b_shape;
if (ch.bias_term) b_shape = b.shape();
@@ -219,6 +220,7 @@ Tensor CpuConvForward(const Tensor &x, Tensor &W, Tensor &b,
W.Reshape(w_shape);
if (ch.bias_term) b.Reshape(b_shape);
return output;
+  */
#endif // USE_DNNL
}
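
The branch commented out above is the textbook im2col formulation of convolution: unfold input patches into a matrix, multiply by the flattened filters, add the bias. An illustrative NumPy sketch of the same algorithm for a single image; names are invented and SINGA's Im2col helper (whose import is the stated problem) is not used:

import numpy as np

def im2col(x, kh, kw, stride, pad):
    # Unfold one (C, H, W) image into (C*kh*kw, out_h*out_w) patch columns.
    c, h, w = x.shape
    xp = np.pad(x, ((0, 0), (pad, pad), (pad, pad)))
    out_h = (h + 2 * pad - kh) // stride + 1
    out_w = (w + 2 * pad - kw) // stride + 1
    cols = np.empty((c, kh, kw, out_h, out_w), dtype=x.dtype)
    for i in range(kh):
        for j in range(kw):
            cols[:, i, j] = xp[:, i:i + stride * out_h:stride,
                               j:j + stride * out_w:stride]
    return cols.reshape(c * kh * kw, -1)

def conv_forward(x, W, b, stride=1, pad=0):
    # x: (C, H, W), W: (F, C, kh, kw), b: (F,) -> y: (F, out_h, out_w)
    f, _, kh, kw = W.shape
    col = im2col(x, kh, kw, stride, pad)
    y = W.reshape(f, -1) @ col + b[:, None]
    out_h = (x.shape[1] + 2 * pad - kh) // stride + 1
    out_w = (x.shape[2] + 2 * pad - kw) // stride + 1
    return y.reshape(f, out_h, out_w)
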

@@ -284,6 +286,7 @@ Tensor CpuConvBackwardx(const Tensor &dy, Tensor &W, const Tensor &x,
return dx;

#else // NOT USE_DNNL
+  /* // error due to importing Col2im
Shape w_shape = W.shape();
W.Reshape(Shape{ch.num_filters, ch.col_height});
@@ -303,6 +306,7 @@
}
W.Reshape(w_shape);
return dx;
+  */
#endif // USE_DNNL
}
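
Backward w.r.t. the input is the transposed matrix product folded back with col2im, where overlapping patches sum; that is what the commented-out Col2im call computes. A NumPy sketch reusing the im2col layout from the forward sketch above:

import numpy as np

def col2im(cols, x_shape, kh, kw, stride, pad):
    # Fold (C*kh*kw, out_h*out_w) columns back into (C, H, W), summing overlaps.
    c, h, w = x_shape
    out_h = (h + 2 * pad - kh) // stride + 1
    out_w = (w + 2 * pad - kw) // stride + 1
    cols = cols.reshape(c, kh, kw, out_h, out_w)
    xp = np.zeros((c, h + 2 * pad, w + 2 * pad), dtype=cols.dtype)
    for i in range(kh):
        for j in range(kw):
            xp[:, i:i + stride * out_h:stride,
               j:j + stride * out_w:stride] += cols[:, i, j]
    return xp[:, pad:pad + h, pad:pad + w]

def conv_backward_x(dy, W, x_shape, stride=1, pad=0):
    # dy: (F, out_h, out_w) -> dx: (C, H, W)
    f, _, kh, kw = W.shape
    dcol = W.reshape(f, -1).T @ dy.reshape(f, -1)
    return col2im(dcol, x_shape, kh, kw, stride, pad)
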

@@ -372,10 +376,12 @@ Tensor CpuConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W,
{DNNL_ARG_DIFF_BIAS, conv_diff_bias_memory}});
ctx->dnnl_stream.wait();
},
-      {x.block(), dy.block(), W.block()}, {dW.block(), ch.db->block()}, "CpuConvBackwardW");
+      {x.block(), dy.block(), W.block()}, {dW.block(), ch.db->block()},
+      "CpuConvBackwardW");

return dW;
#else // native cpp
+  /* // error due to importing Im2col
Tensor dW;
dW.ResetLike(W);
dW.SetValue(0.0f);
@@ -398,6 +404,7 @@ Tensor CpuConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W,
}
dW.Reshape(w_shape);
return dW;
+  */
#endif // USE_DNNL
}
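
Backward w.r.t. the filters pairs the output gradient with the same unfolded patches: dW = dy_mat @ im2col(x).T, accumulated over the batch in the code above. A sketch for one image, reusing im2col from the forward sketch:

def conv_backward_W(dy, x, W_shape, stride=1, pad=0):
    # dy: (F, out_h, out_w), x: (C, H, W) -> dW: (F, C, kh, kw)
    f, _, kh, kw = W_shape
    col = im2col(x, kh, kw, stride, pad)  # helper defined in the forward sketch
    return (dy.reshape(f, -1) @ col.T).reshape(W_shape)
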

@@ -598,7 +605,8 @@ Tensor GpuConvForward(const Tensor &x, const Tensor &W, const Tensor &b,
cch.workspace_count * sizeof(float), &beta,
cch.y_desc, outblock->mutable_data());
},
-      {x.block(), W.block()}, {output.block(), cch.workspace.block()}, "cudnnConvForward");
+      {x.block(), W.block()}, {output.block(), cch.workspace.block()},
+      "cudnnConvForward");

if (cch.bias_term) {
Tensor outputFake(output);
@@ -634,7 +642,8 @@ Tensor GpuConvBackwardx(const Tensor &dy, const Tensor &W, const Tensor &x,
cch.workspace_count * sizeof(float), &beta, cch.x_desc,
dxblock->mutable_data());
},
-      {dy.block(), W.block()}, {dx.block(), cch.workspace.block()}, "cudnnConvolutionBackwardData");
+      {dy.block(), W.block()}, {dx.block(), cch.workspace.block()},
+      "cudnnConvolutionBackwardData");

return dx;
}
@@ -658,7 +667,8 @@ Tensor GpuConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W,
cch.workspace_count * sizeof(float), &beta, cch.filter_desc,
dwblock->mutable_data());
},
-      {dy.block(), x.block()}, {dW.block(), cch.workspace.block()}, "cudnnConvolutionBackwardFilter");
+      {dy.block(), x.block()}, {dW.block(), cch.workspace.block()},
+      "cudnnConvolutionBackwardFilter");

return dW;
}