Merge pull request #884 from xmos/fix_audioi16
panickal-xmos authored Feb 28, 2024
2 parents 47eb94f + 487643d commit 22bc8d2
Showing 15 changed files with 149 additions and 17 deletions.
2 changes: 2 additions & 0 deletions examples/app_mobilenetv2/obtain_and_optimize_mobilenetv2.py
@@ -61,6 +61,8 @@ def optimize_mobilenetv2():
#######################################################################
# Running the model on xcore host interpreter with sample input image #
#######################################################################
+get_mobilenetv2()
+optimize_mobilenetv2()

# Sample image of a lion (ImageNet class 291)
with open("lion.bin", "rb") as f:
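These two top-level calls make the script actually download and optimize the model before the sample-image inference below runs; previously the functions were only defined, never invoked, so the interpreter section had no model to work with.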
2 changes: 1 addition & 1 deletion third_party/lib_nn
2 changes: 1 addition & 1 deletion third_party/lib_tflite_micro
1 change: 0 additions & 1 deletion xformer/IR/XCoreOps.td
@@ -92,7 +92,6 @@ def XC_SliceOp : XC_Op<"slice", [Pure]> {
let results = (outs TensorOf<[QI8, QI16, F32, I8, I32]> : $output);
}

-
def XC_PadOp : XC_Op<"pad", [Pure]> {
let summary = "Pad op";

4 changes: 3 additions & 1 deletion xformer/Transforms/ApplyXCPatterns.cpp
@@ -162,6 +162,8 @@ DenseElementsAttr getLookupTableI16(PatternRewriter &rewriter,
fn = approximation_function_logistics;
} else if (isa<TFL::EluOp>(activationOp)) {
fn = approximation_function_elu;
+} else if (isa<TFL::ReluOp>(activationOp)) {
+fn = approximation_function_relu;
} else {
llvm_unreachable("Unsupported op!");
}
@@ -172,7 +174,7 @@ DenseElementsAttr getLookupTableI16(PatternRewriter &rewriter,
quadratic_function_table_t table;
quadratic_approximation_generator(&table, fn, inputScale, outputScale, chunks,
&max_error, &square_error);
-if (max_error >= 2) {
+if (max_error > quadraticLookupErrorOption) {
(void)rewriter.notifyMatchFailure(
activationOp->getLoc(), "Cannot calculate quadratic approximation!");
return {};
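With this change, int16 Relu joins Logistic, Tanh, and Elu on the lookup-table path, and the accuracy gate becomes configurable: lowering proceeds only when the generator's reported max_error stays within quadraticLookupErrorOption (default 1) instead of the old hardcoded bound of 2. For intuition, here is a minimal sketch of how a piecewise-quadratic activation table is evaluated; QuadSegment and evalQuadTable are invented names, since lib_nn's actual quadratic_function_table_t layout is not shown in this diff:

#include <algorithm>
#include <cstddef>
#include <vector>

// One segment approximates the activation as y = a*x^2 + b*x + c
// on its slice of the input range.
struct QuadSegment { float a, b, c; };

// Evaluate the approximation at x by picking the segment containing x.
float evalQuadTable(const std::vector<QuadSegment> &segments, float x,
                    float lo, float hi) {
  if (segments.empty()) return 0.0f;
  x = std::clamp(x, lo, hi);
  auto idx = static_cast<std::size_t>((x - lo) / (hi - lo) *
                                      static_cast<float>(segments.size()));
  idx = std::min(idx, segments.size() - 1);
  const QuadSegment &s = segments[idx];
  return (s.a * x + s.b) * x + s.c; // Horner form of a*x^2 + b*x + c
}

When the measured max_error exceeds the budget, the pattern reports a match failure and the activation stays as a reference TFL op, which is the fallback the new command-line option's description promises.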
10 changes: 7 additions & 3 deletions xformer/Transforms/OptimizeConv2D.cpp
@@ -454,11 +454,15 @@ struct SameToValidTransposeConvPattern
// Input type must be QI8
auto inputElementType =
tConvOp.getInput().getType().cast<ShapedType>().getElementType();
+if (!utils::hasNBitSignedQType(inputElementType) &&
+!utils::hasNBitSignedQType<16>(inputElementType)) {
+return failure();
+}

auto filterElementType =
tConvOp.getWeights().getType().cast<ShapedType>().getElementType();

-if (!utils::hasNBitSignedQType(inputElementType) ||
-!utils::hasNBitSignedQType(filterElementType)) {
+if (!utils::hasNBitSignedQType(filterElementType) &&
+!utils::hasNBitSignedQType<16>(filterElementType)) {
return failure();
}

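The restructured checks accept either 8- or 16-bit signed quantized element types for both input and filter. utils::hasNBitSignedQType itself is not part of this diff; a hypothetical sketch of such a helper, assuming it defaults to N = 8 so the unparameterized calls keep their original QI8 meaning:

#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Types.h"

// Hypothetical: true if elementType is a signed quantized type whose
// storage is exactly N bits, so hasNBitSignedQType(t) matches QI8 and
// hasNBitSignedQType<16>(t) matches QI16.
template <int N = 8>
bool hasNBitSignedQType(mlir::Type elementType) {
  auto qType = elementType.dyn_cast<mlir::quant::QuantizedType>();
  return qType && qType.isSigned() &&
         qType.getStorageTypeIntegralWidth() == N;
}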
1 change: 1 addition & 0 deletions xformer/Transforms/Options.h
@@ -9,6 +9,7 @@
namespace mlir {
namespace xcore {

+extern llvm::cl::opt<unsigned> quadraticLookupErrorOption;
extern llvm::cl::opt<bool> enableBetaFloatOption;
extern llvm::cl::opt<unsigned> threadCountOption;
extern llvm::cl::opt<std::string> weightsFilenameOption;
2 changes: 2 additions & 0 deletions xformer/Transforms/Passes.cpp
@@ -15,6 +15,8 @@ void buildXCorePassPipeline(OpPassManager &pm) {
// Run pass from LCE to convert Larq ops which are in TFL custom op format to
// Larq dialect
pm.addPass(mlir::TFL::CreateTranslateToLCEPass());
+// Convert dynamic shapes in batch dimension to static
+pm.addPass(createRemoveDynamicShapePass());
// TFL passes
pm.addPass(createOptimizeTransposePass());
pm.addPass(createReplaceAvgPoolWithConv2DPass());
1 change: 1 addition & 0 deletions xformer/Transforms/Passes.h
@@ -29,6 +29,7 @@ std::unique_ptr<OperationPass<func::FuncOp>> createReplaceFCWithConv2DPass();
std::unique_ptr<OperationPass<func::FuncOp>> createOptimizeConv2DPass();
std::unique_ptr<OperationPass<func::FuncOp>> createOpSplitPass();
std::unique_ptr<OperationPass<func::FuncOp>> createApplyTFLPatternsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createRemoveDynamicShapePass();
std::unique_ptr<OperationPass<func::FuncOp>> createReplaceAddPass();
std::unique_ptr<OperationPass<func::FuncOp>> createReplaceMulPass();
std::unique_ptr<OperationPass<func::FuncOp>> createReplaceMaxPoolPass();
93 changes: 93 additions & 0 deletions xformer/Transforms/RemoveDynamicShape.cpp
@@ -0,0 +1,93 @@
// Copyright 2021 XMOS LIMITED. This Software is subject to the terms of the
// XMOS Public License: Version 1

#include "IR/XCoreOps.h"

#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

namespace mlir::xcore {

namespace {
struct RemoveDynamicShape
: public PassWrapper<RemoveDynamicShape, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RemoveDynamicShape)

void getDependentDialects(DialectRegistry &registry) const final {
registry.insert<XCoreDialect>();
}
StringRef getArgument() const final { return "xcore-remove-dynamic-shape"; }
StringRef getDescription() const final { return "Remove dynamic shape"; }
void runOnOperation() override;
};

void RemoveDynamicShape::runOnOperation() {
auto func = getOperation();
auto *ctx = &getContext();

// Lambda for getting a new type with dynamic changed to static
auto getNewTensorType = [&](TensorType tensorType) {
TensorType newType = tensorType;
// If batch dim is dynamic, make it of size one
if (tensorType.hasRank() && tensorType.getRank() > 1 &&
tensorType.getDimSize(0) == ShapedType::kDynamic) {
llvm::ArrayRef<int64_t> shape = tensorType.getShape();
std::vector<int64_t> newShape;
newShape.reserve(shape.size());
for (auto &dim : shape) {
newShape.push_back(static_cast<int>(dim));
}
newShape[0] = 1;
newType = tensorType.clone(llvm::ArrayRef<int64_t>(newShape));
}
return newType;
};

// Handle func arguments and return types
llvm::SmallVector<Type> newFuncInputTypes;
newFuncInputTypes.resize(func.getNumArguments());
llvm::SmallVector<Type> newFuncOutputTypes;
newFuncOutputTypes.resize(func.getNumResults());

for (BlockArgument argument : func.getArguments()) {
auto tensorType = argument.getType().dyn_cast<TensorType>();
auto newType = getNewTensorType(tensorType);
newFuncInputTypes[argument.getArgNumber()] = newType;
argument.setType(newType);
}

for (int i = 0; i < func.getNumResults(); ++i) {
auto tensorType = func.getResultTypes()[i].dyn_cast<TensorType>();
newFuncOutputTypes[i] = getNewTensorType(tensorType);
}
FunctionType funcType = func.getFunctionType();
auto newFuncType =
FunctionType::get(ctx, newFuncInputTypes, newFuncOutputTypes);
func.setType(newFuncType);

// Iterate through all other ops
func.walk<WalkOrder::PreOrder>([&](Operation *op) {
if (op == func) {
return;
}

for (Value result : op->getResults()) {
if (result.getType().isa<NoneType>()) {
continue;
}
auto tensorType = result.getType().dyn_cast<TensorType>();
result.setType(getNewTensorType(tensorType));
}
});
}
} // namespace

// Creates an instance of the RemoveDynamicShape pass.
std::unique_ptr<OperationPass<func::FuncOp>> createRemoveDynamicShapePass() {
return std::make_unique<RemoveDynamicShape>();
}

static PassRegistration<RemoveDynamicShape> pass;

} // namespace mlir::xcore
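Stripped of the MLIR plumbing, the type rewrite applied to every function argument, result, and intermediate value is simple: if the leading (batch) dimension is dynamic, pin it to one. A self-contained sketch, assuming kDynamicDim matches ShapedType::kDynamic's sentinel value:

#include <cstdint>
#include <limits>
#include <vector>

// Assumed sentinel for a dynamic dimension (mirrors ShapedType::kDynamic).
constexpr int64_t kDynamicDim = std::numeric_limits<int64_t>::min();

// Pin a dynamic batch dimension to 1; all other dimensions are kept.
std::vector<int64_t> pinBatchDim(std::vector<int64_t> shape) {
  if (shape.size() > 1 && shape.front() == kDynamicDim)
    shape.front() = 1; // e.g. {?, 224, 224, 3} -> {1, 224, 224, 3}
  return shape;
}

Fixing the batch to one gives the rest of the pipeline static shapes to work with, which is why the pass is registered near the top of buildXCorePassPipeline above.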
17 changes: 17 additions & 0 deletions xformer/Transforms/TFLPatterns.td
@@ -54,6 +54,23 @@ def : Pat<(TFL_ReluOp(TFL_Conv2DOp $input, $f, $b, $dh, $dw, TFL_AF_None, $p,
$sh, $sw)),
(TFL_Conv2DOp $input, $f, $b, $dh, $dw, TFL_AF_Relu, $p, $sh, $sw)>;

+// Unfuse activation functions from binary ops
+// TFL Add, Sub, Mul
+foreach binaryOp = [TFL_AddOp, TFL_SubOp, TFL_MulOp] in {
+foreach activation = [
+[TFL_AF_Relu, TFL_ReluOp],
+[TFL_AF_Relu1, TFL_Relu1Op],
+[TFL_AF_Relu6, TFL_Relu6Op],
+[TFL_AF_Tanh, TFL_TanhOp],
+[TFL_AF_Sign, TFL_SignOp],
+] in {
+def:
+Pat<(binaryOp
+: $output TensorOf<[QI8, QI16]>:$input1, TensorOf<[QI8, QI16]>:$input2,
+activation[0]), (activation[1] (binaryOp $input1, $input2, TFL_AF_None, (returnType $output)))>;
+}
+}

// If MeanOp with spatial axis and rank 2 output, expand output to rank 4, which
// we later lower to AveragePool2D
def : Pat<(TFL_MeanOp
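The foreach block added above moves the fused activation out of quantized adds, subs, and muls: conceptually, add(x, y) with fused_activation_function = RELU becomes relu(add(x, y)) with the fusion set to NONE, and the returnType directive keeps the original result type on the inner binary op. Splitting them up presumably lets the standalone activation take the int16 lookup path from XCPatterns.td while the binary op lowers separately to XC_BinaryI16Op.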
2 changes: 1 addition & 1 deletion xformer/Transforms/TranslateToCustomOp.cpp
@@ -255,7 +255,7 @@ void TranslateToCustomOp::runOnOperation() {
patterns.insert<RewriteToCustomOp<MaxPool2DOp>>(ctx);
patterns.insert<RewriteToCustomOp<LoadFlashOp>>(ctx);
patterns.insert<RewriteToCustomOp<LookupOp>>(ctx);
-// patterns.insert<RewriteToCustomOp<SoftmaxOp>>(ctx);
+patterns.insert<RewriteToCustomOp<SoftmaxOp>>(ctx);
patterns.insert<RewriteToCustomOp<MulOp>>(ctx);
patterns.insert<RewriteToCustomOp<Pad3To4Op>>(ctx);
patterns.insert<RewriteToCustomOp<SliceOp>>(ctx);
15 changes: 8 additions & 7 deletions xformer/Transforms/XCPatterns.td
@@ -29,7 +29,7 @@ def getLookupTableI16OrFail
: NativeCodeCall<"getLookupTableI16($_builder, $0.getDefiningOp()); "
"if(blob == nullptr){return failure();}">;

-foreach activationOp = [TFL_LogisticOp, TFL_TanhOp] in {
+foreach activationOp = [TFL_LogisticOp, TFL_TanhOp, TFL_ReluOp] in {
def:
Pat<(activationOp
: $output TensorOf<[QI16]>:$input),
@@ -48,11 +48,12 @@ def isSingleSegment

def betaIsOne : Constraint<CPred<"$0.getValue().convertToFloat() == 1.0">>;

-// def:
-// Pat<(TFL_SoftmaxOp
-// : $output TensorOf<[QI8]>:$input, $beta),
-// (XC_SoftmaxOp $input, (Arith_ConstantOp (getExpLookupF32
-// $output))), [(betaIsOne $beta), (isSingleSegment $input)]>;
+// Softmax -> XC_SoftmaxOp
+def:
+Pat<(TFL_SoftmaxOp
+: $output TensorOf<[QI8]>:$input, $beta),
+(XC_SoftmaxOp $input, (Arith_ConstantOp (getExpLookupF32
+$output))), [(betaIsOne $beta), (isSingleSegment $input)]>;

// Beta float activation lookup
def getActivationType
@@ -108,7 +109,7 @@ def: Pat<(TFL_DequantizeOp
def getBinaryI16BlobOrFail
: NativeCodeCall<"getBinaryI16Blob($_builder, $0.getDefiningOp()); if(blob "
"== nullptr){return failure();}">;
-// TFL Mul, Add
+// TFL Add, Sub, Mul
def: Pat<(TFL_AddOp
: $output TensorOf<[QI16]>:$input1, TensorOf<[QI16]>:$input2,
TFL_AF_None), (XC_BinaryI16Op $input1, $input2, (Arith_ConstantOp
6 changes: 4 additions & 2 deletions xformer/Utils/Utils.td
@@ -11,10 +11,12 @@ class HasMultipleOfNBytesPerPixel<int n>
"$0.getType().cast<ShapedType>().getDimSize(3) % " #n #" == 0">>;

def HasOnlyChannelPadding
-: Constraint<CPred<"utils::hasOnlyChannelPadding($0.cast<DenseIntElementsAttr>())">>;
+: Constraint<CPred<
+"utils::hasOnlyChannelPadding($0.cast<DenseIntElementsAttr>())">>;

def HasOnlySpatialPadding
-: Constraint<CPred<"utils::hasOnlySpatialPadding($0.cast<DenseIntElementsAttr>())">>;
+: Constraint<CPred<
+"utils::hasOnlySpatialPadding($0.cast<DenseIntElementsAttr>())">>;

// Casts $1 to a dequantized type and then casts that to a quantized type
// using the quantization parameters from the type in $0
8 changes: 8 additions & 0 deletions xformer/XCoreOptMain.cpp
@@ -34,6 +34,14 @@ namespace mlir::xcore {
// and -help) will be hidden.
static cl::OptionCategory XformerCategory("Xformer options");

+cl::opt<unsigned>
+quadraticLookupErrorOption("xcore-quadratic-lookup-error",
+cl::desc("Used only for int16. Defaults to TFL "
+"ops if quadratic lookup error is more "
+"than provided "
+"(default = 1)."),
+cl::init(1), cl::cat(XformerCategory));

cl::opt<bool> enableBetaFloatOption("xcore-enable-beta-float",
cl::desc("Enable beta float support."),
cl::init(false), cl::cat(XformerCategory));
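A hypothetical invocation, assuming the built tool is named xcore-opt as the file name suggests: xcore-opt --xcore-quadratic-lookup-error=2 model.tflite would double the default error budget of 1, keeping more int16 activations on the quadratic lookup path before falling back to reference TFL ops.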
