
Commit

Rolling back reinterpret_cast for merge conflict fix
ranjitshs committed Jun 7, 2024
2 parents 9a91370 + 74028e4 commit de44c0c
Showing 99 changed files with 1,719 additions and 1,047 deletions.
8 changes: 6 additions & 2 deletions cmake/external/abseil-cpp.cmake
@@ -27,14 +27,18 @@ FetchContent_Declare(
URL ${DEP_URL_abseil_cpp}
URL_HASH SHA1=${DEP_SHA1_abseil_cpp}
PATCH_COMMAND ${ABSL_PATCH_COMMAND}
-FIND_PACKAGE_ARGS NAMES absl
+FIND_PACKAGE_ARGS 20240116 NAMES absl
)

onnxruntime_fetchcontent_makeavailable(abseil_cpp)
FetchContent_GetProperties(abseil_cpp)
set(ABSEIL_SOURCE_DIR ${abseil_cpp_SOURCE_DIR})
# abseil_cpp_SOURCE_DIR is non-empty if we build it from source
message(STATUS "Abseil source dir:" ${ABSEIL_SOURCE_DIR})

# abseil_cpp_VERSION is non-empty if we find a preinstalled ABSL
if(abseil_cpp_VERSION)
message(STATUS "Abseil version:" ${abseil_cpp_VERSION})
endif()
if (GDK_PLATFORM)
# Abseil considers any partition that is NOT in the WINAPI_PARTITION_APP a viable platform
# for Win32 symbolize code (which depends on dbghelp.lib); this logic should really be flipped
2 changes: 1 addition & 1 deletion docs/OperatorKernels.md
@@ -421,7 +421,7 @@ Do not modify directly.*
|Transpose|*in* data:**T**<br> *out* transposed:**T**|21+|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int4), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint4), tensor(uint64), tensor(uint8)|
|||[13, 20]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[1, 12]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
-|Trilu|*in* input:**T**<br> *in* k:**tensor(int64)**<br> *out* output:**T**|14+|**T** = tensor(double), tensor(float), tensor(int64)|
+|Trilu|*in* input:**T**<br> *in* k:**tensor(int64)**<br> *out* output:**T**|14+|**T** = tensor(bool), tensor(double), tensor(float), tensor(int64)|
|Unique|*in* X:**T**<br> *out* Y:**T**<br> *out* indices:**tensor(int64)**<br> *out* inverse_indices:**tensor(int64)**<br> *out* counts:**tensor(int64)**|11+|**T** = tensor(double), tensor(float), tensor(int64), tensor(int8), tensor(string)|
|Unsqueeze|*in* data:**T**<br> *in* axes:**tensor(int64)**<br> *out* expanded:**T**<br><br>or<br><br>*in* data:**T**<br> *out* expanded:**T**|21+|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[13, 20]|**T** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
7 changes: 6 additions & 1 deletion include/onnxruntime/core/common/logging/isink.h
@@ -6,12 +6,15 @@
#include <string>

#include "core/common/logging/logging.h"
#include "core/common/logging/sink_types.h"

namespace onnxruntime {
namespace logging {
class ISink {
public:
-ISink() = default;
+explicit ISink(SinkType type = SinkType::BaseSink) : type_(type) {}

SinkType GetType() const { return type_; }

/**
Sends the message to the sink.
@@ -32,6 +35,8 @@ class ISink {
virtual ~ISink() = default;

private:
SinkType type_;

// Make Code Analysis happy by disabling all for now. Enable as needed.
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ISink);

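For context, a sink implementation now passes its type up through the new constructor so it can later be located by type. A minimal hypothetical sketch (the class name MyEtwSink is invented here, and the SendImpl parameter list is assumed from the elided portion of this header):

```cpp
#include <string>

#include "core/common/logging/isink.h"

namespace onnxruntime {
namespace logging {

// Hypothetical sink that tags itself as SinkType::EtwSink so that a later
// LoggingManager::RemoveSink(SinkType::EtwSink) can identify and drop it.
class MyEtwSink : public ISink {
 public:
  MyEtwSink() : ISink(SinkType::EtwSink) {}

 private:
  // The pure-virtual hook every ISink implements; parameter list assumed
  // from the elided lines of isink.h.
  void SendImpl(const Timestamp& timestamp, const std::string& logger_id,
                const Capture& message) override {
    (void)timestamp;  // a real sink would forward these to ETW
    (void)logger_id;
    (void)message;
  }
};

}  // namespace logging
}  // namespace onnxruntime
```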
32 changes: 26 additions & 6 deletions include/onnxruntime/core/common/logging/logging.h
@@ -14,10 +14,10 @@
#include "core/common/common.h"
#include "core/common/profiler_common.h"
#include "core/common/logging/capture.h"
#include "core/common/logging/severity.h"

#include "core/common/logging/macros.h"

#include "core/common/logging/severity.h"
#include "core/common/logging/sink_types.h"
#include "core/platform/ort_mutex.h"
#include "date/date.h"

/*
@@ -167,6 +167,23 @@
*/
static bool HasDefaultLogger() { return nullptr != s_default_logger_; }

/**
Gets the default instance of the LoggingManager.
*/
static LoggingManager* GetDefaultInstance();

/**
Removes a sink of the given type, if one is present
*/
void RemoveSink(SinkType sinkType);

/**
Adds a sink to the current sink, creating a CompositeSink if necessary.
Sink types must be unique.
@param severity The severity level for the new sink
*/
bool AddSinkOfType(SinkType sinkType, std::function<std::unique_ptr<ISink>()> sinkFactory, logging::Severity severity);

/**
Change the minimum severity level for log messages to be output by the default logger.
@param severity The severity.
@@ -214,7 +231,10 @@
void CreateDefaultLogger(const std::string& logger_id);

std::unique_ptr<ISink> sink_;
-const Severity default_min_severity_;
+#ifdef _WIN32
+mutable OrtMutex sink_mutex_;
+#endif
+Severity default_min_severity_;
const bool default_filter_user_data_;
const int default_max_vlog_level_;
bool owns_default_logger_;
@@ -362,8 +382,8 @@ unsigned int GetProcessId();
/**
If the ONNXRuntimeTraceLoggingProvider ETW Provider is enabled, then adds to the existing logger.
*/
-std::unique_ptr<ISink> EnhanceLoggerWithEtw(std::unique_ptr<ISink> existingLogger, logging::Severity originalSeverity,
-logging::Severity etwSeverity);
+std::unique_ptr<ISink> EnhanceSinkWithEtw(std::unique_ptr<ISink> existingSink, logging::Severity originalSeverity,
+logging::Severity etwSeverity);

/**
If the ONNXRuntimeTraceLoggingProvider ETW Provider is enabled, then can override the logging level.
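Taken together, GetDefaultInstance, AddSinkOfType, and RemoveSink let a caller attach and detach a sink by type at runtime. A hedged usage sketch, reusing the hypothetical MyEtwSink from above (the Severity::kWARNING choice is illustrative, not mandated by the API):

```cpp
#include <memory>

#include "core/common/logging/logging.h"

using namespace onnxruntime::logging;

// Hypothetical toggle that adds or removes an ETW-type sink on the
// default logging manager.
void ToggleEtwSink(bool enable) {
  LoggingManager* manager = LoggingManager::GetDefaultInstance();
  if (manager == nullptr) return;  // no default logging manager yet

  if (enable) {
    // AddSinkOfType wraps the existing sink in a CompositeSink if needed;
    // the bool result presumably reports whether the sink was added,
    // since sink types must be unique.
    manager->AddSinkOfType(
        SinkType::EtwSink,
        []() -> std::unique_ptr<ISink> { return std::make_unique<MyEtwSink>(); },
        Severity::kWARNING);
  } else {
    manager->RemoveSink(SinkType::EtwSink);
  }
}
```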
11 changes: 11 additions & 0 deletions include/onnxruntime/core/common/logging/sink_types.h
@@ -0,0 +1,11 @@
#pragma once

namespace onnxruntime {
namespace logging {
enum class SinkType {
BaseSink,
CompositeSink,
EtwSink
};
} // namespace logging
} // namespace onnxruntime
1 change: 1 addition & 0 deletions js/web/docs/webgpu-operators.md
@@ -74,6 +74,7 @@ Do not modify directly.*
| Not | ai.onnx(1+) | |
| Pad | ai.onnx(2-10,11-12,13-17,18,19+) | |
| Pow | ai.onnx(7-11,12,13-14,15+) | |
| QuickGelu | com.microsoft(1+) | |
| Range | ai.onnx(11+) | |
| Reciprocal | ai.onnx(6-12,13+) | |
| ReduceL1 | ai.onnx(1-10,11-12,13-17,18+) | |
8 changes: 4 additions & 4 deletions js/web/docs/webnn-operators.md
@@ -19,7 +19,7 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| BatchNormalization | ai.onnx(7-8, 9-13, 14, 15+) | batchNormalization ||| Only supports 'training_mode' value is 0, one output |
| Cast | ai.onnx(7-8, 9-12, 13-18, 19-20, 21+) | cast ||| |
| Ceil | ai.onnx(7-12, 13+) | ceil ||| |
-| Clip | ai.onnx(7-10, 11, 12, 13+) | clamp ||| |
+| Clip | ai.onnx(7-10, 11, 12, 13+) | clamp ||| WebNN CPU backend only supports 3 specific ranges: [0.0, infinity], [-1.0, 1.0], [0.0, 6.0] (Chromium issue: https://issues.chromium.org/issues/326156496) |
| Concat | ai.onnx(7-10, 11-12, 13+) | concat ||| |
| Conv | ai.onnx(7-10, 11+) | conv2d ||| Only supports 3-D or 4-D input and 'W' (weight). WebNN CPU requires the 'W' (weight) input to be a constant |
| ConvTranspose | ai.onnx(7-10, 11+) | convTranspose2d ||| Only supports 3-D or 4-D input and 'W' (weight). |
@@ -50,7 +50,7 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| LessOrEqual | ai.onnx(12-15, 16+) | lesserOrEqual ||| |
| Log | ai.onnx(7-12, 13+) | log ||| |
| LpPool | ai.onnx(7-10, 11-17, 18+) | l2Pool2d ||| Only supports 4-D input, 2-D 'kernel_shape', 'p' value is 2 |
-| MatMul | ai.onnx(7-8, 9-12, 13+) | matmul ||| WebNN CPU doesn't support broadcasting for MatMul |
+| MatMul | ai.onnx(7-8, 9-12, 13+) | matmul ||| |
| Max | ai.onnx(7, 8-11, 12, 13+) | max ||| |
| MaxPool | ai.onnx(7, 8-9, 10, 11, 12+) | maxPool2d ||| Only supports 4-D input, 2-D 'kernel_shape', 'storage_order' != 1, one output |
| Min | ai.onnx(7, 8-11, 12, 13+) | min ||| |
@@ -73,15 +73,15 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
| ReduceSumSquare | ai.onnx(7-10, 11-12, 13-17, 18+) | reduceSumSquare ||| Input 'axes' if present should be a constant |
| Relu | ai.onnx(7-12, 13, 14+) | relu ||| |
| Reshape | ai.onnx(7-12, 13, 14-18, 19-20, 21+) | reshape ||| Input 'shape' should be a constant, 0 dimension value in 'shape' is not supported |
-| Resize | ai.onnx(11-12, 13-17, 18, 19+) | resample2d ||| Only supports 4-D input, exclude_outside != 0, input 'scales' and 'sizes' if present must be a constant, WebNN CPU backend only supports 'linear' mode, WebNN GPU backend only supports 'linear' and 'nearest' modes |
+| Resize | ai.onnx(11-12, 13-17, 18, 19+) | resample2d ||| Only supports 4-D input, exclude_outside != 0, input 'scales' and 'sizes' if present must be a constant, 'linear' and 'nearest' modes |
| Shape | ai.onnx(7-12, 13-14, 15-18, 19-20, 21+) | slice ||| |
| Sigmoid | ai.onnx(7-12, 13+) | sigmoid ||| |
| Softplus | ai.onnx(7+) | softplus ||| |
| Softsign | ai.onnx(7+) | softsign ||| |
| Sin | ai.onnx(7+) | sin ||| |
| Slice | ai.onnx(7-9, 10, 11-12, 13+) | slice ||| Input 'starts', 'ends', 'axes', and 'steps' if present must be a constant, only supports 'steps' value 1 |
| Softmax | ai.onnx(7-10, 11-12, 13+) | softmax ||| Only supports input rank >= 2 |
-| Split | ai.onnx(7-10, 11-12, 13-17, 18+) | split ||| Input 'split' if present should be a constant, WebNN CPU backend only supports up to 4 outputs |
+| Split | ai.onnx(7-10, 11-12, 13-17, 18+) | split ||| Input 'split' if present should be a constant |
| Sqrt | ai.onnx(7-12, 13+) | sqrt ||| |
| Squeeze | ai.onnx(7-10, 11-12, 13-20, 21+) | reshape ||| Input 'axes' if present should be a constant |
| Sub | ai.onnx(7-12, 13, 14+) | sub ||| |
1 change: 1 addition & 0 deletions js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts
@@ -107,6 +107,7 @@ export const WEBGPU_OP_RESOLVE_RULES: Map<string, OperatorImplementation> = new
['Not', [unaryOps.not]],
['Pad', [pad]],
['Pow', [binaryOps.pow]],
['QuickGelu', [unaryOps.quickgelu, unaryOps.parseAlphaAttributes]],
['Range', [range]],
['Reciprocal', [unaryOps.reciprocal]],
['ReduceMin', [reduceMin]],
28 changes: 28 additions & 0 deletions js/web/lib/wasm/jsep/webgpu/ops/unary-op.ts
@@ -314,3 +314,31 @@ export const thresholdedRelu = (context: ComputeContext, attributes: AlphaAttrib
export const log = (context: ComputeContext): void => {
context.compute(createElementwiseProgramInfo(context.inputs[0], 'Log', 'log'));
};

export const quickGeluImpl = (varType: string, alpha: number) => `
const alpha = vec4<${varType}>(${alpha});
const one = ${varType}(1.0);
const zero = ${varType}(0.0);
fn quick_gelu_impl(x: vec4<${varType}>) -> vec4<${varType}> {
let v = x * alpha;
var x1 : vec4<${varType}>;
for (var i = 0; i < 4; i = i + 1) {
if (v[i] >= zero) {
x1[i] = one / (one + exp(-v[i]));
} else {
x1[i] = one - one / (one + exp(v[i]));
}
}
return x * x1;
}
`;

export const quickGeluExpression = (x: string) => `quick_gelu_impl(${x})`;

export const quickgelu = (context: ComputeContext, attributes: AlphaAttributes): void => {
const dType = tensorTypeToWsglValueType(context.inputs[0].dataType);
context.compute(createElementwiseProgramInfo(
context.inputs[0], 'QuickGelu', quickGeluExpression, quickGeluImpl(dType, attributes.alpha), attributes.cacheKey,
context.inputs[0].dataType));
};
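QuickGelu computes y = x * sigmoid(alpha * x). The per-lane branch above evaluates the sigmoid so that exp() only ever receives a non-positive argument, which avoids overflow for large |v|. A scalar C++ reference of the same math, offered as a sketch for hand-checking values rather than as part of this commit:

```cpp
#include <cmath>

// Reference QuickGelu(x) = x * sigmoid(alpha * x), mirroring the WGSL
// above: exp() is only called with a non-positive argument, so the
// intermediate value cannot overflow.
float QuickGeluRef(float x, float alpha) {
  const float v = alpha * x;
  const float sig = (v >= 0.0f)
                        ? 1.0f / (1.0f + std::exp(-v))
                        : 1.0f - 1.0f / (1.0f + std::exp(v));
  return x * sig;
}
```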
46 changes: 46 additions & 0 deletions js/web/test/data/ops/quick-gelu.jsonc
@@ -0,0 +1,46 @@
[
{
"name": "QuickGelu test",
"operator": "QuickGelu",
"opset": { "domain": "com.microsoft", "version": 1 },
"cases": [
{
"name": "[2x4]",
"inputs": [
{
"data": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, -0.8],
"dims": [2, 4],
"type": "float32"
}
],
"outputs": [
{
"data": [0.0542447, 0.116857, 0.187484, 0.265566, 0.350388, 0.441123, 0.53689, 0.636815],
"dims": [2, 4],
"type": "float32"
}
]
},
{
"name": "[3x5]",
"inputs": [
{
"data": [0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 3, 4, 5, 1.1, 1.2, 1.3, 1.4, -1.5],
"dims": [3, 5],
"type": "float32"
}
],
"outputs": [
{
"data": [
0.0542447, 0.116857, 0.187484, 0.265566, 0.350388, 0.845795, 1.9356, 2.98192, 3.99558, 4.99899, 0.953383,
1.0622, 1.17178, 1.2817, -0.108338
],
"dims": [3, 5],
"type": "float32"
}
]
}
]
}
]
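For reference, the expected values above can be reproduced with the scalar formula, assuming QuickGelu's default alpha of 1.702 (the cases set no alpha attribute, and the listed outputs are consistent with that default). A small hypothetical check:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const float alpha = 1.702f;  // assumed default for com.microsoft QuickGelu
  const float inputs[] = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, -0.8f};
  for (float x : inputs) {
    const float v = alpha * x;
    const float sig = (v >= 0.0f) ? 1.0f / (1.0f + std::exp(-v))
                                  : 1.0f - 1.0f / (1.0f + std::exp(v));
    // Prints 0.054245, 0.116857, ..., -0.163184 for the [2x4] case above.
    std::printf("%f\n", x * sig);
  }
  return 0;
}
```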
