Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions docs/OperatorKernels.md
Original file line number Diff line number Diff line change
Expand Up @@ -242,8 +242,8 @@ Do not modify directly.*
|||[9, 12]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|||[1, 8]|**T** = tensor(double), tensor(float)|
|MatMulInteger|*in* A:**T1**<br> *in* B:**T2**<br> *in* a_zero_point:**T1**<br> *in* b_zero_point:**T2**<br> *out* Y:**T3**|10+|**T1** = tensor(int8), tensor(uint8)<br/> **T2** = tensor(int8), tensor(uint8)<br/> **T3** = tensor(int32)|
|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|Max|*in* data_0:**T**<br> *out* max:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[8, 11]|**T** = tensor(double), tensor(float)|
|||[6, 7]|**T** = tensor(float)|
|MaxPool|*in* X:**T**<br> *out* Y:**T**<br><br>or<br><br>*in* X:**T**<br> *out* Y:**T**<br> *out* Indices:**I**|22+|**I** = tensor(int64)<br/> **T** = tensor(double), tensor(float), tensor(int8), tensor(uint8)|
Expand All @@ -263,8 +263,8 @@ Do not modify directly.*
|MelWeightMatrix|*in* num_mel_bins:**T1**<br> *in* dft_length:**T1**<br> *in* sample_rate:**T1**<br> *in* lower_edge_hertz:**T2**<br> *in* upper_edge_hertz:**T2**<br> *out* output:**T3**|17+|**T1** = tensor(int32), tensor(int64)<br/> **T2** = tensor(float)<br/> **T3** = tensor(double), tensor(float), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|MemcpyFromHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|MemcpyToHost|*in* X:**T**<br> *out* Y:**T**|1+|**T** = seq(tensor(bfloat16)), seq(tensor(bool)), seq(tensor(double)), seq(tensor(float)), seq(tensor(float16)), seq(tensor(float8e4m3fn)), seq(tensor(float8e4m3fnuz)), seq(tensor(float8e5m2)), seq(tensor(float8e5m2fnuz)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(int8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(uint8)), tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(uint32), tensor(uint64)|
|Min|*in* data_0:**T**<br> *out* min:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
|||12|**T** = tensor(double), tensor(float), tensor(float16), tensor(int32), tensor(int64), tensor(int8), tensor(uint32), tensor(uint64), tensor(uint8)|
|||[8, 11]|**T** = tensor(double), tensor(float)|
|||[6, 7]|**T** = tensor(float)|
|Mod|*in* A:**T**<br> *in* B:**T**<br> *out* C:**T**|13+|**T** = tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
Expand Down
12 changes: 8 additions & 4 deletions onnxruntime/core/providers/cpu/math/element_wise_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,16 @@ namespace op_kernel_type_control {
ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 8, Input, 0, float, double);

ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
float, double, MLFloat16, int32_t, uint32_t, int64_t, uint64_t);
float, double, MLFloat16, int8_t, int32_t, uint32_t,
int64_t, uint8_t, uint64_t);
ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Max, 12, Input, 0,
int32_t, int64_t);

// Min
ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 8, Input, 0, float, double);
ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
float, double, MLFloat16, int32_t, uint32_t, int64_t, uint64_t);
float, double, MLFloat16, int8_t, int32_t, uint32_t,
int64_t, uint8_t, uint64_t);
ORT_SPECIFY_OP_KERNEL_ARG_REQUIRED_TYPES(kCpuExecutionProvider, kOnnxDomain, Min, 12, Input, 0,
int32_t, int64_t);

Expand Down Expand Up @@ -989,7 +991,8 @@ Status Min_8::Compute(OpKernelContext* context) const {
return MinMaxMLFloat16<true>(*this, context);
break;
default:
utils::MLTypeCallDispatcher<float, double, int32_t, uint32_t, int64_t, uint64_t>
utils::MLTypeCallDispatcher<float, double, int8_t, int32_t, uint32_t,
int64_t, uint8_t, uint64_t>
t_disp(dt_type);
return t_disp.InvokeRet<Status, ComputeImpl>(*this, context);
}
Expand Down Expand Up @@ -1055,7 +1058,8 @@ Status Max_8::Compute(OpKernelContext* context) const {
return MinMaxMLFloat16<false>(*this, context);
break;
default:
utils::MLTypeCallDispatcher<float, double, int32_t, uint32_t, int64_t, uint64_t>
utils::MLTypeCallDispatcher<float, double, int8_t, int32_t, uint32_t,
int64_t, uint8_t, uint64_t>
t_disp(dt_type);
return t_disp.InvokeRet<Status, ComputeImpl>(*this, context);
}
Expand Down
68 changes: 68 additions & 0 deletions onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3700,6 +3700,74 @@ TEST(MathOpTest, Equal_multidirectional_broadcastAB_bool) {
test.Run();
}

TEST(MathOpTest, Max_12_Int8) {
  // Max (opset 12) over int8 inputs with multidirectional broadcasting:
  // shapes {1,3}, {3,3} and {3,1} all broadcast to a {3,3} output.
  OpTester tester("Max", 12);
  tester.AddInput<int8_t>("data_0", {1, 3}, {1, 2, 3});
  tester.AddInput<int8_t>("data_2", {3, 3}, {10, 20, 30, 40, 50, 60, 70, 80, 90});
  tester.AddInput<int8_t>("data_1", {3, 1}, {-1, -2, 127});
  // Row 2 saturates to 127 (INT8_MAX) because data_1's last element dominates.
  tester.AddOutput<int8_t>("max", {3, 3}, {10, 20, 30, 40, 50, 60, 127, 127, 127});
  // TensorRT and OpenVINO do not support this int8 variadic-Max case; exclude them.
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
}

TEST(MathOpTest, Max_12_UInt8) {
  // Max (opset 12) over uint8 inputs with multidirectional broadcasting:
  // shapes {1,3}, {3,3} and {3,1} all broadcast to a {3,3} output.
  OpTester tester("Max", 12);
  tester.AddInput<uint8_t>("data_0", {1, 3}, {1, 20, 30});
  tester.AddInput<uint8_t>("data_2", {3, 3}, {10, 20, 30, 40, 50, 60, 70, 80, 90});
  tester.AddInput<uint8_t>("data_1", {3, 1}, {100, 20, 30});
  // Row 0 is dominated by data_1's 100; rows 1-2 come from data_2.
  tester.AddOutput<uint8_t>("max", {3, 3}, {100, 100, 100, 40, 50, 60, 70, 80, 90});
  // TensorRT does not support this uint8 variadic-Max case; exclude it.
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
}

TEST(MathOpTest, Min_12_Int8) {
  // Min (opset 12) over int8 inputs with multidirectional broadcasting:
  // shapes {1,3}, {3,3} and {3,1} all broadcast to a {3,3} output.
  OpTester tester("Min", 12);
  tester.AddInput<int8_t>("data_0", {1, 3}, {1, 2, 3});
  tester.AddInput<int8_t>("data_2", {3, 3}, {10, 20, 30, 40, 50, 60, -70, -80, -90});
  tester.AddInput<int8_t>("data_1", {3, 1}, {-1, 20, 127});
  // Row 0 is dominated by data_1's -1; row 1 by data_0; row 2 by data_2's negatives.
  tester.AddOutput<int8_t>("min", {3, 3}, {-1, -1, -1, 1, 2, 3, -70, -80, -90});
  // TensorRT and OpenVINO do not support this int8 variadic-Min case; exclude them.
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
}

TEST(MathOpTest, Min_12_UInt8) {
  // Min (opset 12) over uint8 inputs with multidirectional broadcasting:
  // shapes {1,3}, {3,3} and {3,1} all broadcast to a {3,3} output.
  OpTester tester("Min", 12);
  tester.AddInput<uint8_t>("data_0", {1, 3}, {1, 20, 30});
  tester.AddInput<uint8_t>("data_2", {3, 3}, {10, 20, 30, 40, 50, 60, 70, 80, 90});
  tester.AddInput<uint8_t>("data_1", {3, 1}, {1, 20, 30});
  // Each output element is the elementwise minimum across the three broadcast inputs.
  tester.AddOutput<uint8_t>("min", {3, 3}, {1, 1, 1, 1, 20, 20, 1, 20, 30});
  // TensorRT does not support this uint8 variadic-Min case; exclude it.
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
}

TEST(MathOpTest, Mean_6) {
OpTester test("Mean", 6);
std::vector<int64_t> dims{3, 3};
Expand Down
Loading