DriverTrac/venv/lib/python3.12/site-packages/onnx/defs/math/utils.h
2025-11-28 09:08:33 +05:30

54 lines
1.5 KiB
C++

/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <string>
#include "onnx/defs/schema.h"
#include "onnx/defs/shape_inference.h"
#include "onnx/defs/tensor_proto_util.h"
#include "onnx/onnx_pb.h"
namespace ONNX_NAMESPACE {
namespace defs {
namespace math {
namespace utils {
// Returns a functor that populates an OpSchema for TopK-style operators,
// parameterized by the permitted input element types. (Definition lives in
// the corresponding .cc file; exact schema contents not visible here.)
std::function<void(OpSchema&)> TopKOpGenerator(const std::vector<std::string>& allowed_types);
// Extracts the first element of `t` and converts it to T.
//
// A null tensor yields a value-initialized T{} (caller treats the input as
// absent). Supported source element types are FLOAT, DOUBLE, INT32 and
// INT64; any other data type aborts shape inference via
// fail_shape_inference (which does not return).
template <typename T>
T GetScalarValueFromTensor(const ONNX_NAMESPACE::TensorProto* t) {
  if (t == nullptr) {
    return T{};
  }
  const auto dtype = t->data_type();
  if (dtype == ONNX_NAMESPACE::TensorProto::FLOAT) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<float>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::DOUBLE) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<double>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::INT32) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<int32_t>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::INT64) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<int64_t>(t).at(0));
  }
  fail_shape_inference("Unsupported input data type of ", dtype);
}
// Shape inference for MatMul-style ops over the inputs at input1Idx/input2Idx
// of `ctx`. (Definition not visible here; presumably implements Numpy-style
// matmul shape rules — confirm in the .cc file.)
void MatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx, int input1Idx, int input2Idx);
// Shape inference for QLinearMatMul; operand indices are fixed by that op's
// schema, hence no index parameters.
void QLinearMatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx);
// Returns the shared doc string used by QLinearMatMul schema registrations.
const char* QLinearMatMulDoc();
// Applies the integer binary operation named by `op_type` to (a, b);
// supported op_type values are defined at the definition site.
int MathOpTwoIntegers(const std::string& op_type, int a, int b);
} // namespace utils
} // namespace math
} // namespace defs
} // namespace ONNX_NAMESPACE