Skip to content

Commit

Permalink
Enable empty list of values as attribute (#5559)
Browse files Browse the repository at this point in the history
This PR makes two fixes:

* The existing checker does not allow an attribute to have an empty list
(of ints, floats, strings, etc.) as a value. This PR removes that
check; such validation is better left to op-specific checks where
required.
* There are similar checks in the shape-inference method of the Constant
op, which are also removed.

This allows a Constant op to construct an empty 1D tensor of ints or
floats, for example, conveniently.

---------

Signed-off-by: Ganesan Ramalingam <grama@microsoft.com>
  • Loading branch information
gramalingam committed Sep 26, 2023
1 parent 108fa8d commit d14f721
Show file tree
Hide file tree
Showing 6 changed files with 164 additions and 358 deletions.
111 changes: 2 additions & 109 deletions onnx/defs/generator/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
#include <cmath>

#include "onnx/defs/function.h"
#include "onnx/defs/generator/utils.h"
#include "onnx/defs/schema.h"

namespace ONNX_NAMESPACE {
Expand Down Expand Up @@ -57,115 +58,7 @@ ONNX_OPERATOR_SET_SCHEMA(
false)
.Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
.TypeConstraint("T", OpSchema::all_tensor_types_ir9(), "Constrain input and output types to all tensor types.")
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  // Shape/type inference for the Constant op: exactly one of the
  // value-carrying attributes must be present, and the output element
  // type and shape are derived from whichever one is set.
  auto* value = ctx.getAttribute("value");
  auto* sparse_value = ctx.getAttribute("sparse_value");
  auto* value_int = ctx.getAttribute("value_int");
  auto* value_ints = ctx.getAttribute("value_ints");
  auto* value_float = ctx.getAttribute("value_float");
  auto* value_floats = ctx.getAttribute("value_floats");
  auto* value_string = ctx.getAttribute("value_string");
  auto* value_strings = ctx.getAttribute("value_strings");

  std::vector<bool> non_null_attr = {
      (nullptr != value),
      (nullptr != sparse_value),
      (nullptr != value_int),
      (nullptr != value_ints),
      (nullptr != value_float),
      (nullptr != value_floats),
      (nullptr != value_string),
      (nullptr != value_strings)};
  if (std::count(non_null_attr.begin(), non_null_attr.end(), true) != 1) {
    fail_shape_inference(
        "One and only one of the attributes 'value', 'value_*' or 'sparse_value' must be specified for a Constant node.");
  }

  if (nullptr != value) {
    // OpSchema::Verify check ensures that the attribute value has_t():
    const TensorProto& tensor_proto = value->t();
    updateOutputElemType(ctx, 0, tensor_proto.data_type());
    updateOutputShape(ctx, 0, tensor_proto);
    return;
  }

  if (nullptr != value_int) {
    // OpSchema::Verify check ensures that the attribute value has_i():
    if (!value_int->has_i()) {
      fail_shape_inference("Attribute 'value_int' expect an integer."); // fixed: missing ';'
    }
    // Scalar INT64 output.
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_ints) {
    // 1-D INT64 output, one element per list entry. An empty list is
    // permitted and yields an empty 1-D tensor (see PR description).
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    appendDim(getOutputShape(ctx, 0), value_ints->ints_size());
    return;
  }

  if (nullptr != value_float) {
    // OpSchema::Verify check ensures that the attribute value has_f():
    if (!value_float->has_f()) {
      fail_shape_inference("Attribute 'value_float' expect a float.");
    }
    // Scalar FLOAT output.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_floats) {
    // 1-D FLOAT output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    appendDim(getOutputShape(ctx, 0), value_floats->floats_size());
    return;
  }

  if (nullptr != value_string) {
    // OpSchema::Verify check ensures that the attribute value has_s():
    if (!value_string->has_s()) {
      fail_shape_inference("Attribute 'value_string' expect a string.");
    }
    // Scalar STRING output.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_strings) {
    // 1-D STRING output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    appendDim(getOutputShape(ctx, 0), value_strings->strings_size());
    return;
  }

  if (nullptr != sparse_value) {
    // OpSchema::Verify check ensures that the attribute value
    // has_sparse_tensor():
    const SparseTensorProto& sparse = sparse_value->sparse_tensor();
    // checker.cc::check_sparse_tensor checks that the sparse-value is
    // well-formed
    updateOutputElemType(ctx, 0, sparse.values().data_type());
    auto* output_shape = getOutputShape(ctx, 0);
    for (int i = 0; i < sparse.dims_size(); ++i)
      appendDim(output_shape, sparse.dims(i));
    return;
  }

  // Unreachable: the non_null_attr count check above guarantees one of
  // the branches has returned.
  fail_shape_inference(
      "TypeAndShapeInferenceFunction implementation incomplete: "
      "this line should never be reached.");
}));
.TypeAndShapeInferenceFunction(ConstantOpInference));

static const char* ConstantOfShape_ver20_doc = R"DOC(
Generate a tensor with given value and shape.
Expand Down
223 changes: 3 additions & 220 deletions onnx/defs/generator/old.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <algorithm>
#include <functional>

#include "onnx/defs/generator/utils.h"
#include "onnx/defs/schema.h"

namespace ONNX_NAMESPACE {
Expand All @@ -13,116 +14,6 @@ This operator produces a constant tensor. Exactly one of the provided attributes
or value_* must be specified.
)DOC";

// Shape/type inference for the Constant op (older opset variants).
// Exactly one of 'value', 'sparse_value', 'value_int', 'value_ints',
// 'value_float', 'value_floats', 'value_string', 'value_strings' must be
// present; output element type and shape are derived from it. Empty lists
// in the plural attributes are accepted and produce an empty 1-D tensor.
void ConstantInference(InferenceContext& ctx) {
  auto* value = ctx.getAttribute("value");
  auto* sparse_value = ctx.getAttribute("sparse_value");
  auto* value_int = ctx.getAttribute("value_int");
  auto* value_ints = ctx.getAttribute("value_ints");
  auto* value_float = ctx.getAttribute("value_float");
  auto* value_floats = ctx.getAttribute("value_floats");
  auto* value_string = ctx.getAttribute("value_string");
  auto* value_strings = ctx.getAttribute("value_strings");

  std::vector<bool> non_null_attr = {
      (nullptr != value),
      (nullptr != sparse_value),
      (nullptr != value_int),
      (nullptr != value_ints),
      (nullptr != value_float),
      (nullptr != value_floats),
      (nullptr != value_string),
      (nullptr != value_strings)};
  if (std::count(non_null_attr.begin(), non_null_attr.end(), true) != 1) {
    fail_shape_inference(
        "One and only one of the attributes 'value', 'value_*' or 'sparse_value' must be specified for a Constant node.");
  }

  if (nullptr != value) {
    // OpSchema::Verify check ensures that the attribute value has_t():
    const TensorProto& tensor_proto = value->t();
    updateOutputElemType(ctx, 0, tensor_proto.data_type());
    updateOutputShape(ctx, 0, tensor_proto);
    return;
  }

  if (nullptr != value_int) {
    // OpSchema::Verify check ensures that the attribute value has_i():
    if (!value_int->has_i()) {
      fail_shape_inference("Attribute 'value_int' expect an integer."); // fixed: missing ';'
    }
    // Scalar INT64 output.
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_ints) {
    // 1-D INT64 output, one element per list entry. An empty list is
    // permitted and yields an empty 1-D tensor (see PR description).
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    appendDim(getOutputShape(ctx, 0), value_ints->ints_size());
    return;
  }

  if (nullptr != value_float) {
    // OpSchema::Verify check ensures that the attribute value has_f():
    if (!value_float->has_f()) {
      fail_shape_inference("Attribute 'value_float' expect a float.");
    }
    // Scalar FLOAT output.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_floats) {
    // 1-D FLOAT output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    appendDim(getOutputShape(ctx, 0), value_floats->floats_size());
    return;
  }

  if (nullptr != value_string) {
    // OpSchema::Verify check ensures that the attribute value has_s():
    if (!value_string->has_s()) {
      fail_shape_inference("Attribute 'value_string' expect a string.");
    }
    // Scalar STRING output.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_strings) {
    // 1-D STRING output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    appendDim(getOutputShape(ctx, 0), value_strings->strings_size());
    return;
  }

  if (nullptr != sparse_value) {
    // OpSchema::Verify check ensures that the attribute value
    // has_sparse_tensor():
    const SparseTensorProto& sparse = sparse_value->sparse_tensor();
    // checker.cc::check_sparse_tensor checks that the sparse-value is
    // well-formed
    updateOutputElemType(ctx, 0, sparse.values().data_type());
    auto* output_shape = getOutputShape(ctx, 0);
    for (int i = 0; i < sparse.dims_size(); ++i)
      appendDim(output_shape, sparse.dims(i));
    return;
  }

  // Unreachable: the non_null_attr count check above guarantees one of
  // the branches has returned.
  fail_shape_inference(
      "TypeAndShapeInferenceFunction implementation incomplete: "
      "this line should never be reached.");
}

ONNX_OPERATOR_SET_SCHEMA(
Constant,
13,
Expand Down Expand Up @@ -166,7 +57,7 @@ ONNX_OPERATOR_SET_SCHEMA(
false)
.Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
.TypeConstraint("T", OpSchema::all_tensor_types_ir4(), "Constrain input and output types to all tensor types.")
.TypeAndShapeInferenceFunction(ConstantInference));
.TypeAndShapeInferenceFunction(ConstantOpInference));

static const char* Constant_ver12_doc = R"DOC(
This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value,
Expand Down Expand Up @@ -216,115 +107,7 @@ ONNX_OPERATOR_SET_SCHEMA(
false)
.Output(0, "output", "Output tensor containing the same value of the provided tensor.", "T")
.TypeConstraint("T", OpSchema::all_tensor_types(), "Constrain input and output types to all tensor types.")
.TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
  // Shape/type inference for Constant (opset 12): exactly one of the
  // value-carrying attributes must be present, and the output element
  // type and shape are derived from whichever one is set.
  auto* value = ctx.getAttribute("value");
  auto* sparse_value = ctx.getAttribute("sparse_value");
  auto* value_int = ctx.getAttribute("value_int");
  auto* value_ints = ctx.getAttribute("value_ints");
  auto* value_float = ctx.getAttribute("value_float");
  auto* value_floats = ctx.getAttribute("value_floats");
  auto* value_string = ctx.getAttribute("value_string");
  auto* value_strings = ctx.getAttribute("value_strings");

  std::vector<bool> non_null_attr = {
      (nullptr != value),
      (nullptr != sparse_value),
      (nullptr != value_int),
      (nullptr != value_ints),
      (nullptr != value_float),
      (nullptr != value_floats),
      (nullptr != value_string),
      (nullptr != value_strings)};
  if (std::count(non_null_attr.begin(), non_null_attr.end(), true) != 1) {
    fail_shape_inference(
        "One and only one of the attributes 'value', 'value_*' or 'sparse_value' must be specified for a Constant node.");
  }

  if (nullptr != value) {
    // OpSchema::Verify check ensures that the attribute value has_t():
    const TensorProto& tensor_proto = value->t();
    updateOutputElemType(ctx, 0, tensor_proto.data_type());
    updateOutputShape(ctx, 0, tensor_proto);
    return;
  }

  if (nullptr != value_int) {
    // OpSchema::Verify check ensures that the attribute value has_i():
    if (!value_int->has_i()) {
      fail_shape_inference("Attribute 'value_int' expect an integer.");
    }
    // Scalar INT64 output.
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_ints) {
    // 1-D INT64 output, one element per list entry. An empty list is
    // permitted and yields an empty 1-D tensor (see PR description).
    updateOutputElemType(ctx, 0, TensorProto::INT64);
    appendDim(getOutputShape(ctx, 0), value_ints->ints_size());
    return;
  }

  if (nullptr != value_float) {
    // OpSchema::Verify check ensures that the attribute value has_f():
    if (!value_float->has_f()) {
      fail_shape_inference("Attribute 'value_float' expect a float.");
    }
    // Scalar FLOAT output.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_floats) {
    // 1-D FLOAT output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::FLOAT);
    appendDim(getOutputShape(ctx, 0), value_floats->floats_size());
    return;
  }

  if (nullptr != value_string) {
    // OpSchema::Verify check ensures that the attribute value has_s():
    if (!value_string->has_s()) {
      fail_shape_inference("Attribute 'value_string' expect a string.");
    }
    // Scalar STRING output.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    updateOutputShape(ctx, 0, TensorShapeProto());
    return;
  }

  if (nullptr != value_strings) {
    // 1-D STRING output, one element per list entry; empty list allowed.
    updateOutputElemType(ctx, 0, TensorProto::STRING);
    appendDim(getOutputShape(ctx, 0), value_strings->strings_size());
    return;
  }

  if (nullptr != sparse_value) {
    // OpSchema::Verify check ensures that the attribute value
    // has_sparse_tensor():
    const SparseTensorProto& sparse = sparse_value->sparse_tensor();
    // checker.cc::check_sparse_tensor checks that the sparse-value is
    // well-formed
    updateOutputElemType(ctx, 0, sparse.values().data_type());
    auto* output_shape = getOutputShape(ctx, 0);
    for (int i = 0; i < sparse.dims_size(); ++i)
      appendDim(output_shape, sparse.dims(i));
    return;
  }

  // Unreachable: the non_null_attr count check above guarantees one of
  // the branches has returned.
  fail_shape_inference(
      "TypeAndShapeInferenceFunction implementation incomplete: "
      "this line should never be reached.");
}));
.TypeAndShapeInferenceFunction(ConstantOpInference));

static const char* Constant_ver1_doc = R"DOC(A constant tensor.)DOC";

Expand Down

0 comments on commit d14f721

Please sign in to comment.