2 changes: 1 addition & 1 deletion onnxscript/function_libs/torch_lib/ops/fft.py
@@ -123,7 +123,7 @@ def aten__fft_c2r(
dft_length=last_dim_size,
axis=dimension,
inverse=True,
onesided=False,
onesided=True,
Collaborator comment:

Is there a way to implement this with how the DFT op is currently defined? I think the current implementation has issues with normalization, but I couldn't figure out what went wrong. Your expertise is appreciated!

Given that the ONNX update is not yet merged and implemented, we are not able to incorporate the change into torchlib at the moment, as we treat it as UB (unless the current behavior is actually correct in runtimes?).
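For reference, here is a minimal NumPy sketch (illustrative only; `c2r_reference` is a made-up helper, not torchlib or runtime code) of what a one-sided complex-to-real inverse is expected to produce, which may help pin down the normalization question:

```python
# Illustrative reference only: numpy's irfft reconstructs the full Hermitian
# spectrum from the one-sided half and applies the inverse transform's 1/n
# ("backward") normalization, which is what C2R output should match.
import numpy as np

def c2r_reference(onesided_spectrum: np.ndarray, last_dim_size: int) -> np.ndarray:
    return np.fft.irfft(onesided_spectrum, n=last_dim_size)

x = np.random.randn(8)
spec = np.fft.rfft(x)  # one-sided spectrum: 8 // 2 + 1 = 5 bins
np.testing.assert_allclose(c2r_reference(spec, 8), x, atol=1e-12)
```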

)
transformed = _fftn_onnx_normalization(
transformed,
24 changes: 24 additions & 0 deletions onnxscript/onnx_opset/__init__.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
# pylint: disable=W0221,W0222,R0901,W0237
# mypy: disable-error-code=override
# ruff: noqa: N801,E741,RUF036,D214,D402,D405,D411,D412,D416,D417
# --------------------------------------------------------------------------

from __future__ import annotations
@@ -39,11 +40,16 @@
from onnxscript.onnx_opset._impl.opset22 import Opset22
from onnxscript.onnx_opset._impl.opset23 import Opset23
from onnxscript.onnx_opset._impl.opset24 import Opset24
from onnxscript.onnx_opset._impl.opset25 import Opset25
from onnxscript.onnx_opset._impl.opset26 import Opset26
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml4 import Opset_ai_onnx_ml4
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml5 import Opset_ai_onnx_ml5
from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import (
Opset_ai_onnx_preview_training1,
)
from onnxscript.values import Opset

__all__ = [
@@ -72,11 +78,14 @@
"opset22",
"opset23",
"opset24",
"opset25",
"opset26",
"opset_ai_onnx_ml1",
"opset_ai_onnx_ml2",
"opset_ai_onnx_ml3",
"opset_ai_onnx_ml4",
"opset_ai_onnx_ml5",
"opset_ai_onnx_preview_training1",
]


@@ -110,11 +119,14 @@
opset22 = Opset22()
opset23 = Opset23()
opset24 = Opset24()
opset25 = Opset25()
opset26 = Opset26()
opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
opset_ai_onnx_ml4 = Opset_ai_onnx_ml4()
opset_ai_onnx_ml5 = Opset_ai_onnx_ml5()
opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1()
all_opsets: Mapping[Tuple[str, int], Opset] = {
(
"",
@@ -212,6 +224,14 @@
"",
24,
): opset24,
(
"",
25,
): opset25,
(
"",
26,
): opset26,
(
"ai.onnx.ml",
1,
@@ -232,4 +252,8 @@
"ai.onnx.ml",
5,
): opset_ai_onnx_ml5,
(
"ai.onnx.preview.training",
1,
): opset_ai_onnx_preview_training1,
}
137 changes: 31 additions & 106 deletions onnxscript/onnx_opset/_impl/opset1.py
@@ -7,7 +7,7 @@
# --------------------------------------------------------------------------
# pylint: disable=W0221,W0222,R0901,W0237
# mypy: disable-error-code=override
# ruff: noqa: D214, D402, D405, D411, D416, D417
# ruff: noqa: N801,E741,RUF036,D214,D402,D405,D411,D412,D416,D417
# --------------------------------------------------------------------------

from __future__ import annotations
@@ -397,18 +397,7 @@ def BatchNormalization(
)

T2_Cast: TypeAlias = Union[
BOOL,
DOUBLE,
FLOAT,
FLOAT16,
INT16,
INT32,
INT64,
INT8,
UINT16,
UINT32,
UINT64,
UINT8,
BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
]

def Cast(self, input: T1_Cast, *, to: str) -> T2_Cast:
@@ -847,11 +836,7 @@ def Dropout(
T_Elu = TypeVar("T_Elu", DOUBLE, FLOAT, FLOAT16)

def Elu(
self,
X: T_Elu,
*,
alpha: float = 1.0,
consumed_inputs: Optional[Sequence[int]] = None,
self, X: T_Elu, *, alpha: float = 1.0, consumed_inputs: Optional[Sequence[int]] = None
) -> T_Elu:
r"""[🌐 Elu(1)](https://onnx.ai/onnx/operators/onnx__Elu.html#elu-1 "Online Documentation")

@@ -873,9 +858,7 @@ def Elu(
schema = get_schema("Elu", 1, "")
op = Op(self, "Elu", schema)
return op(
*self._prepare_inputs(schema, X),
alpha=alpha,
consumed_inputs=consumed_inputs,
*self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs
)

T_Equal = TypeVar("T_Equal", BOOL, INT32, INT64)
@@ -1354,12 +1337,7 @@ def GlobalMaxPool(self, X: T_GlobalMaxPool) -> T_GlobalMaxPool:
T1_Greater: TypeAlias = BOOL

def Greater(
self,
A: T_Greater,
B: T_Greater,
*,
axis: Optional[int] = None,
broadcast: int = 0,
self, A: T_Greater, B: T_Greater, *, axis: Optional[int] = None, broadcast: int = 0
) -> T1_Greater:
r"""[🌐 Greater(1)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-1 "Online Documentation")

@@ -1624,11 +1602,7 @@ def LRN(
schema = get_schema("LRN", 1, "")
op = Op(self, "LRN", schema)
return op(
*self._prepare_inputs(schema, X),
alpha=alpha,
beta=beta,
bias=bias,
size=size,
*self._prepare_inputs(schema, X), alpha=alpha, beta=beta, bias=bias, size=size
)

T_LSTM = TypeVar("T_LSTM", DOUBLE, FLOAT, FLOAT16)
@@ -1847,9 +1821,7 @@ def LeakyRelu(
schema = get_schema("LeakyRelu", 1, "")
op = Op(self, "LeakyRelu", schema)
return op(
*self._prepare_inputs(schema, X),
alpha=alpha,
consumed_inputs=consumed_inputs,
*self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs
)

T_Less = TypeVar("T_Less", DOUBLE, FLOAT, FLOAT16)
@@ -1962,11 +1934,7 @@ def LogSoftmax(self, input: T_LogSoftmax, *, axis: int = 1) -> T_LogSoftmax:
)

def Loop(
self,
M: Optional[I_Loop],
cond: Optional[B_Loop],
*v_initial: V_Loop,
body: GraphProto,
self, M: Optional[I_Loop], cond: Optional[B_Loop], *v_initial: V_Loop, body: GraphProto
) -> V_Loop:
r"""[🌐 Loop(1)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-1 "Online Documentation")

@@ -2524,11 +2492,7 @@ def Or(self, A: T_Or, B: T_Or, *, axis: Optional[int] = None, broadcast: int = 0
T_PRelu = TypeVar("T_PRelu", DOUBLE, FLOAT, FLOAT16)

def PRelu(
self,
X: T_PRelu,
slope: T_PRelu,
*,
consumed_inputs: Optional[Sequence[int]] = None,
self, X: T_PRelu, slope: T_PRelu, *, consumed_inputs: Optional[Sequence[int]] = None
) -> T_PRelu:
r"""[🌐 PRelu(1)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-1 "Online Documentation")

@@ -2602,10 +2566,7 @@ def Pad(
schema = get_schema("Pad", 1, "")
op = Op(self, "Pad", schema)
return op(
*self._prepare_inputs(schema, data),
mode=mode,
paddings=paddings,
value=value,
*self._prepare_inputs(schema, data), mode=mode, paddings=paddings, value=value
)

T_Pow = TypeVar("T_Pow", DOUBLE, FLOAT, FLOAT16)
@@ -3013,11 +2974,7 @@ def RandomUniformLike(
schema = get_schema("RandomUniformLike", 1, "")
op = Op(self, "RandomUniformLike", schema)
return op(
*self._prepare_inputs(schema, input),
dtype=dtype,
high=high,
low=low,
seed=seed,
*self._prepare_inputs(schema, input), dtype=dtype, high=high, low=low, seed=seed
)

T_Reciprocal = TypeVar("T_Reciprocal", DOUBLE, FLOAT, FLOAT16)
@@ -3046,11 +3003,7 @@ def Reciprocal(
T_ReduceL1 = TypeVar("T_ReduceL1", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

def ReduceL1(
self,
data: T_ReduceL1,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceL1, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceL1:
r"""[🌐 ReduceL1(1)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-1 "Online Documentation")

@@ -3080,11 +3033,7 @@ def ReduceL1(
T_ReduceL2 = TypeVar("T_ReduceL2", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

def ReduceL2(
self,
data: T_ReduceL2,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceL2, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceL2:
r"""[🌐 ReduceL2(1)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-1 "Online Documentation")

@@ -3116,11 +3065,7 @@ def ReduceL2(
)

def ReduceLogSum(
self,
data: T_ReduceLogSum,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceLogSum, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceLogSum:
r"""[🌐 ReduceLogSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-1 "Online Documentation")

@@ -3186,11 +3131,7 @@ def ReduceLogSumExp(
T_ReduceMax = TypeVar("T_ReduceMax", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

def ReduceMax(
self,
data: T_ReduceMax,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceMax, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceMax:
r"""[🌐 ReduceMax(1)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-1 "Online Documentation")

@@ -3222,11 +3163,7 @@ def ReduceMax(
)

def ReduceMean(
self,
data: T_ReduceMean,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceMean, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceMean:
r"""[🌐 ReduceMean(1)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-1 "Online Documentation")

@@ -3256,11 +3193,7 @@ def ReduceMean(
T_ReduceMin = TypeVar("T_ReduceMin", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

def ReduceMin(
self,
data: T_ReduceMin,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceMin, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceMin:
r"""[🌐 ReduceMin(1)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-1 "Online Documentation")

@@ -3292,11 +3225,7 @@ def ReduceMin(
)

def ReduceProd(
self,
data: T_ReduceProd,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceProd, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceProd:
r"""[🌐 ReduceProd(1)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-1 "Online Documentation")

@@ -3326,11 +3255,7 @@ def ReduceProd(
T_ReduceSum = TypeVar("T_ReduceSum", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

def ReduceSum(
self,
data: T_ReduceSum,
*,
axes: Optional[Sequence[int]] = None,
keepdims: int = 1,
self, data: T_ReduceSum, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
) -> T_ReduceSum:
r"""[🌐 ReduceSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceSum.html#reducesum-1 "Online Documentation")

@@ -3445,9 +3370,7 @@ def Reshape(
schema = get_schema("Reshape", 1, "")
op = Op(self, "Reshape", schema)
return op(
*self._prepare_inputs(schema, data),
consumed_inputs=consumed_inputs,
shape=shape,
*self._prepare_inputs(schema, data), consumed_inputs=consumed_inputs, shape=shape
)

T_Selu = TypeVar("T_Selu", DOUBLE, FLOAT, FLOAT16)
@@ -4036,9 +3959,16 @@ def Transpose(
r"""[🌐 Transpose(1)](https://onnx.ai/onnx/operators/onnx__Transpose.html#transpose-1 "Online Documentation")


Transpose the input tensor similar to numpy.transpose. For example, when
perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape
will be (2, 1, 3).
Returns a transpose of the input tensor. (Similar to `numpy.transpose`).
The optional attribute `perm` must be a permutation of the dimensions of
the input tensor. Axis `i` of the output tensor corresponds to the axis
`perm[i]` of the input tensor.
For example, when perm=(1, 0, 2), given an input tensor of shape (1, 2, 3),
the output shape will be (2, 1, 3).
When perm=(1, 2, 0), given an input tensor of shape (1, 2, 3),
the output shape will be (2, 3, 1).
If the attribute `perm` is omitted, its default value is `(n-1, ..., 0)`,
where `n` is the rank of the input tensor.


Args:
@@ -4095,12 +4025,7 @@ def Unsqueeze(self, data: T_Unsqueeze, *, axes: Sequence[int]) -> T_Unsqueeze:
T_Upsample = TypeVar("T_Upsample", BOOL, DOUBLE, FLOAT, FLOAT16, INT32, INT64)

def Upsample(
self,
X: T_Upsample,
*,
height_scale: float,
mode: str = "nearest",
width_scale: float,
self, X: T_Upsample, *, height_scale: float, mode: str = "nearest", width_scale: float
) -> T_Upsample:
r"""[🌐 Upsample(1)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-1 "Online Documentation")

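One note on the Transpose(1) docstring rewrite earlier in this file: the `perm` semantics it describes match `numpy.transpose`, as this small illustrative check (not part of the generated file) shows:

```python
# Illustration of the perm semantics in the updated Transpose(1) docstring,
# using numpy.transpose as the reference behavior.
import numpy as np

x = np.zeros((1, 2, 3))
assert np.transpose(x, (1, 0, 2)).shape == (2, 1, 3)  # perm=(1, 0, 2)
assert np.transpose(x, (1, 2, 0)).shape == (2, 3, 1)  # perm=(1, 2, 0)
assert np.transpose(x).shape == (3, 2, 1)             # default perm = (n-1, ..., 0)
```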
2 changes: 1 addition & 1 deletion onnxscript/onnx_opset/_impl/opset10.py
@@ -7,7 +7,7 @@
# --------------------------------------------------------------------------
# pylint: disable=W0221,W0222,R0901,W0237
# mypy: disable-error-code=override
# ruff: noqa: D402
# ruff: noqa: N801,E741,RUF036,D214,D402,D405,D411,D412,D416,D417
# --------------------------------------------------------------------------

from __future__ import annotations