
Conversation

@DanielSun11
Collaborator

Removed the cases that triggered precision-monitoring errors (a reproduction sketch follows the list):

paddle.Tensor.__sub__(Tensor([64, 1, 49],"float16"), Tensor([64, 49, 1],"float16"), )
paddle.Tensor.divide(Tensor([1, 16, 14, 15, 384],"float16"), Tensor([],"float32"), )
paddle.Tensor.divide(Tensor([1, 16, 70, 56, 96],"float16"), Tensor([],"float32"), )
paddle.Tensor.expand(Tensor([1, 1, 768],"float16"), tuple(112,-1,-1,), )
paddle.incubate.nn.functional.fused_bias_act(Tensor([2, 22016],"int32"), None, act_method="swiglu", compute_dtype="fp16", dequant_scales=Tensor([22016],"float32"), shift=None, smooth=None, quant_scale=0.0009654839523136616, quant_round_type=0, quant_max_bound=127.0, quant_min_bound=-127.0, )
paddle.incubate.nn.functional.fused_bias_act(Tensor([2, 22016],"int32"), None, act_method="swiglu", compute_dtype="fp16", dequant_scales=Tensor([22016],"float32"), shift=None, smooth=None, quant_scale=0.0010981468949466944, quant_round_type=0, quant_max_bound=127.0, quant_min_bound=-127.0, )
paddle.incubate.nn.functional.fused_bias_act(Tensor([2, 22016],"int32"), None, act_method="swiglu", compute_dtype="fp16", dequant_scales=Tensor([22016],"float32"), shift=None, smooth=None, quant_scale=0.0014022786635905504, quant_round_type=0, quant_max_bound=127.0, quant_min_bound=-127.0, )
paddle.incubate.nn.functional.fused_bias_act(Tensor([2, 22016],"int32"), None, act_method="swiglu", compute_dtype="fp16", dequant_scales=Tensor([22016],"float32"), shift=None, smooth=None, quant_scale=0.001479289960116148, quant_round_type=0, quant_max_bound=127.0, quant_min_bound=-127.0, )
paddle.incubate.nn.functional.fused_bias_act(x=Tensor([2, 20, 512],"int32"), bias=Tensor([512],"float16"), dequant_scales=Tensor([512],"float32"), shift=Tensor([256],"float16"), smooth=Tensor([256],"float16"), act_method="geglu", compute_dtype="fp16", quant_scale=0.5, quant_round_type=1, quant_max_bound=127.0, quant_min_bound=-127.0, )
paddle.lerp(Tensor([1, 3],"float64"), Tensor([1, 3],"float64"), Tensor([1, 3],"float64"), )
paddle.nn.functional.normalize(Tensor([1],"float32"), axis=0, epsilon=1e-10, )
paddle.nn.functional.softmax(Tensor([1004, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1028, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1064, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1108, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1136, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1140, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([116, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1176, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([120, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1204, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([124, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([128, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([13, 16, 7, 7],"float16"), axis=-1, )
paddle.nn.functional.softmax(Tensor([1368, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([14, 4, 7, 7],"float16"), )
paddle.nn.functional.softmax(Tensor([144, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1528, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1716, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([1728, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([192, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([200, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([204, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([232, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([252, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([264, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([320, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([344, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([368, 8, 3, 3],"float16"), )
paddle.nn.functional.softmax(Tensor([380, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([384, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([388, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([392, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([4, 8, 16, 16],"float16"), axis=-1, )
paddle.nn.functional.softmax(Tensor([404, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([412, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([440, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([468, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([496, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([500, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([508, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([52, 4, 7, 7],"float16"), )
paddle.nn.functional.softmax(Tensor([524, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([532, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([536, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([540, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([552, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([556, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([560, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([576, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([628, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([64, 2, 1, 128],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([684, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([712, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([724, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([736, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([744, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([756, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([768, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([776, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([792, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([796, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([804, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([808, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([812, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([816, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([832, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([864, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([924, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([928, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([932, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([944, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([948, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([96, 17],"float16"), axis=1, )
paddle.nn.functional.softmax(Tensor([968, 17],"float16"), axis=1, )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[0,1,3,],list[0,3,1,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[0,3,2,],list[2,0,3,1,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[1,0,2,3,],list[],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[1,0,3,],list[2,1,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[1,2,0,3,],list[0,2,1,3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[1,2,3,0,],list[1,3,0,2,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[1,3,2,0,],list[2,1,0,3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[2,0,1,3,],list[2,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[2,1,0,3,],list[2,0,3,1,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[2,1,0,3,],list[3,1,2,0,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[2,1,3,0,],list[1,2,3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,0,2,1,],list[0,2,3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,1,0,2,],list[2,3,1,0,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,1,2,],list[1,2,3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,1,],list[3,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,2,0,],list[3,2,0,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,],list[2,1,0,],], )
paddle.tensordot(Tensor([1, 1, 1, 5],"float64"), Tensor([1, 5, 1, 1],"float64"), list[list[3,],list[],], )
paddle.tensordot(Tensor([1, 1, 5, 5],"float64"), Tensor([5, 5, 1, 5],"float64"), list[list[1,0,3,2,],list[2,3,0,1,],], )
paddle.tensordot(Tensor([1, 1, 5, 5],"float64"), Tensor([5, 5, 1, 5],"float64"), list[list[3,2,1,0,],list[0,1,3,2,],], )
paddle.tensordot(Tensor([1, 5, 5, 5],"float64"), Tensor([1, 1, 1, 5],"float64"), list[list[0,3,1,2,],list[3,2,1,0,],], )
paddle.tensordot(Tensor([1, 5, 5, 5],"float64"), Tensor([1, 1, 1, 5],"float64"), list[list[3,2,0,],list[2,0,3,1,],], )
paddle.tensordot(Tensor([2, 3],"float64"), Tensor([2, 3],"float64"), axes=2, )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[0,1,3,2,],list[1,3,0,2,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[0,2,3,],list[3,2,0,1,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[1,0,2,],list[0,1,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[1,3,2,0,],list[2,1,0,3,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[3,0,2,1,],list[2,1,0,3,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([1, 1, 1, 1],"float64"), list[list[3,2,0,1,],list[3,2,0,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([5, 5, 1, 5],"float64"), list[list[2,0,1,],list[0,1,3,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([5, 5, 1, 5],"float64"), list[list[3,0,2,1,],list[2,1,0,3,],], )
paddle.tensordot(Tensor([5, 5, 5, 1],"float64"), Tensor([5, 5, 1, 5],"float64"), list[list[3,1,0,2,],list[2,3,1,0,],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float32"), Tensor([5, 5, 5, 5],"float32"), list[list[0,1,3,2,],list[2,3,0,1,],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float32"), Tensor([5, 5, 5, 5],"float32"), list[list[0,2,3,],list[3,2,0,1,],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float32"), Tensor([5, 5, 5, 5],"float32"), list[list[1,0,2,3,],list[],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float32"), Tensor([5, 5, 5, 5],"float32"), list[list[1,3,2,0,],list[3,0,1,2,],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float64"), Tensor([5, 5, 5, 5],"float64"), list[list[1,0,3,2,],list[2,3,0,1,],], )
paddle.tensordot(Tensor([5, 5, 5, 5],"float64"), Tensor([5, 5, 5, 5],"float64"), list[list[1,2,3,0,],list[],], )

@paddle-bot

paddle-bot commented on Dec 29, 2025

Thanks for your contribution!

DanielSun11 merged commit a3c6904 into PFCCLab:main on Dec 29, 2025
1 check passed