whisper-small-bemba2 / quant_config.json
{
  "per_channel": false,
  "reduce_range": false,
  "per_model_config": {
    "decoder_model": {
      "op_types": [
        "Div",
        "Concat",
        "Gather",
        "Less",
        "ReduceMean",
        "MatMul",
        "Erf",
        "Add",
        "Expand",
        "Sub",
        "ConstantOfShape",
        "Range",
        "Sqrt",
        "Slice",
        "Squeeze",
        "Cast",
        "Equal",
        "Where",
        "Constant",
        "Pow",
        "Softmax",
        "Shape",
        "Reshape",
        "Mul",
        "Transpose",
        "Unsqueeze"
      ],
      "weight_type": "QInt8"
    },
    "decoder_model_merged": {
      "op_types": [
        "Div",
        "Gather",
        "Concat",
        "Less",
        "ReduceMean",
        "MatMul",
        "Erf",
        "Add",
        "Expand",
        "Sub",
        "ConstantOfShape",
        "Range",
        "Sqrt",
        "Slice",
        "Squeeze",
        "Cast",
        "Equal",
        "Where",
        "If",
        "Constant",
        "Pow",
        "Softmax",
        "Shape",
        "Reshape",
        "Mul",
        "Transpose",
        "Unsqueeze"
      ],
      "weight_type": "QInt8"
    },
    "decoder_with_past_model": {
      "op_types": [
        "Div",
        "Concat",
        "Gather",
        "Constant",
        "Pow",
        "Sub",
        "Mul",
        "Transpose",
        "Add",
        "MatMul",
        "Softmax",
        "Sqrt",
        "Slice",
        "Shape",
        "Reshape",
        "ReduceMean",
        "Erf",
        "Unsqueeze"
      ],
      "weight_type": "QInt8"
    },
    "encoder_model": {
      "op_types": [
        "Div",
        "Gather",
        "Concat",
        "Constant",
        "Pow",
        "Sub",
        "Unsqueeze",
        "Mul",
        "Conv",
        "Transpose",
        "MatMul",
        "Softmax",
        "Sqrt",
        "Shape",
        "Reshape",
        "ReduceMean",
        "Erf",
        "Add"
      ],
      "weight_type": "QUInt8"
    }
  }
}
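
The settings above correspond to the knobs exposed by ONNX Runtime's dynamic quantization API: "per_channel" and "reduce_range" are global flags, while each entry under "per_model_config" records, for one exported ONNX graph, the operator types to quantize and the weight type (signed QInt8 for the decoder graphs, unsigned QUInt8 for the encoder). The following is a minimal Python sketch of how a config like this could be applied with onnxruntime.quantization.quantize_dynamic; the onnx/ input directory and the *_quantized.onnx output names are assumptions for illustration, not paths taken from this repository.

import json
from pathlib import Path

from onnxruntime.quantization import QuantType, quantize_dynamic

# Read the quantization settings recorded above.
config = json.loads(Path("quant_config.json").read_text())
weight_types = {"QInt8": QuantType.QInt8, "QUInt8": QuantType.QUInt8}

for model_name, model_cfg in config["per_model_config"].items():
    src = Path("onnx") / f"{model_name}.onnx"            # assumed location of the float32 export
    dst = Path("onnx") / f"{model_name}_quantized.onnx"  # assumed output name
    quantize_dynamic(
        model_input=src,
        model_output=dst,
        per_channel=config["per_channel"],               # false: one scale/zero-point per tensor
        reduce_range=config["reduce_range"],             # false: use the full 8-bit range
        weight_type=weight_types[model_cfg["weight_type"]],
        op_types_to_quantize=model_cfg["op_types"],
    )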