config.json (1,298 bytes, commit 7fcbd90):
{
  "_name_or_path": "openai/clip-vit-base-patch32",
  "architectures": [
    "CLIPForImageClassification"
  ],
  "id2label": {
    "0": "A",
    "1": "B",
    "2": "C",
    "3": "D",
    "4": "E",
    "5": "F",
    "6": "G",
    "7": "H",
    "8": "I",
    "9": "J",
    "10": "K",
    "11": "L",
    "12": "M",
    "13": "N",
    "14": "O",
    "15": "P",
    "16": "Q",
    "17": "R",
    "18": "S",
    "19": "T",
    "20": "U",
    "21": "V",
    "22": "W",
    "23": "X",
    "24": "Y",
    "25": "Z"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "A": 0,
    "B": 1,
    "C": 2,
    "D": 3,
    "E": 4,
    "F": 5,
    "G": 6,
    "H": 7,
    "I": 8,
    "J": 9,
    "K": 10,
    "L": 11,
    "M": 12,
    "N": 13,
    "O": 14,
    "P": 15,
    "Q": 16,
    "R": 17,
    "S": 18,
    "T": 19,
    "U": 20,
    "V": 21,
    "W": 22,
    "X": 23,
    "Y": 24,
    "Z": 25
  },
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "problem_type": "single_label_classification",
  "projection_dim": 512,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model"
  }
}
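As a usage sketch (not part of the file): a config like this, naming CLIPForImageClassification with single_label_classification over 26 letter classes, is typically consumed through the transformers auto classes. The checkpoint path "./checkpoint" and the input file "letter.png" below are placeholder assumptions; substitute the actual model directory or Hub repo id, and note the directory is assumed to also contain a preprocessor_config.json.

# Minimal sketch, assuming a local checkpoint directory holding the config
# above plus weights and preprocessor files. "./checkpoint" and "letter.png"
# are placeholders, not values taken from the file.
import torch
from PIL import Image
from transformers import AutoImageProcessor, CLIPForImageClassification

model = CLIPForImageClassification.from_pretrained("./checkpoint")
processor = AutoImageProcessor.from_pretrained("./checkpoint")

# Preprocess one image and run a forward pass without tracking gradients.
image = Image.open("letter.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label from the config maps the argmax class index back to a letter A-Z
# (transformers converts the JSON's string keys to ints when loading).
predicted = model.config.id2label[logits.argmax(-1).item()]
print(predicted)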