Tags: Feature Extraction · Transformers · Safetensors · vision-encoder-decoder · custom_code
Commit 3079483 (1 parent: b109bce), committed by anicolson

Upload model

README.md CHANGED
@@ -1,9 +1,9 @@
 ---
-library_name: transformers
-license: apache-2.0
 datasets:
 - StanfordAIMI/interpret-cxr-test-public
 - StanfordAIMI/interpret-cxr-test-hidden
+library_name: transformers
+license: apache-2.0
 ---
 
 # CXRMate-RRG24: Entropy-Augmented Self-Critical Sequence Training for Radiology Report Generation
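The card tags the checkpoint as a Transformers vision-encoder-decoder model with custom code, so loading it requires `trust_remote_code=True`. A minimal loading sketch, assuming the repository id is `aehrc/cxrmate-rrg24` (the id is not stated on this commit page) and that the repo's `auto_map` resolves the custom classes:

```python
import transformers

ckpt = "aehrc/cxrmate-rrg24"  # assumed repo id; substitute the actual repository

# trust_remote_code is needed because the weights ship with custom modelling
# files (modelling_cxrrg.py, modelling_uniformer.py) in the same repository.
model = transformers.AutoModel.from_pretrained(ckpt, trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained(ckpt)
model.eval()
```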
config.json CHANGED
@@ -217,5 +217,5 @@
 "model_type": "vision-encoder-decoder",
 "tie_word_embeddings": false,
 "torch_dtype": "float32",
-"transformers_version": "4.39.0"
+"transformers_version": "4.40.2"
 }
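The only change here is the `transformers_version` stamp (4.39.0 to 4.40.2), which records the library version the checkpoint was saved with; it is informational and does not pin a dependency. A small sketch for comparing it against the installed version, assuming a local copy of `config.json`:

```python
import json

import transformers

# config.json from the model repository, downloaded locally.
with open("config.json") as f:
    config = json.load(f)

saved_with = config["transformers_version"]  # "4.40.2" after this commit
running = transformers.__version__
print(f"checkpoint saved with transformers {saved_with}, running {running}")
```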
generation_config.json CHANGED
@@ -3,5 +3,5 @@
 "bos_token_id": 1,
 "eos_token_id": 2,
 "pad_token_id": 4,
-"transformers_version": "4.39.0"
+"transformers_version": "4.40.2"
 }
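These token ids are the generation defaults shipped with the checkpoint; `generate()` reads them from `generation_config.json` once the model is loaded with `from_pretrained`. A sketch of inspecting them directly, again assuming the `aehrc/cxrmate-rrg24` repository id:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("aehrc/cxrmate-rrg24")  # assumed repo id

print(gen_config.bos_token_id)  # 1
print(gen_config.eos_token_id)  # 2
print(gen_config.pad_token_id)  # 4

# The same object can also be passed explicitly, e.g.
# model.generate(pixel_values, generation_config=gen_config, max_length=256)
```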
modelling_cxrrg.py CHANGED
@@ -7,9 +7,8 @@ from transformers import PreTrainedTokenizerFast, VisionEncoderDecoderModel
 from transformers.configuration_utils import PretrainedConfig
 from transformers.modeling_outputs import Seq2SeqLMOutput
 from transformers.modeling_utils import PreTrainedModel
-from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import (
-    VisionEncoderDecoderConfig,
-)
+from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import \
+    VisionEncoderDecoderConfig
 from transformers.utils import logging
 
 from .modelling_uniformer import MultiUniFormerWithProjectionHead
modelling_uniformer.py CHANGED
@@ -1,7 +1,7 @@
 from collections import OrderedDict
 from functools import partial
-from typing import Optional, Tuple, Union
 from math import isqrt
+from typing import Optional, Tuple, Union
 
 import torch
 import torch.nn as nn