agarkovv committed on
Commit
4b74c48
1 Parent(s): 14c70a9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +28 -14
README.md CHANGED
@@ -63,24 +63,38 @@ The model is fine-tuned on specific data (cryptocurrency news and price data) an
63
  To start using the model for predictions, you can follow the example code below:
64
 
65
  ```python
66
- from transformers import AutoModelForCausalLM, AutoTokenizer
67
- import torch
68
 
69
- # Load the fine-tuned model
70
- model_name = "agarkovv/CryptoTrader-LM"
71
- model = AutoModelForCausalLM.from_pretrained(model_name)
72
- tokenizer = AutoTokenizer.from_pretrained(model_name)
73
 
74
- # Example input: news articles and price data
75
- input_text = "[INST]Bitcoin price surges as ETF approval rumors circulate...[/INST]"
76
 
77
- # Tokenize and generate prediction
78
- inputs = tokenizer(input_text, return_tensors="pt")
79
- outputs = model.generate(**inputs)
80
 
81
- # Decode the output for trading decision (buy, sell, or hold)
82
- decision = tokenizer.decode(outputs[0], skip_special_tokens=True)
83
- print(f"Trading decision: {decision}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  ```
85
 
86
  ## Training Details
 
63
  To start using the model for predictions, you can follow the example code below:
64
 
65
  ```python
66
+ from peft import AutoPeftModelForCausalLM
67
+ from transformers import AutoTokenizer
68
 
69
+ from huggingface_hub import login
70
+ login("YOUR TOKEN HERE")
 
 
71
 
 
 
72
 
73
+ PROMPT = "[INST]YOUR PROMPT HERE[/INST]"
74
+ MAX_LENGTH = 32768 # Do not change
75
+ DEVICE = "cpu"
76
 
77
+
78
+ model_id = "agarkovv/CryptoTrader-LM"
79
+ base_model_id = "mistralai/Ministral-8B-Instruct-2410"
80
+
81
+ model = AutoPeftModelForCausalLM.from_pretrained(model_id)
82
+ tokenizer = AutoTokenizer.from_pretrained(base_model_id)
83
+
84
+ model = model.to(DEVICE)
85
+ model.eval()
86
+ inputs = tokenizer(
87
+ PROMPT, return_tensors="pt", padding=False, max_length=MAX_LENGTH, truncation=True
88
+ )
89
+ inputs = {key: value.to(model.device) for key, value in inputs.items()}
90
+
91
+ res = model.generate(
92
+ **inputs,
93
+ use_cache=True,
94
+ max_new_tokens=MAX_LENGTH,
95
+ )
96
+ output = tokenizer.decode(res[0], skip_special_tokens=True)
97
+ print(output)
98
  ```
99
 
100
  ## Training Details