ctn8176 committed on
Commit
727c299
1 Parent(s): a7f4470

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -14
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import torch
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
- from datasets import load_dataset
5
 
6
  model_name = "Writer/palmyra-small"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -9,18 +9,47 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
9
  model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
10
 
11
  def get_movie_info(movie_title):
12
- # Load the IMDb dataset
13
- imdb = load_dataset("imdb")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
- # Search for the movie in the IMDb dataset
16
- results = imdb['title'].filter(lambda x: movie_title.lower() in x.lower())
17
 
18
- # Check if any results are found
19
- if len(results) > 0:
20
- movie = results[0]
21
- return f"Title: {movie['title']}, Year: {movie['year']}, Genre: {', '.join(movie['genre'])}"
22
- else:
23
- return "Movie not found"
 
 
 
 
 
 
24
 
25
  def generate_response(prompt):
26
  input_text_template = (
@@ -30,7 +59,7 @@ def generate_response(prompt):
30
  "ASSISTANT:"
31
  )
32
 
33
- # Call the get_movie_info function
34
  movie_info = get_movie_info(prompt)
35
 
36
  # Concatenate the movie info with the input template
@@ -51,7 +80,6 @@ def generate_response(prompt):
51
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
52
  return generated_text
53
 
 
54
  iface = gr.Interface(fn=generate_response, inputs="text", outputs="text", live=True)
55
  iface.launch()
56
-
57
-
 
1
  import torch
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
+ import requests
5
 
6
  model_name = "Writer/palmyra-small"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
9
  model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
10
 
11
def get_movie_info(movie_title):
    """Look up *movie_title* on TMDb and return a one-line summary.

    Returns ``"Title: ..., Year: ..., Genre: ..."`` for the top search
    result, ``"Movie not found"`` when the search has no results, or an
    ``"Error: ..."`` string if the HTTP request or JSON decoding fails.
    """
    import os

    # SECURITY: a TMDb key was hard-coded here. Prefer the environment
    # variable, falling back to the original key so existing deployments
    # keep working; the committed key should be rotated.
    api_key = os.environ.get("TMDB_API_KEY", "20e959f0f28e6b3e3de49c50f358538a")
    search_url = "https://api.themoviedb.org/3/search/movie"

    # Make a search query to TMDb.
    params = {
        "api_key": api_key,
        "query": movie_title,
        "language": "en-US",
        "page": 1,
    }

    try:
        # Timeout + status check so a slow/failing API cannot hang the UI.
        search_response = requests.get(search_url, params=params, timeout=10)
        search_response.raise_for_status()
        search_data = search_response.json()

        # Check if any results are found.
        if search_data.get("results"):
            movie_id = search_data["results"][0]["id"]

            # Fetch detailed information using the movie ID.
            details_url = f"https://api.themoviedb.org/3/movie/{movie_id}"
            details_params = {
                "api_key": api_key,
                "language": "en-US",
            }
            details_response = requests.get(
                details_url, params=details_params, timeout=10
            )
            details_response.raise_for_status()
            details_data = details_response.json()

            # Extract relevant information.
            title = details_data.get("title", "Unknown Title")
            # BUG FIX: release_date may be missing, "" or null. The old
            # code sliced the default ("Unknown Year"[:4] -> "Unkn") and
            # crashed with TypeError on null.
            release_date = details_data.get("release_date") or ""
            year = release_date[:4] if release_date else "Unknown Year"
            genre = ", ".join(
                g["name"] for g in details_data.get("genres", [])
            )

            return f"Title: {title}, Year: {year}, Genre: {genre}"

        return "Movie not found"

    except Exception as e:  # broad by design: the string feeds the prompt
        return f"Error: {e}"
53
 
54
  def generate_response(prompt):
55
  input_text_template = (
 
59
  "ASSISTANT:"
60
  )
61
 
62
+ # Call the get_movie_info function to enrich the response
63
  movie_info = get_movie_info(prompt)
64
 
65
  # Concatenate the movie info with the input template
 
80
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
81
  return generated_text
82
 
83
+ # Create Gradio Interface
84
  iface = gr.Interface(fn=generate_response, inputs="text", outputs="text", live=True)
85
  iface.launch()