Victor Pontis committed
Commit e6b0c52
1 Parent(s): a57a060

Update the log

Files changed (3)
  1. README.md +0 -11
  2. create_handler.ipynb +4 -2
  3. handler.py +3 -1
README.md CHANGED
@@ -1,14 +1,3 @@
- ---
- license: mit
- tags:
- - audio
- - automatic-speech-recognition
- - endpoints-template
- library_name: generic
- inference: false
- duplicated_from: philschmid/openai-whisper-endpoint
- ---
-
  # OpenAI [Whisper](https://github.com/openai/whisper) Inference Endpoint example
 
  > Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.
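For reference, the capability described in that blockquote maps onto a very small API in the openai-whisper package this endpoint wraps. A minimal sketch of calling it directly; the model size ("base") and the audio file name are illustrative assumptions, not taken from this repo:

```python
# Minimal sketch of the openai-whisper API behind the endpoint.
# "base" and "sample.wav" are illustrative assumptions.
import whisper

model = whisper.load_model("base")       # downloads weights on first use
result = model.transcribe("sample.wav")  # transcribes and detects language
print(result["text"])                    # the same field the handler returns
```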
create_handler.ipynb CHANGED
@@ -106,7 +106,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 5,
+  "execution_count": 1,
   "metadata": {},
   "outputs": [
   {
@@ -150,8 +150,10 @@
  " # run inference pipeline\n",
  " result = self.model.transcribe(audio_nparray)\n",
  "\n",
+ " print(\"Hi this is a custom log!\")\n",
+ "\n",
  " # postprocess the prediction\n",
- " return {\"text\": result[\"text\"]}"
+ " return { \"text\": result[\"text\"] }"
  ]
  },
  {
handler.py CHANGED
@@ -28,6 +28,8 @@ class EndpointHandler():
 
         # run inference pipeline
         result = self.model.transcribe(audio_nparray)
+
+        print("Hi this is a custom log!")
 
         # postprocess the prediction
-        return {"text": result["text"]}
+        return { "text": result["text"] }