Nymbo committed
Commit 4f7ce90 · verified · 1 Parent(s): d60bf98

adding better error logs

Files changed (1)
1. app.py +21 -2
app.py CHANGED

@@ -6,11 +6,15 @@ import os
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 
+print("Access token loaded.")
+
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
 )
 
+print("OpenAI client initialized.")
+
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -19,19 +23,27 @@ def respond(
     temperature,
     top_p,
 ):
+    print(f"Received message: {message}")
+    print(f"History: {history}")
+    print(f"System message: {system_message}")
+    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
+
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
+            print(f"Added user message to context: {val[0]}")
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
+            print(f"Added assistant message to context: {val[1]}")
 
     messages.append({"role": "user", "content": message})
 
     response = ""
+    print("Sending request to OpenAI API.")
 
-    for message in client.chat.completions.create(
+    for message in client.chat.completions.create(
         model="PowerInfer/SmallThinker-3B-Preview",
         max_tokens=max_tokens,
         stream=True,
@@ -40,12 +52,16 @@ def respond(
         messages=messages,
     ):
         token = message.choices[0].delta.content
-
+        print(f"Received token: {token}")
         response += token
         yield response
+
+    print("Completed response generation.")
 
 chatbot = gr.Chatbot(height=600)
 
+print("Chatbot interface created.")
+
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -65,5 +81,8 @@ demo = gr.ChatInterface(
     chatbot=chatbot,
     theme="Nymbo/Nymbo_Theme",
 )
+print("Gradio interface initialized.")
+
 if __name__ == "__main__":
+    print("Launching the demo application.")
     demo.launch()
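
For context, here is a minimal sketch (not part of the commit) of driving the updated respond() generator directly, outside the Gradio UI, to watch the new log output. It assumes the two parameters elided by the diff hunks are system_message and max_tokens (the names used inside the function body), that importing app.py is acceptable even though it constructs the client and interface at module level, and that HF_TOKEN is set in the environment; the sample message and history values are made up for illustration.

# sketch.py -- exercise respond() locally; parameter names partly assumed (see note above)
from app import respond  # importing app.py runs its module-level prints and client setup

history = [("Hi there", "Hello! How can I help?")]  # prior (user, assistant) turns

partial = ""
for partial in respond(
    message="Explain what streaming does here.",
    history=history,
    system_message="You are a helpful assistant.",  # assumed name of the elided parameter
    max_tokens=256,                                  # assumed name of the elided parameter
    temperature=0.7,
    top_p=0.95,
):
    # Each yielded value is the response accumulated so far; the per-token
    # "Received token: ..." prints appear on stdout as chunks arrive.
    pass

print(partial)  # final full response after the stream ends

Because the new logging uses plain print(), the messages go to standard output rather than the chat UI, so when the app is hosted they show up in the runtime logs, which is where the "better error logs" from this commit are meant to be read.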