
hysts (HF staff) committed
Commit 20d974d · Parent(s): ecd6841
app_allenai.py CHANGED
@@ -38,7 +38,6 @@ def safe_chat_fn(message, history, client):
 
 
 with gr.Blocks() as demo:
-
     client = gr.State()
 
     model_dropdown = gr.Dropdown(
app_compare.py CHANGED
@@ -1,6 +1,4 @@
 import os
-import random
-from typing import Dict, List
 
 import google.generativeai as genai
 import gradio as gr
@@ -31,9 +29,10 @@ def get_all_models():
 ]
 
 
-def generate_discussion_prompt(original_question: str, previous_responses: List[str]) -> str:
+def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
     """Generate a prompt for models to discuss and build upon previous
-    responses."""
+    responses.
+    """
     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
 
 Previous responses from other AI models:
@@ -49,7 +48,7 @@ Keep your response focused and concise (max 3-4 paragraphs)."""
     return prompt
 
 
-def generate_consensus_prompt(original_question: str, discussion_history: List[str]) -> str:
+def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
     """Generate a prompt for final consensus building."""
     return f"""Review this multi-AI discussion about: "{original_question}"
 
@@ -65,7 +64,7 @@ As a final synthesizer, please:
 Keep the final consensus concise but complete."""
 
 
-def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> str:
+def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
     import openai
 
     client = openai.OpenAI(api_key=api_key)
@@ -73,14 +72,14 @@ def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> s
     return response.choices[0].message.content
 
 
-def chat_with_anthropic(messages: List[Dict], api_key: str | None) -> str:
+def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
     """Chat with Anthropic's Claude model."""
     client = Anthropic(api_key=api_key)
     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
     return response.content[0].text
 
 
-def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
+def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
     """Chat with Gemini Pro model."""
     genai.configure(api_key=api_key)
     model = genai.GenerativeModel("gemini-pro")
@@ -96,7 +95,7 @@ def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
 
 
 def chat_with_sambanova(
-    messages: List[Dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
 ) -> str:
     """Chat with SambaNova's models using their OpenAI-compatible API."""
     client = openai.OpenAI(
@@ -105,13 +104,16 @@ def chat_with_sambanova(
     )
 
     response = client.chat.completions.create(
-        model=model_name, messages=messages, temperature=0.1, top_p=0.1  # Use the specific model name passed in
+        model=model_name,
+        messages=messages,
+        temperature=0.1,
+        top_p=0.1,  # Use the specific model name passed in
     )
     return response.choices[0].message.content
 
 
 def chat_with_hyperbolic(
-    messages: List[Dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
 ) -> str:
     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
@@ -132,7 +134,7 @@ def chat_with_hyperbolic(
 
 
 def multi_model_consensus(
-    question: str, selected_models: List[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
 ) -> list[tuple[str, str]]:
     if not selected_models:
         raise gr.Error("Please select at least one model to chat with.")
@@ -172,7 +174,7 @@ def multi_model_consensus(
 
             chat_history.append((model, response))
         except Exception as e:
-            chat_history.append((model, f"Error: {str(e)}"))
+            chat_history.append((model, f"Error: {e!s}"))
 
     progress(1.0, desc="Done!")
     return chat_history
@@ -180,9 +182,7 @@ def multi_model_consensus(
 
 with gr.Blocks() as demo:
     gr.Markdown("# Model Response Comparison")
-    gr.Markdown(
-        """Select multiple models to compare their responses"""
-    )
+    gr.Markdown("""Select multiple models to compare their responses""")
 
     with gr.Row():
         with gr.Column():
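
A note on the recurring f"Error: {str(e)}" → f"Error: {e!s}" rewrites in this commit: `!s` is the f-string conversion flag that applies str() during formatting, so the two spellings are equivalent; the flag form simply drops the redundant call. A minimal sketch (the exception message is illustrative):

    try:
        raise ValueError("model unavailable")  # stand-in for a failed API call
    except Exception as e:
        # Identical output; `!s` converts with str() inside the f-string.
        assert f"Error: {str(e)}" == f"Error: {e!s}"
        print(f"Error: {e!s}")  # -> Error: model unavailable
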
app_crew.py CHANGED
@@ -1,8 +1,8 @@
-import gradio as gr
 import ai_gradio
+import gradio as gr
 
 demo = gr.load(
-    name='crewai:gpt-4-turbo',
-    crew_type='article',  # or 'support'
+    name="crewai:gpt-4-turbo",
+    crew_type="article",  # or 'support'
     src=ai_gradio.registry,
-)
+)
app_deepseek.py CHANGED
@@ -1,7 +1,7 @@
-import gradio as gr
 import ai_gradio
+import gradio as gr
 
 demo = gr.load(
-    name='deepseek:deepseek-chat',
+    name="deepseek:deepseek-chat",
     src=ai_gradio.registry,
-)
+)
app_experimental.py CHANGED
@@ -1,6 +1,5 @@
 import os
 import random
-from typing import Dict, List
 
 import google.generativeai as genai
 import gradio as gr
@@ -31,9 +30,10 @@ def get_all_models():
 ]
 
 
-def generate_discussion_prompt(original_question: str, previous_responses: List[str]) -> str:
+def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
     """Generate a prompt for models to discuss and build upon previous
-    responses."""
+    responses.
+    """
     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
 
 Previous responses from other AI models:
@@ -49,7 +49,7 @@ Keep your response focused and concise (max 3-4 paragraphs)."""
     return prompt
 
 
-def generate_consensus_prompt(original_question: str, discussion_history: List[str]) -> str:
+def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
     """Generate a prompt for final consensus building."""
     return f"""Review this multi-AI discussion about: "{original_question}"
 
@@ -65,7 +65,7 @@ As a final synthesizer, please:
 Keep the final consensus concise but complete."""
 
 
-def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> str:
+def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
     import openai
 
     client = openai.OpenAI(api_key=api_key)
@@ -73,14 +73,14 @@ def chat_with_openai(model: str, messages: List[Dict], api_key: str | None) -> s
     return response.choices[0].message.content
 
 
-def chat_with_anthropic(messages: List[Dict], api_key: str | None) -> str:
+def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
     """Chat with Anthropic's Claude model."""
     client = Anthropic(api_key=api_key)
     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
     return response.content[0].text
 
 
-def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
+def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
     """Chat with Gemini Pro model."""
     genai.configure(api_key=api_key)
     model = genai.GenerativeModel("gemini-pro")
@@ -96,7 +96,7 @@ def chat_with_gemini(messages: List[Dict], api_key: str | None) -> str:
 
 
 def chat_with_sambanova(
-    messages: List[Dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
 ) -> str:
     """Chat with SambaNova's models using their OpenAI-compatible API."""
     client = openai.OpenAI(
@@ -105,13 +105,16 @@ def chat_with_sambanova(
     )
 
     response = client.chat.completions.create(
-        model=model_name, messages=messages, temperature=0.1, top_p=0.1  # Use the specific model name passed in
+        model=model_name,
+        messages=messages,
+        temperature=0.1,
+        top_p=0.1,  # Use the specific model name passed in
     )
     return response.choices[0].message.content
 
 
 def chat_with_hyperbolic(
-    messages: List[Dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
 ) -> str:
     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
@@ -132,7 +135,7 @@ def chat_with_hyperbolic(
 
 
 def multi_model_consensus(
-    question: str, selected_models: List[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
 ) -> list[tuple[str, str]]:
     if not selected_models:
         raise gr.Error("Please select at least one model to chat with.")
@@ -246,7 +249,7 @@ def multi_model_consensus(
             messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
         )
     except Exception as e:
-        final_consensus = f"Error getting consensus from {model}: {str(e)}"
+        final_consensus = f"Error getting consensus from {model}: {e!s}"
 
     chat_history.append(("Final Consensus", final_consensus))
 
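Both chat_with_sambanova and chat_with_hyperbolic above reuse the official OpenAI client against a third-party endpoint by overriding base_url — the "OpenAI-compatible API" pattern their docstrings name. A sketch of that pattern under assumed credentials (the key is a placeholder; the URL and model name are the Hyperbolic values from the diff):

    import openai

    # Point the standard OpenAI client at an OpenAI-compatible provider.
    client = openai.OpenAI(api_key="YOUR_API_KEY", base_url="https://api.hyperbolic.xyz/v1")
    response = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=[{"role": "user", "content": "Say hello."}],
        temperature=0.1,
        top_p=0.1,
    )
    print(response.choices[0].message.content)
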
app_groq.py CHANGED
@@ -1,20 +1,12 @@
-import os
-
 import ai_gradio
 
 from utils_ai_gradio import get_app
 
 # Get the Groq models from the registry
-GROQ_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('groq:')
-]
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
 
 # Create display names without the prefix
-GROQ_MODELS_DISPLAY = [
-    k.replace('groq:', '')
-    for k in GROQ_MODELS_FULL
-]
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
 
 demo = get_app(
     models=GROQ_MODELS_FULL,
@@ -22,7 +14,7 @@ demo = get_app(
     src=ai_gradio.registry,
     dropdown_label="Select Groq Model",
     choices=GROQ_MODELS_DISPLAY,
-    fill_height=True
+    fill_height=True,
 )
 
 if __name__ == "__main__":
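
The collapsed comprehensions here (and in the other provider apps below) implement one pattern: keep the fully qualified registry keys for loading, and strip the provider prefix for display. A standalone sketch with a hypothetical registry standing in for ai_gradio.registry:

    # Hypothetical keys; the real mapping is ai_gradio.registry.
    registry = {"groq:llama-3.1-8b": None, "groq:mixtral-8x7b": None, "openai:gpt-4o": None}

    # Full names (with prefix) are what the loader expects...
    groq_full = [k for k in registry if k.startswith("groq:")]
    # ...while the dropdown shows prefix-free display names.
    groq_display = [k.removeprefix("groq:") for k in groq_full]

    print(groq_full)     # ['groq:llama-3.1-8b', 'groq:mixtral-8x7b']
    print(groq_display)  # ['llama-3.1-8b', 'mixtral-8x7b']

str.removeprefix (Python 3.9+) is interchangeable here with the diff's k.replace("groq:", ""), since the prefix only occurs at the start of each key.
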
app_groq_coder.py CHANGED
@@ -1,17 +1,12 @@
 import ai_gradio
+
 from utils_ai_gradio import get_app
 
 # Get the Groq models but keep their full names for loading
-GROQ_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('groq:')
-]
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
 
 # Create display names without the prefix
-GROQ_MODELS_DISPLAY = [
-    k.replace('groq:', '')
-    for k in GROQ_MODELS_FULL
-]
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
 
 # Create and launch the interface using get_app utility
 demo = get_app(
@@ -20,7 +15,7 @@ demo = get_app(
     dropdown_label="Select Groq Model",
     choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
     fill_height=True,
-    coder=True
+    coder=True,
 )
 
 
app_huggingface.py CHANGED
@@ -44,12 +44,11 @@ def safe_chat_fn(message, history, client):
     try:
         return create_chat_fn(client)(message, history)
     except Exception as e:
-        print(f"Error during chat: {str(e)}")
-        return f"Error during chat: {str(e)}"
+        print(f"Error during chat: {e!s}")
+        return f"Error during chat: {e!s}"
 
 
 with gr.Blocks() as demo:
-
     client = gr.State()
 
     model_dropdown = gr.Dropdown(
app_hyperbolic.py CHANGED
@@ -1,17 +1,12 @@
 import ai_gradio
+
 from utils_ai_gradio import get_app
 
 # Get the hyperbolic models but keep their full names for loading
-HYPERBOLIC_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('hyperbolic:')
-]
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
 
 # Create display names without the prefix
-HYPERBOLIC_MODELS_DISPLAY = [
-    k.replace('hyperbolic:', '')
-    for k in HYPERBOLIC_MODELS_FULL
-]
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
 
 
 # Create and launch the interface using get_app utility
@@ -20,6 +15,5 @@ demo = get_app(
     default_model=HYPERBOLIC_MODELS_FULL[-1],
     dropdown_label="Select Hyperbolic Model",
     choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
-    fill_height=True
+    fill_height=True,
 )
-
app_hyperbolic_coder.py CHANGED
@@ -1,17 +1,12 @@
 import ai_gradio
+
 from utils_ai_gradio import get_app
 
 # Get the hyperbolic models but keep their full names for loading
-HYPERBOLIC_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('hyperbolic:')
-]
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
 
 # Create display names without the prefix
-HYPERBOLIC_MODELS_DISPLAY = [
-    k.replace('hyperbolic:', '')
-    for k in HYPERBOLIC_MODELS_FULL
-]
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
 
 
 # Create and launch the interface using get_app utility
@@ -21,6 +16,5 @@ demo = get_app(
     dropdown_label="Select Hyperbolic Model",
     choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
     fill_height=True,
-    coder=True
+    coder=True,
 )
-
app_openai.py CHANGED
@@ -1,18 +1,12 @@
 import ai_gradio
-from utils_ai_gradio import get_app
 
+from utils_ai_gradio import get_app
 
 # Get the OpenAI models but keep their full names for loading
-OPENAI_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('openai:')
-]
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
 
 # Create display names without the prefix
-OPENAI_MODELS_DISPLAY = [
-    k.replace('openai:', '')
-    for k in OPENAI_MODELS_FULL
-]
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
 
 # Create and launch the interface using get_app utility
 demo = get_app(
app_openai_coder.py CHANGED
@@ -1,18 +1,12 @@
 import ai_gradio
-from utils_ai_gradio import get_app
 
+from utils_ai_gradio import get_app
 
 # Get the OpenAI models but keep their full names for loading
-OPENAI_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('openai:')
-]
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
 
 # Create display names without the prefix
-OPENAI_MODELS_DISPLAY = [
-    k.replace('openai:', '')
-    for k in OPENAI_MODELS_FULL
-]
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
 
 # Create and launch the interface using get_app utility
 demo = get_app(
app_paligemma.py CHANGED
@@ -43,8 +43,8 @@ def safe_chat_fn(message, history, client, system_prompt, temperature, max_token
             message, history
         )
     except Exception as e:
-        print(f"Error during chat: {str(e)}")
-        return f"Error during chat: {str(e)}"
+        print(f"Error during chat: {e!s}")
+        return f"Error during chat: {e!s}"
 
 
 with gr.Blocks() as demo:
app_smolagents.py CHANGED
@@ -1,16 +1,11 @@
 import ai_gradio
+
 from utils_ai_gradio import get_app
 
-SMOLAGENTS_MODELS_FULL = [
-    k for k in ai_gradio.registry.keys()
-    if k.startswith('smolagents:')
-]
+SMOLAGENTS_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("smolagents:")]
 
 
-SMOLAGENTS_MODELS_DISPLAY = [
-    k.replace('smolagents:', '')
-    for k in SMOLAGENTS_MODELS_FULL
-]
+SMOLAGENTS_MODELS_DISPLAY = [k.replace("smolagents:", "") for k in SMOLAGENTS_MODELS_FULL]
 
 demo = get_app(
     models=SMOLAGENTS_MODELS_FULL,  # Use the full names with prefix
app_transformers.py CHANGED
@@ -1,9 +1,6 @@
 import gradio as gr
 
-demo = gr.load(
-    name='akhaliq/phi-4',
-    src="spaces"
-)
+demo = gr.load(name="akhaliq/phi-4", src="spaces")
 
 # Disable API access for all functions
 if hasattr(demo, "fns"):
@@ -12,7 +9,3 @@ if hasattr(demo, "fns"):
 
 if __name__ == "__main__":
     demo.launch()
-
-
-
-
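The surviving "Disable API access" loop is Gradio's per-function switch for hiding endpoints from the auto-generated API. The guard in context, mirroring this repo's own usage in app_transformers.py and utils.py:

    import gradio as gr

    demo = gr.load(name="akhaliq/phi-4", src="spaces")

    # Each event handler is registered in demo.fns; api_name=False hides it
    # from the API docs page and blocks programmatic calls to that endpoint.
    if hasattr(demo, "fns"):
        for fn in demo.fns.values():
            fn.api_name = False
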
pyproject.toml CHANGED
@@ -59,6 +59,7 @@ line-length = 119
 
 [tool.ruff.lint]
 select = ["E", "F", "I", "N", "S", "T", "UP", "W"]
+ignore = ["T201"]
 
 [tool.ruff.format]
 docstring-code-format = true
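
T201 is the flake8-print rule for a bare print call; `select` pulls in the T family, and several of the apps above deliberately print their error messages, so the new `ignore` entry keeps ruff quiet without rewriting them. After this change, a function like the following lints clean:

    def report(msg: str) -> None:
        # Previously flagged as T201 ("print found"); now explicitly allowed.
        print(msg)
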
utils.py CHANGED
@@ -1,4 +1,5 @@
-from typing import Callable, Dict, Literal, Union
+from collections.abc import Callable
+from typing import Literal
 
 import gradio as gr
 
@@ -6,7 +7,7 @@ import gradio as gr
 def get_app(
     models: list[str],
     default_model: str,
-    src: Union[Callable[[str, str | None], gr.Blocks], Literal["models"], Dict[str, gr.Blocks]],
+    src: Callable[[str, str | None], gr.Blocks] | Literal["models"] | dict[str, gr.Blocks],
     accept_token: bool = False,
     dropdown_label: str = "Select Model",
     **kwargs,
@@ -37,4 +38,4 @@ def get_app(
     for fn in demo.fns.values():
         fn.api_name = False
 
-    return demo
+    return demo
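
The utils.py hunks are the same PEP 585/604 modernization that runs through the whole commit: Callable now comes from collections.abc, Union[...] becomes the | operator, and Dict/List give way to the built-in generics. Old and new spellings side by side (Python 3.10+; equivalent at runtime and to type checkers):

    from collections.abc import Callable
    from typing import Literal

    import gradio as gr

    # Before: Union[Callable[[str, str | None], gr.Blocks], Literal["models"], Dict[str, gr.Blocks]]
    # After, with the same meaning:
    SrcType = Callable[[str, str | None], gr.Blocks] | Literal["models"] | dict[str, gr.Blocks]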