From 7c1f09c762b1802ce3b330f88dde77e60e161a25 Mon Sep 17 00:00:00 2001 From: ks6088ts Date: Tue, 13 Aug 2024 10:00:24 +0900 Subject: [PATCH] refactor UI --- apps/2_streamlit_chat/main.py | 10 ++- apps/4_streamlit_chat_history/main.py | 11 +++- apps/7_streamlit_chat_rag/main.py | 10 ++- apps/8_streamlit_azure_openai_batch/main.py | 66 ++++++++++++------- .../99_streamlit_examples/pages/1_File_Q&A.py | 10 ++- .../pages/2_Image_Q&A.py | 10 ++- .../pages/3_Camera_Q&A.py | 10 ++- .../pages/4_Translate_text.py | 10 ++- .../pages/5_Explain_data.py | 10 ++- .../pages/6_Speech_to_text.py | 34 +++++----- .../pages/7_Text_to_speech.py | 11 ++-- .../pages/8_Create_image.py | 15 ++--- .../pages/9_Visualize_location.py | 25 +++++-- .../test.jsonl | 0 14 files changed, 155 insertions(+), 77 deletions(-) rename {apps/8_streamlit_azure_openai_batch => datasets}/test.jsonl (100%) diff --git a/apps/2_streamlit_chat/main.py b/apps/2_streamlit_chat/main.py index d4cbc30..bfe7514 100644 --- a/apps/2_streamlit_chat/main.py +++ b/apps/2_streamlit_chat/main.py @@ -34,11 +34,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/2_streamlit_chat/main.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("2_streamlit_chat") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() if "messages" not in st.session_state: st.session_state["messages"] = [ @@ -53,7 +57,7 @@ st.chat_message(msg["role"]).write(msg["content"]) # Receive user input -if prompt := st.chat_input(): +if prompt := st.chat_input(disabled=not is_configured()): client = AzureOpenAI( api_key=azure_openai_api_key, api_version=azure_openai_api_version, diff --git a/apps/4_streamlit_chat_history/main.py b/apps/4_streamlit_chat_history/main.py index 2bc64c3..eafbb83 100644 --- a/apps/4_streamlit_chat_history/main.py +++ b/apps/4_streamlit_chat_history/main.py @@ -66,10 +66,15 @@ def store_chat_history(container: ContainerProxy): "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/4_streamlit_chat_history/main.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("4_streamlit_chat_history") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: + +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.write(f"Session ID: {get_session_id()}") @@ -86,7 +91,7 @@ def store_chat_history(container: ContainerProxy): st.chat_message(msg["role"]).write(msg["content"]) # Receive user input -if prompt := st.chat_input(): +if prompt := st.chat_input(disabled=not is_configured()): client = AzureOpenAI( api_key=azure_openai_api_key, api_version=azure_openai_api_version, diff --git a/apps/7_streamlit_chat_rag/main.py b/apps/7_streamlit_chat_rag/main.py index b43627d..26ffdec 100644 --- a/apps/7_streamlit_chat_rag/main.py +++ b/apps/7_streamlit_chat_rag/main.py @@ -57,9 +57,13 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source 
code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/7_streamlit_chat_rag/main.py)" -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() def get_session_id(): @@ -121,7 +125,7 @@ def main(): for msg in st.session_state["memory"].chat_memory.messages: st.chat_message(msg.type).write(msg.content) - if prompt := st.chat_input(placeholder="Type your message here..."): + if prompt := st.chat_input(placeholder="Type your message here...", disabled=not is_configured()): st.chat_message("user").write(prompt) with st.chat_message("assistant"): diff --git a/apps/8_streamlit_azure_openai_batch/main.py b/apps/8_streamlit_azure_openai_batch/main.py index 2863835..1a2b2fb 100644 --- a/apps/8_streamlit_azure_openai_batch/main.py +++ b/apps/8_streamlit_azure_openai_batch/main.py @@ -36,34 +36,44 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/8_streamlit_azure_openai_batch/main.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + +def get_client(): + return AzureOpenAI( + api_key=azure_openai_api_key, + api_version=azure_openai_api_version, + azure_endpoint=azure_openai_endpoint, + ) + + st.title("8_streamlit_azure_openai_batch") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() # --------------- # Upload batch file # --------------- st.header("Upload batch file") st.info("Upload a file in JSON lines format (.jsonl)") -client = AzureOpenAI( - api_key=azure_openai_api_key, - api_version=azure_openai_api_version, - azure_endpoint=azure_openai_endpoint, -) uploaded_file = st.file_uploader("Upload an input file in JSON lines format", type=("jsonl")) if uploaded_file: bytes_data = uploaded_file.read() st.write(bytes_data.decode().split("\n")) - submit_button = st.button("Submit", key="submit") - if submit_button: + if st.button( + "Submit", + key="submit", + disabled=not is_configured(), + ): temp_file_path = "tmp.jsonl" with open(temp_file_path, "wb") as f: f.write(bytes_data) with st.spinner("Uploading..."): try: - response = client.files.create( + response = get_client().files.create( # FIXME: hardcoded for now, use uploaded_file file=open(temp_file_path, "rb"), purpose="batch", @@ -83,11 +93,14 @@ key="track_file_id", help="Enter the file ID to track the file upload status", ) -track_button = st.button("Track") -if track_file_id != "" and track_button: +if st.button( + "Track", + key="track", + disabled=not track_file_id or not is_configured(), +): with st.spinner("Tracking..."): try: - response = client.files.retrieve(track_file_id) + response = get_client().files.retrieve(track_file_id) st.write(response.model_dump()) st.write(f"status: {response.status}") except Exception as e: @@ -104,11 +117,14 @@ key="batch_file_id", help="Enter the file ID to track the file upload status", ) -batch_button = st.button("Create batch job") -if batch_file_id != "" and batch_button: +if st.button( + "Create batch job", + 
key="create", + disabled=not batch_file_id or not is_configured(), +): with st.spinner("Creating..."): try: - response = client.batches.create( + response = get_client().batches.create( input_file_id=batch_file_id, endpoint="/chat/completions", completion_window="24h", @@ -128,11 +144,14 @@ key="track_batch_job_id", help="Enter the batch job ID to track the job progress", ) -track_batch_job_button = st.button("Track batch job") -if track_batch_job_id != "" and track_batch_job_button: +if st.button( + "Track batch job", + key="track_batch_job", + disabled=not track_batch_job_id or not is_configured(), +): with st.spinner("Tracking..."): try: - response = client.batches.retrieve(track_batch_job_id) + response = get_client().batches.retrieve(track_batch_job_id) st.write(response.model_dump()) st.write(f"status: {response.status}") st.write(f"output_file_id: {response.output_file_id}") @@ -150,11 +169,14 @@ key="retrieve_batch_job_id", help="Enter the batch job ID to retrieve the output file", ) -retrieve_batch_job_button = st.button("Retrieve batch job output file") -if output_file_id != "" and retrieve_batch_job_button: +if st.button( + "Retrieve batch job output file", + key="retrieve_batch_job", + disabled=not output_file_id or not is_configured(), +): with st.spinner("Retrieving..."): try: - file_response = client.files.content(output_file_id) + file_response = get_client().files.content(output_file_id) raw_responses = file_response.text.strip().split("\n") for raw_response in raw_responses: diff --git a/apps/99_streamlit_examples/pages/1_File_Q&A.py b/apps/99_streamlit_examples/pages/1_File_Q&A.py index 2d1aaaf..3c0a694 100644 --- a/apps/99_streamlit_examples/pages/1_File_Q&A.py +++ b/apps/99_streamlit_examples/pages/1_File_Q&A.py @@ -34,11 +34,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/1_File_Q&A.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("File Q&A") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("Upload a file and ask a question. AI will answer the question.") @@ -49,7 +53,7 @@ disabled=not uploaded_file, ) -if uploaded_file and question: +if uploaded_file and question and is_configured(): article = uploaded_file.read().decode() client = AzureOpenAI( diff --git a/apps/99_streamlit_examples/pages/2_Image_Q&A.py b/apps/99_streamlit_examples/pages/2_Image_Q&A.py index 5ba764d..442be89 100644 --- a/apps/99_streamlit_examples/pages/2_Image_Q&A.py +++ b/apps/99_streamlit_examples/pages/2_Image_Q&A.py @@ -35,11 +35,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/2_Image_Q&A.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("Image Q&A") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("Upload an image and ask a question. 
AI will answer the question.") @@ -60,7 +64,7 @@ disabled=not uploaded_file, ) -if uploaded_file and question: +if uploaded_file and question and is_configured(): encoded_image = base64.b64encode(uploaded_file.read()).decode() client = AzureOpenAI( diff --git a/apps/99_streamlit_examples/pages/3_Camera_Q&A.py b/apps/99_streamlit_examples/pages/3_Camera_Q&A.py index e9990ed..05b2c77 100644 --- a/apps/99_streamlit_examples/pages/3_Camera_Q&A.py +++ b/apps/99_streamlit_examples/pages/3_Camera_Q&A.py @@ -35,11 +35,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/3_Camera_Q&A.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("Camera Q&A") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("Take a picture and ask a question. AI will answer the question.") @@ -50,7 +54,7 @@ disabled=not img_file_buffer, ) -if img_file_buffer and question: +if img_file_buffer and question and is_configured(): encoded_image = base64.b64encode(img_file_buffer.getvalue()).decode() client = AzureOpenAI( diff --git a/apps/99_streamlit_examples/pages/4_Translate_text.py b/apps/99_streamlit_examples/pages/4_Translate_text.py index f2de4fb..f5f72c5 100644 --- a/apps/99_streamlit_examples/pages/4_Translate_text.py +++ b/apps/99_streamlit_examples/pages/4_Translate_text.py @@ -34,11 +34,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/4_Translate_text.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("Translate text") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("This is a sample to translate text.") @@ -89,7 +93,7 @@ def translate(target: str, input: str) -> str: ) with row1_right: - translate_button = st.button("Translate") + translate_button = st.button("Translate", disabled=not is_configured()) # 2nd row row2_left, row2_right = st.columns(2) diff --git a/apps/99_streamlit_examples/pages/5_Explain_data.py b/apps/99_streamlit_examples/pages/5_Explain_data.py index ea71dcf..da0c725 100644 --- a/apps/99_streamlit_examples/pages/5_Explain_data.py +++ b/apps/99_streamlit_examples/pages/5_Explain_data.py @@ -38,11 +38,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/5_Explain_data.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_gpt_model + + st.title("Explain data") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_gpt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("This is a sample to explain data.") @@ -104,7 +108,7 @@ def 
explain_data(input: str) -> str: use_container_width=True, ) -explain_button = st.button("Explain data") +explain_button = st.button("Explain data", disabled=not is_configured()) if explain_button: with st.spinner("Numerical data analysis..."): diff --git a/apps/99_streamlit_examples/pages/6_Speech_to_text.py b/apps/99_streamlit_examples/pages/6_Speech_to_text.py index 6da0e5a..2151856 100644 --- a/apps/99_streamlit_examples/pages/6_Speech_to_text.py +++ b/apps/99_streamlit_examples/pages/6_Speech_to_text.py @@ -35,11 +35,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/6_Speech_to_text.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_stt_model + + st.title("Speech to text") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_stt_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("This is a sample to convert speech to text.") @@ -64,17 +68,17 @@ azure_endpoint=azure_openai_endpoint, ) - if st.button("Convert"): - with st.spinner("Converting..."): - response = client.audio.transcriptions.create( - model=azure_openai_stt_model, - file=uploaded_file, - response_format="text", - ) - st.write(response) - transcript_encoded = base64.b64encode(response.encode()).decode() - # Generate a link to download the result - st.markdown( - f'Download Result', - unsafe_allow_html=True, +if st.button("Convert", disabled=not uploaded_file or not is_configured()): + with st.spinner("Converting..."): + response = client.audio.transcriptions.create( + model=azure_openai_stt_model, + file=uploaded_file, + response_format="text", ) + st.write(response) + transcript_encoded = base64.b64encode(response.encode()).decode() + # Generate a link to download the result + st.markdown( + f'Download Result', + unsafe_allow_html=True, + ) diff --git a/apps/99_streamlit_examples/pages/7_Text_to_speech.py b/apps/99_streamlit_examples/pages/7_Text_to_speech.py index 3dd2ced..eb4160c 100644 --- a/apps/99_streamlit_examples/pages/7_Text_to_speech.py +++ b/apps/99_streamlit_examples/pages/7_Text_to_speech.py @@ -34,11 +34,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/7_Text_to_speech.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_tts_model + + st.title("Text to speech") -if not azure_openai_api_key or not azure_openai_endpoint or not azure_openai_api_version or not azure_openai_tts_model: +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("This is a sample to convert text to speech.") @@ -60,8 +64,7 @@ ), ) -convert_button = st.button("Convert") -if convert_button and content and voice_option: +if st.button("Convert", disabled=not content or not is_configured()): client = AzureOpenAI( api_key=azure_openai_api_key, api_version=azure_openai_api_version, diff --git a/apps/99_streamlit_examples/pages/8_Create_image.py b/apps/99_streamlit_examples/pages/8_Create_image.py index 4a00da0..0c07f5e 100644 --- a/apps/99_streamlit_examples/pages/8_Create_image.py +++ 
b/apps/99_streamlit_examples/pages/8_Create_image.py @@ -35,16 +35,15 @@ "[Azure OpenAI Studio](https://oai.azure.com/resource/overview)" "[View the source code](https://github.com/ks6088ts-labs/workshop-azure-openai/blob/main/apps/99_streamlit_examples/pages/8_Create_image.py)" + +def is_configured(): + return azure_openai_api_key and azure_openai_endpoint and azure_openai_api_version and azure_openai_dalle_model + + st.title("Create image") -if ( - not azure_openai_api_key - or not azure_openai_endpoint - or not azure_openai_api_version - or not azure_openai_dalle_model -): +if not is_configured(): st.warning("Please fill in the required fields at the sidebar.") - st.stop() st.info("Create an image from a text description.") @@ -53,7 +52,7 @@ placeholder="Please describe the content of the image", ) -if description: +if st.button("Create image", disabled=not description or not is_configured()): client = AzureOpenAI( api_key=azure_openai_api_key, api_version=azure_openai_api_version, diff --git a/apps/99_streamlit_examples/pages/9_Visualize_location.py b/apps/99_streamlit_examples/pages/9_Visualize_location.py index 0c7b292..0a11088 100644 --- a/apps/99_streamlit_examples/pages/9_Visualize_location.py +++ b/apps/99_streamlit_examples/pages/9_Visualize_location.py @@ -12,11 +12,28 @@ st.title("Visualize location") st.info("This is a sample to visualize location.") - uploaded_file = st.file_uploader("Upload an article", type=("csv")) + if uploaded_file: df = pd.read_csv(uploaded_file) - st.map( - data=df, - size=1, + st.write("Loaded data") +else: + # Sample data + df = pd.DataFrame( + { + "lat": [ + 35.681236, + 35.689487, + ], + "lon": [ + 139.767125, + 139.691706, + ], + } ) + st.write("Sample data") +st.write(df) +st.map( + data=df, + size=1, +) diff --git a/apps/8_streamlit_azure_openai_batch/test.jsonl b/datasets/test.jsonl similarity index 100% rename from apps/8_streamlit_azure_openai_batch/test.jsonl rename to datasets/test.jsonl
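
Note: the recurring change in this patch is to drop the early `st.stop()` guard and instead gate interactive widgets with an `is_configured()` helper plus `disabled=` flags, so each page still renders (with a warning) when the Azure OpenAI settings are missing. The snippet below is a minimal standalone sketch of that pattern, not code taken from the repository; the sidebar fields, default values, and the `get_client()` helper are assumptions that merely mirror the variable names used in these apps.

# Minimal sketch of the configuration-gating pattern applied across these pages.
# Assumes the `streamlit` and `openai` (>=1.x) packages; names mirror the apps above.
import streamlit as st
from openai import AzureOpenAI

with st.sidebar:
    azure_openai_endpoint = st.text_input("AZURE_OPENAI_ENDPOINT")
    azure_openai_api_key = st.text_input("AZURE_OPENAI_API_KEY", type="password")
    azure_openai_api_version = st.text_input("AZURE_OPENAI_API_VERSION", value="2024-06-01")
    azure_openai_gpt_model = st.text_input("AZURE_OPENAI_GPT_MODEL", value="gpt-4o")


def is_configured() -> bool:
    # Every setting must be non-empty before any API call is allowed.
    return bool(
        azure_openai_api_key
        and azure_openai_endpoint
        and azure_openai_api_version
        and azure_openai_gpt_model
    )


def get_client() -> AzureOpenAI:
    # Build the client lazily so the page can render even without credentials.
    return AzureOpenAI(
        api_key=azure_openai_api_key,
        api_version=azure_openai_api_version,
        azure_endpoint=azure_openai_endpoint,
    )


st.title("Configuration-gated page")

if not is_configured():
    # Warn instead of calling st.stop(): the rest of the UI stays visible,
    # its inputs are simply disabled until configuration is complete.
    st.warning("Please fill in the required fields at the sidebar.")

# The widget is always rendered; it only becomes interactive once configured.
if prompt := st.chat_input(disabled=not is_configured()):
    st.chat_message("user").write(prompt)
    response = get_client().chat.completions.create(
        model=azure_openai_gpt_model,
        messages=[{"role": "user", "content": prompt}],
    )
    st.chat_message("assistant").write(response.choices[0].message.content)

The design trade-off is that users see the full page layout (uploaders, text areas, buttons) even before entering credentials, instead of a page truncated by `st.stop()`; the `disabled=` flags and the shared `get_client()` factory keep every API call behind the same configuration check.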