diff --git a/deployment/kustomizations/base/cm.yaml b/deployment/kustomizations/base/cm.yaml
index 09533868c3..43f4eb0251 100644
--- a/deployment/kustomizations/base/cm.yaml
+++ b/deployment/kustomizations/base/cm.yaml
@@ -189,7 +189,8 @@ data:
         document_store: qdrant
       - name: sql_tables_extraction
         llm: litellm_llm.default
-
+      - name: data_exploration_assistance
+        llm: litellm_llm.default
     ---
     settings:
       doc_endpoint: https://docs.getwren.ai
diff --git a/docker/config.example.yaml b/docker/config.example.yaml
index 61bdef472a..3078e91390 100644
--- a/docker/config.example.yaml
+++ b/docker/config.example.yaml
@@ -139,7 +139,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   doc_endpoint: https://docs.getwren.ai
diff --git a/wren-ai-service/docs/config_examples/config.anthropic.yaml b/wren-ai-service/docs/config_examples/config.anthropic.yaml
index 00428f0e4f..3e98269cc7 100644
--- a/wren-ai-service/docs/config_examples/config.anthropic.yaml
+++ b/wren-ai-service/docs/config_examples/config.anthropic.yaml
@@ -136,7 +136,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.azure.yaml b/wren-ai-service/docs/config_examples/config.azure.yaml
index 53b50858a5..0b1844c0e2 100644
--- a/wren-ai-service/docs/config_examples/config.azure.yaml
+++ b/wren-ai-service/docs/config_examples/config.azure.yaml
@@ -149,7 +149,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.deepseek.yaml b/wren-ai-service/docs/config_examples/config.deepseek.yaml
index e311a5d8e6..c69b80856f 100644
--- a/wren-ai-service/docs/config_examples/config.deepseek.yaml
+++ b/wren-ai-service/docs/config_examples/config.deepseek.yaml
@@ -159,7 +159,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.google_ai_studio.yaml b/wren-ai-service/docs/config_examples/config.google_ai_studio.yaml
index 5f33b0659c..66b2dc0e96 100644
--- a/wren-ai-service/docs/config_examples/config.google_ai_studio.yaml
+++ b/wren-ai-service/docs/config_examples/config.google_ai_studio.yaml
@@ -145,7 +145,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.grok.yaml b/wren-ai-service/docs/config_examples/config.grok.yaml
index 31a228a035..aa45a8fb62 100644
--- a/wren-ai-service/docs/config_examples/config.grok.yaml
+++ b/wren-ai-service/docs/config_examples/config.grok.yaml
@@ -141,7 +141,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.groq.yaml b/wren-ai-service/docs/config_examples/config.groq.yaml
index a9905eb016..cf637b4d39 100644
--- a/wren-ai-service/docs/config_examples/config.groq.yaml
+++ b/wren-ai-service/docs/config_examples/config.groq.yaml
@@ -140,7 +140,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.lm_studio.yaml b/wren-ai-service/docs/config_examples/config.lm_studio.yaml
index 9a133d3787..1f20f1f364 100644
--- a/wren-ai-service/docs/config_examples/config.lm_studio.yaml
+++ b/wren-ai-service/docs/config_examples/config.lm_studio.yaml
@@ -139,7 +139,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.ollama.yaml b/wren-ai-service/docs/config_examples/config.ollama.yaml
index 2fc534cb18..92954504ca 100644
--- a/wren-ai-service/docs/config_examples/config.ollama.yaml
+++ b/wren-ai-service/docs/config_examples/config.ollama.yaml
@@ -137,7 +137,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
diff --git a/wren-ai-service/docs/config_examples/config.open_router.yaml b/wren-ai-service/docs/config_examples/config.open_router.yaml
index eab5b89530..a5d7e133ec 100644
--- a/wren-ai-service/docs/config_examples/config.open_router.yaml
+++ b/wren-ai-service/docs/config_examples/config.open_router.yaml
@@ -137,7 +137,8 @@ pipes:
     document_store: qdrant
   - name: sql_tables_extraction
     llm: litellm_llm.default
-
+  - name: data_exploration_assistance
+    llm: litellm_llm.default
 ---
 settings:
   engine_timeout: 30
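Every configuration file above picks up the same two-line change: a new data_exploration_assistance pipe bound to the default LiteLLM model, replacing a stray blank line at the end of the pipes list. As a minimal sketch (abridged to the affected entries; the surrounding pipes are unchanged, and the indentation shown assumes a top-level pipes list as in docker/config.example.yaml), the tail of each pipes list now reads:

```yaml
pipes:
  # ... existing pipes unchanged ...
  - name: sql_tables_extraction
    llm: litellm_llm.default
  - name: data_exploration_assistance  # pipe added by this change
    llm: litellm_llm.default
```

The remainder of the diff regenerates wren-ai-service/poetry.lock, removing streamlit, streamlit-tags, extra-streamlit-components, and sseclient-py along with what appear to be their now-unused transitive dependencies: altair and narwhals, pydeck, gitpython/gitdb/smmap, watchdog, and the matplotlib stack (matplotlib, contourpy, cycler, fonttools, kiwisolver, pillow, pyparsing).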
diff --git a/wren-ai-service/poetry.lock b/wren-ai-service/poetry.lock
index 4afefb6733..5406dd5820 100644
--- a/wren-ai-service/poetry.lock
+++ b/wren-ai-service/poetry.lock
@@ -173,30 +173,6 @@ typing-extensions = ">=4"
 
 [package.extras]
 tz = ["backports.zoneinfo", "tzdata"]
 
-[[package]]
-name = "altair"
-version = "5.5.0"
-description = "Vega-Altair: A declarative statistical visualization library for Python."
-optional = false
-python-versions = ">=3.9"
-files = [
-    {file = "altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c"},
-    {file = "altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d"},
-]
-
-[package.dependencies]
-jinja2 = "*"
-jsonschema = ">=3.0"
-narwhals = ">=1.14.2"
-packaging = "*"
-typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.14\""}
-
-[package.extras]
-all = ["altair-tiles (>=0.3.0)", "anywidget (>=0.9.0)", "numpy", "pandas (>=1.1.3)", "pyarrow (>=11)", "vega-datasets (>=0.9.0)", "vegafusion[embed] (>=1.6.6)", "vl-convert-python (>=1.7.0)"]
-dev = ["duckdb (>=1.0)", "geopandas", "hatch (>=1.13.0)", "ipython[kernel]", "mistune", "mypy", "pandas (>=1.1.3)", "pandas-stubs", "polars (>=0.20.3)", "pyarrow-stubs", "pytest", "pytest-cov", "pytest-xdist[psutil] (>=3.5,<4.0)", "ruff (>=0.6.0)", "types-jsonschema", "types-setuptools"]
-doc = ["docutils", "jinja2", "myst-parser", "numpydoc", "pillow (>=9,<10)", "pydata-sphinx-theme (>=0.14.1)", "scipy", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinxext-altair"]
-save = ["vl-convert-python (>=1.7.0)"]
-
 [[package]]
 name = "annotated-types"
 version = "0.7.0"
@@ -784,79 +760,6 @@ files = [
 test = ["PyYAML", "mock", "pytest"]
 yaml = ["PyYAML"]
 
-[[package]]
-name = "contourpy"
-version = "1.3.1"
-description = "Python library for calculating contours of 2D quadrilateral grids"
-optional = false
-python-versions = ">=3.10"
-files = [
-    {file = "contourpy-1.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab"},
-    {file = "contourpy-1.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124"},
-    {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2f926efda994cdf3c8d3fdb40b9962f86edbc4457e739277b961eced3d0b4c1"},
-    {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adce39d67c0edf383647a3a007de0a45fd1b08dedaa5318404f1a73059c2512b"},
-    {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abbb49fb7dac584e5abc6636b7b2a7227111c4f771005853e7d25176daaf8453"},
-    {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0cffcbede75c059f535725c1680dfb17b6ba8753f0c74b14e6a9c68c29d7ea3"},
-    {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab29962927945d89d9b293eabd0d59aea28d887d4f3be6c22deaefbb938a7277"},
-    {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974d8145f8ca354498005b5b981165b74a195abfae9a8129df3e56771961d595"},
-    {file = "contourpy-1.3.1-cp310-cp310-win32.whl", hash = "sha256:ac4578ac281983f63b400f7fe6c101bedc10651650eef012be1ccffcbacf3697"},
-    {file = "contourpy-1.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:174e758c66bbc1c8576992cec9599ce8b6672b741b5d336b5c74e35ac382b18e"},
-    {file = "contourpy-1.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8b974d8db2c5610fb4e76307e265de0edb655ae8169e8b21f41807ccbeec4b"},
-    {file = "contourpy-1.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20914c8c973f41456337652a6eeca26d2148aa96dd7ac323b74516988bea89fc"},
-    {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d40d37c1c3a4961b4619dd9d77b12124a453cc3d02bb31a07d58ef684d3d86"},
-    {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:113231fe3825ebf6f15eaa8bc1f5b0ddc19d42b733345eae0934cb291beb88b6"},
-    {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dbbc03a40f916a8420e420d63e96a1258d3d1b58cbdfd8d1f07b49fcbd38e85"},
-    {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a04ecd68acbd77fa2d39723ceca4c3197cb2969633836ced1bea14e219d077c"},
-    {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c414fc1ed8ee1dbd5da626cf3710c6013d3d27456651d156711fa24f24bd1291"},
-    {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:31c1b55c1f34f80557d3830d3dd93ba722ce7e33a0b472cba0ec3b6535684d8f"},
-    {file = "contourpy-1.3.1-cp311-cp311-win32.whl", hash = "sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375"},
-    {file = "contourpy-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:b2bdca22a27e35f16794cf585832e542123296b4687f9fd96822db6bae17bfc9"},
-    {file = "contourpy-1.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509"},
-    {file = "contourpy-1.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc"},
-    {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454"},
-    {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80"},
-    {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec"},
-    {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9"},
-    {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b"},
-    {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d"},
-    {file = "contourpy-1.3.1-cp312-cp312-win32.whl", hash = "sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e"},
-    {file = "contourpy-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d"},
-    {file = "contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2"},
-    {file = "contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5"},
-    {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81"},
-    {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2"},
-    {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7"},
-    {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c"},
-    {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3"},
-    {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1"},
-    {file = "contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82"},
-    {file = "contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd"},
-    {file = "contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30"},
-    {file = "contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751"},
-    {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342"},
-    {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c"},
-    {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f"},
-    {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda"},
-    {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242"},
-    {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1"},
-    {file = "contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1"},
-    {file = "contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546"},
-    {file = "contourpy-1.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b457d6430833cee8e4b8e9b6f07aa1c161e5e0d52e118dc102c8f9bd7dd060d6"},
-    {file = "contourpy-1.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb76c1a154b83991a3cbbf0dfeb26ec2833ad56f95540b442c73950af2013750"},
-    {file = "contourpy-1.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:44a29502ca9c7b5ba389e620d44f2fbe792b1fb5734e8b931ad307071ec58c53"},
-    {file = "contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699"},
-]
-
-[package.dependencies]
-numpy = ">=1.23"
-
-[package.extras]
-bokeh = ["bokeh", "selenium"]
-docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
-mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"]
-test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
-test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"]
-
 [[package]]
 name = "coverage"
 version = "7.6.10"
@@ -931,21 +834,6 @@ files = [
 [package.extras]
 toml = ["tomli"]
 
-[[package]]
-name = "cycler"
-version = "0.12.1"
-description = "Composable style cycles"
-optional = false
-python-versions = ">=3.8"
-files = [
{file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] -docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - [[package]] name = "dataclasses-json" version = "0.6.7" @@ -1252,20 +1140,6 @@ files = [ [package.extras] tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] -[[package]] -name = "extra-streamlit-components" -version = "0.1.71" -description = "An all-in-one place, to find complex or just natively unavailable components on streamlit." -optional = false -python-versions = ">=3.6" -files = [ - {file = "extra_streamlit_components-0.1.71-py3-none-any.whl", hash = "sha256:c8e6f98446adecd3002756362e50d0669693b7673afaa89cebfced6415cc6bd3"}, - {file = "extra_streamlit_components-0.1.71.tar.gz", hash = "sha256:d18314cf2ed009f95641882b50aa3bdb11b6a0eb6403fb43dbc8af1722419617"}, -] - -[package.dependencies] -streamlit = ">=1.18.0" - [[package]] name = "fastapi" version = "0.115.8" @@ -1367,79 +1241,6 @@ files = [ Flask = ">=1.0.4" Werkzeug = ">=1.0.1" -[[package]] -name = "fonttools" -version = "4.55.8" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.55.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d11600f5343092697d7434f3bf77a393c7ae74be206fe30e577b9a195fd53165"}, - {file = "fonttools-4.55.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c96f2506ce1a0beeaa9595f9a8b7446477eb133f40c0e41fc078744c28149f80"}, - {file = "fonttools-4.55.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b5f05ef72e846e9f49ccdd74b9da4309901a4248434c63c1ee9321adcb51d65"}, - {file = "fonttools-4.55.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba45b637da80a262b55b7657aec68da2ac54b8ae7891cd977a5dbe5fd26db429"}, - {file = "fonttools-4.55.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:edcffaeadba9a334c1c3866e275d7dd495465e7dbd296f688901bdbd71758113"}, - {file = "fonttools-4.55.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b9f9fce3c9b2196e162182ec5db8af8eb3acd0d76c2eafe9fdba5f370044e556"}, - {file = "fonttools-4.55.8-cp310-cp310-win32.whl", hash = "sha256:f089e8da0990cfe2d67e81d9cf581ff372b48dc5acf2782701844211cd1f0eb3"}, - {file = "fonttools-4.55.8-cp310-cp310-win_amd64.whl", hash = "sha256:01ea3901b0802fc5f9e854f5aeb5bc27770dd9dd24c28df8f74ba90f8b3f5915"}, - {file = "fonttools-4.55.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:95f5a1d4432b3cea6571f5ce4f4e9b25bf36efbd61c32f4f90130a690925d6ee"}, - {file = "fonttools-4.55.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d20f152de7625a0008ba1513f126daaaa0de3b4b9030aa72dd5c27294992260"}, - {file = "fonttools-4.55.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5a3ff5bb95fd5a3962b2754f8435e6d930c84fc9e9921c51e802dddf40acd56"}, - {file = "fonttools-4.55.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b99d4fd2b6d0a00c7336c8363fccc7a11eccef4b17393af75ca6e77cf93ff413"}, - {file = "fonttools-4.55.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d637e4d33e46619c79d1a6c725f74d71b574cd15fb5bbb9b6f3eba8f28363573"}, - {file = "fonttools-4.55.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:0f38bfb6b7a39c4162c3eb0820a0bdf8e3bdd125cd54e10ba242397d15e32439"}, - {file = "fonttools-4.55.8-cp311-cp311-win32.whl", hash = "sha256:acfec948de41cd5e640d5c15d0200e8b8e7c5c6bb82afe1ca095cbc4af1188ee"}, - {file = "fonttools-4.55.8-cp311-cp311-win_amd64.whl", hash = "sha256:604c805b41241b4880e2dc86cf2d4754c06777371c8299799ac88d836cb18c3b"}, - {file = "fonttools-4.55.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:63403ee0f2fa4e1de28e539f8c24f2bdca1d8ecb503fa9ea2d231d9f1e729809"}, - {file = "fonttools-4.55.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:302e1003a760b222f711d5ba6d1ad7fd5f7f713eb872cd6a3eb44390bc9770af"}, - {file = "fonttools-4.55.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e72a7816ff8a759be9ca36ca46934f8ccf4383711ef597d9240306fe1878cb8d"}, - {file = "fonttools-4.55.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03c2b50b54e6e8b3564b232e57e8f58be217cf441cf0155745d9e44a76f9c30f"}, - {file = "fonttools-4.55.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7230f7590f9570d26ee903b6a4540274494e200fae978df0d9325b7b9144529"}, - {file = "fonttools-4.55.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:466a78984f0572305c3c48377f4e3f7f4e909f1209f45ef8e7041d5c8a744a56"}, - {file = "fonttools-4.55.8-cp312-cp312-win32.whl", hash = "sha256:243cbfc0b7cb1c307af40e321f8343a48d0a080bc1f9466cf2b5468f776ef108"}, - {file = "fonttools-4.55.8-cp312-cp312-win_amd64.whl", hash = "sha256:a19059aa892676822c1f05cb5a67296ecdfeb267fe7c47d4758f3e8e942c2b2a"}, - {file = "fonttools-4.55.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:332883b6280b9d90d2ba7e9e81be77cf2ace696161e60cdcf40cfcd2b3ed06fa"}, - {file = "fonttools-4.55.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6b8d7c149d47b47de7ec81763396c8266e5ebe2e0b14aa9c3ccf29e52260ab2f"}, - {file = "fonttools-4.55.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4dfae7c94987149bdaa0388e6c937566aa398fa0eec973b17952350a069cff4e"}, - {file = "fonttools-4.55.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0fe12f06169af2fdc642d26a8df53e40adc3beedbd6ffedb19f1c5397b63afd"}, - {file = "fonttools-4.55.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f971aa5f50c22dc4b63a891503624ae2c77330429b34ead32f23c2260c5618cd"}, - {file = "fonttools-4.55.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:708cb17b2590b7f6c6854999df0039ff1140dda9e6f56d67c3599ba6f968fab5"}, - {file = "fonttools-4.55.8-cp313-cp313-win32.whl", hash = "sha256:cfe9cf30f391a0f2875247a3e5e44d8dcb61596e5cf89b360cdffec8a80e9961"}, - {file = "fonttools-4.55.8-cp313-cp313-win_amd64.whl", hash = "sha256:1e10efc8ee10d6f1fe2931d41bccc90cd4b872f2ee4ff21f2231a2c293b2dbf8"}, - {file = "fonttools-4.55.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9b6fcff4dc755b32faff955d989ee26394ddad3a90ea7d558db17a4633c8390c"}, - {file = "fonttools-4.55.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:02c41322e5bdcb484b61b776fcea150215c83619b39c96aa0b44d4fd87bb5574"}, - {file = "fonttools-4.55.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9164f44add0acec0f12fce682824c040dc52e483bfe3838c37142897150c8364"}, - {file = "fonttools-4.55.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2248ebfbcea0d0b3cb459d76a9f67f2eadc10ec0d07e9cadab8777d3f016bf2"}, - {file = 
"fonttools-4.55.8-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3461347016c94cb42b36caa907e11565878c4c2c375604f3651d11dc06d1ab3e"}, - {file = "fonttools-4.55.8-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:67df1c3935838fb9e56f227d7f506c9043b149a4a3b667bef17929c7a1114d19"}, - {file = "fonttools-4.55.8-cp38-cp38-win32.whl", hash = "sha256:cb121d6dd34625cece32234a5fa0359475bb118838b6b4295ffdb13b935edb04"}, - {file = "fonttools-4.55.8-cp38-cp38-win_amd64.whl", hash = "sha256:285c1ac10c160fbdff6d05358230e66c4f98cbbf271f3ec7eb34e967771543e8"}, - {file = "fonttools-4.55.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8abd135e427d88e461a4833c03cf96cfb9028c78c15d58123291f22398e25492"}, - {file = "fonttools-4.55.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:65cb8f97eed7906dcf19bc2736b70c6239e9d7e77aad7c6110ba7239ae082e81"}, - {file = "fonttools-4.55.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:450c354c04a6e12a3db968e915fe05730f79ff3d39560947ef8ee6eaa2ab2212"}, - {file = "fonttools-4.55.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2232012a1502b2b8ab4c6bc1d3524bfe90238c0c1a50ac94a0a2085aa87a58a5"}, - {file = "fonttools-4.55.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d39f0c977639be0f9f5505d4c7c478236737f960c567a35f058649c056e41434"}, - {file = "fonttools-4.55.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:de78d6d0dbe32561ce059265437021f4746e56073c4799f0f1095828ae7232bd"}, - {file = "fonttools-4.55.8-cp39-cp39-win32.whl", hash = "sha256:bf4b5b3496ddfdd4e57112e77ec51f1ab388d35ac17322c1248addb2eb0d429a"}, - {file = "fonttools-4.55.8-cp39-cp39-win_amd64.whl", hash = "sha256:ccf8ae02918f431953d338db4d0a675a395faf82bab3a76025582cf32a2f3b7b"}, - {file = "fonttools-4.55.8-py3-none-any.whl", hash = "sha256:07636dae94f7fe88561f9da7a46b13d8e3f529f87fdb221b11d85f91eabceeb7"}, - {file = "fonttools-4.55.8.tar.gz", hash = "sha256:54d481d456dcd59af25d4a9c56b2c4c3f20e9620b261b84144e5950f33e8df17"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - [[package]] name = "frozenlist" version = "1.5.0" @@ -1761,38 +1562,6 @@ benchmarks = ["httplib2", "httpx", "requests", "urllib3"] dev = ["dpkt", "pytest", "requests"] examples = ["oauth2"] -[[package]] -name = "gitdb" -version = "4.0.12" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf"}, - {file = "gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.44" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -files = [ - {file = 
"GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110"}, - {file = "gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[package.extras] -doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] -test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] - [[package]] name = "google-auth" version = "2.38.0" @@ -2686,95 +2455,6 @@ traitlets = ">=5.3" docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] -[[package]] -name = "kiwisolver" -version = "1.4.8" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.10" -files = [ - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e"}, - {file = "kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751"}, - {file = "kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271"}, - {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84"}, - {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561"}, - {file = "kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67"}, - {file = "kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34"}, - {file = "kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a"}, - {file = 
"kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8"}, - {file = "kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50"}, - {file = "kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb"}, - {file = "kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2"}, - {file = "kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = 
"sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b"}, - {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, -] - [[package]] name = "langchain" version = "0.3.17" @@ -3140,63 +2820,6 @@ dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", 
"sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"] tests = ["pytest", "simplejson"] -[[package]] -name = "matplotlib" -version = "3.10.0" -description = "Python plotting package" -optional = false -python-versions = ">=3.10" -files = [ - {file = "matplotlib-3.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2c5829a5a1dd5a71f0e31e6e8bb449bc0ee9dbfb05ad28fc0c6b55101b3a4be6"}, - {file = "matplotlib-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2a43cbefe22d653ab34bb55d42384ed30f611bcbdea1f8d7f431011a2e1c62e"}, - {file = "matplotlib-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:607b16c8a73943df110f99ee2e940b8a1cbf9714b65307c040d422558397dac5"}, - {file = "matplotlib-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01d2b19f13aeec2e759414d3bfe19ddfb16b13a1250add08d46d5ff6f9be83c6"}, - {file = "matplotlib-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e6c6461e1fc63df30bf6f80f0b93f5b6784299f721bc28530477acd51bfc3d1"}, - {file = "matplotlib-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:994c07b9d9fe8d25951e3202a68c17900679274dadfc1248738dcfa1bd40d7f3"}, - {file = "matplotlib-3.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:fd44fc75522f58612ec4a33958a7e5552562b7705b42ef1b4f8c0818e304a363"}, - {file = "matplotlib-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c58a9622d5dbeb668f407f35f4e6bfac34bb9ecdcc81680c04d0258169747997"}, - {file = "matplotlib-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:845d96568ec873be63f25fa80e9e7fae4be854a66a7e2f0c8ccc99e94a8bd4ef"}, - {file = "matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5439f4c5a3e2e8eab18e2f8c3ef929772fd5641876db71f08127eed95ab64683"}, - {file = "matplotlib-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4673ff67a36152c48ddeaf1135e74ce0d4bce1bbf836ae40ed39c29edf7e2765"}, - {file = "matplotlib-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e8632baebb058555ac0cde75db885c61f1212e47723d63921879806b40bec6a"}, - {file = "matplotlib-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4659665bc7c9b58f8c00317c3c2a299f7f258eeae5a5d56b4c64226fca2f7c59"}, - {file = "matplotlib-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d44cb942af1693cced2604c33a9abcef6205601c445f6d0dc531d813af8a2f5a"}, - {file = "matplotlib-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a994f29e968ca002b50982b27168addfd65f0105610b6be7fa515ca4b5307c95"}, - {file = "matplotlib-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b0558bae37f154fffda54d779a592bc97ca8b4701f1c710055b609a3bac44c8"}, - {file = "matplotlib-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:503feb23bd8c8acc75541548a1d709c059b7184cde26314896e10a9f14df5f12"}, - {file = "matplotlib-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:c40ba2eb08b3f5de88152c2333c58cee7edcead0a2a0d60fcafa116b17117adc"}, - {file = "matplotlib-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96f2886f5c1e466f21cc41b70c5a0cd47bfa0015eb2d5793c88ebce658600e25"}, - {file = "matplotlib-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:12eaf48463b472c3c0f8dbacdbf906e573013df81a0ab82f0616ea4b11281908"}, - {file = "matplotlib-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2fbbabc82fde51391c4da5006f965e36d86d95f6ee83fb594b279564a4c5d0d2"}, - {file = "matplotlib-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad2e15300530c1a94c63cfa546e3b7864bd18ea2901317bae8bbf06a5ade6dcf"}, - {file = "matplotlib-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3547d153d70233a8496859097ef0312212e2689cdf8d7ed764441c77604095ae"}, - {file = "matplotlib-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:c55b20591ced744aa04e8c3e4b7543ea4d650b6c3c4b208c08a05b4010e8b442"}, - {file = "matplotlib-3.10.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ade1003376731a971e398cc4ef38bb83ee8caf0aee46ac6daa4b0506db1fd06"}, - {file = "matplotlib-3.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95b710fea129c76d30be72c3b38f330269363fbc6e570a5dd43580487380b5ff"}, - {file = "matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cdbaf909887373c3e094b0318d7ff230b2ad9dcb64da7ade654182872ab2593"}, - {file = "matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d907fddb39f923d011875452ff1eca29a9e7f21722b873e90db32e5d8ddff12e"}, - {file = "matplotlib-3.10.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3b427392354d10975c1d0f4ee18aa5844640b512d5311ef32efd4dd7db106ede"}, - {file = "matplotlib-3.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5fd41b0ec7ee45cd960a8e71aea7c946a28a0b8a4dcee47d2856b2af051f334c"}, - {file = "matplotlib-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:81713dd0d103b379de4516b861d964b1d789a144103277769238c732229d7f03"}, - {file = "matplotlib-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:359f87baedb1f836ce307f0e850d12bb5f1936f70d035561f90d41d305fdacea"}, - {file = "matplotlib-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80dc3a4add4665cf2faa90138384a7ffe2a4e37c58d83e115b54287c4f06ef"}, - {file = "matplotlib-3.10.0.tar.gz", hash = "sha256:b886d02a581b96704c9d1ffe55709e49b4d2d52709ccebc4be42db856e511278"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -kiwisolver = ">=1.3.1" -numpy = ">=1.23" -packaging = ">=20.0" -pillow = ">=8" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" - -[package.extras] -dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] - [[package]] name = "matplotlib-inline" version = "0.1.7" @@ -3453,32 +3076,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "narwhals" -version = "1.24.1" -description = "Extremely lightweight compatibility layer between dataframe libraries" -optional = false -python-versions = ">=3.8" -files = [ - {file = "narwhals-1.24.1-py3-none-any.whl", hash = "sha256:d8983fe14851c95d60576ddca37c094bd4ed24ab9ea98396844fb20ad9aaf184"}, - {file = "narwhals-1.24.1.tar.gz", hash = "sha256:b09b8253d945f23cdb683a84685abf3afb9f96114d89e9f35dc876e143f65007"}, -] - -[package.extras] -core = ["duckdb", "pandas", "polars", "pyarrow", "pyarrow-stubs"] -cudf = ["cudf (>=24.10.0)"] -dask = ["dask[dataframe] (>=2024.8)"] -dev = ["covdefaults", "hypothesis", "pre-commit", "pytest", "pytest-cov", "pytest-env", "pytest-randomly", "typing-extensions"] -docs = ["black", "duckdb", "jinja2", "markdown-exec[ansi]", "mkdocs", "mkdocs-autorefs", "mkdocs-material", "mkdocstrings[python]", "pandas", 
"polars (>=1.0.0)", "pyarrow"] -duckdb = ["duckdb (>=1.0)"] -extra = ["scikit-learn"] -ibis = ["ibis-framework (>=6.0.0)", "packaging", "pyarrow-hotfix", "rich"] -modin = ["modin"] -pandas = ["pandas (>=0.25.3)"] -polars = ["polars (>=0.20.3)"] -pyarrow = ["pyarrow (>=11.0.0)"] -pyspark = ["pyspark (>=3.5.0)"] - [[package]] name = "nbformat" version = "5.10.4" @@ -3996,94 +3593,6 @@ files = [ [package.dependencies] ptyprocess = ">=0.5" -[[package]] -name = "pillow" -version = "11.1.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, - {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, - {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"}, - {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"}, - {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"}, - {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"}, - {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"}, - {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"}, - {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"}, - {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"}, - {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"}, - {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"}, - {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"}, - {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"}, - {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"}, - {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"}, - {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"}, - {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"}, - {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"}, - {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = 
"sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"}, - {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"}, - {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"}, - {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"}, - {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"}, - {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"}, - {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"}, - {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"}, - {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"}, - {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"}, - {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"}, - {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"}, - {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"}, - {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"}, - {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"}, - {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"}, - {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"}, - {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"}, - {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"}, - {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"}, - {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"}, - {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"}, - {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"}, - {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"}, - {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"}, - {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"}, - {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"}, - {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"}, - {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"}, - {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"}, - {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"}, - {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"}, - {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"}, - {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"}, - {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"}, - {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"}, - {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"}, - {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"}, - {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"}, - {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"}, - {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"}, - {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"}, - {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"}, - {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"}, - {file = 
"pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"}, - {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"}, - {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] -fpx = ["olefile"] -mic = ["olefile"] -tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] -typing = ["typing-extensions"] -xmp = ["defusedxml"] - [[package]] name = "platformdirs" version = "4.3.6" @@ -4699,25 +4208,6 @@ azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0 toml = ["tomli (>=2.0.1)"] yaml = ["pyyaml (>=6.0.1)"] -[[package]] -name = "pydeck" -version = "0.9.1" -description = "Widget for deck.gl maps" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038"}, - {file = "pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605"}, -] - -[package.dependencies] -jinja2 = ">=2.10.1" -numpy = ">=1.16.4" - -[package.extras] -carto = ["pydeck-carto"] -jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] - [[package]] name = "pygments" version = "2.19.1" @@ -4732,20 +4222,6 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] -[[package]] -name = "pyparsing" -version = "3.2.1" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, - {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - [[package]] name = "pysocks" version = "1.7.1" @@ -5642,17 +5118,6 @@ files = [ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] -[[package]] -name = "smmap" -version = "5.0.2" -description = "A pure Python implementation of a sliding window memory map manager" -optional = false -python-versions = ">=3.7" -files = [ - {file = "smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e"}, - {file = "smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5"}, -] - [[package]] name = "sniffio" version = "1.3.1" @@ -5800,17 +5265,6 @@ files = [ dev = ["build", "hatch"] doc = ["sphinx"] -[[package]] -name = "sseclient-py" -version = "1.8.0" -description = "SSE client for Python" -optional = false -python-versions = "*" -files = [ - {file = "sseclient-py-1.8.0.tar.gz", hash = "sha256:c547c5c1a7633230a38dc599a21a2dc638f9b5c297286b48b46b935c71fac3e8"}, - {file = "sseclient_py-1.8.0-py2.py3-none-any.whl", hash = 
"sha256:4ecca6dc0b9f963f8384e9d7fd529bf93dd7d708144c4fb5da0e0a1a926fee83"}, -] - [[package]] name = "stack-data" version = "0.6.3" @@ -5847,55 +5301,6 @@ anyio = ">=3.6.2,<5" [package.extras] full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] -[[package]] -name = "streamlit" -version = "1.41.1" -description = "A faster way to build and share data apps" -optional = false -python-versions = "!=3.9.7,>=3.9" -files = [ - {file = "streamlit-1.41.1-py2.py3-none-any.whl", hash = "sha256:0def00822480071d642e6df36cd63c089f991da3a69fd9eb4ab8f65ce27de4e0"}, - {file = "streamlit-1.41.1.tar.gz", hash = "sha256:6626d32b098ba1458b71eebdd634c62af2dd876380e59c4b6a1e828a39d62d69"}, -] - -[package.dependencies] -altair = ">=4.0,<6" -blinker = ">=1.0.0,<2" -cachetools = ">=4.0,<6" -click = ">=7.0,<9" -gitpython = ">=3.0.7,<3.1.19 || >3.1.19,<4" -numpy = ">=1.23,<3" -packaging = ">=20,<25" -pandas = ">=1.4.0,<3" -pillow = ">=7.1.0,<12" -protobuf = ">=3.20,<6" -pyarrow = ">=7.0" -pydeck = ">=0.8.0b4,<1" -requests = ">=2.27,<3" -rich = ">=10.14.0,<14" -tenacity = ">=8.1.0,<10" -toml = ">=0.10.1,<2" -tornado = ">=6.0.3,<7" -typing-extensions = ">=4.3.0,<5" -watchdog = {version = ">=2.1.5,<7", markers = "platform_system != \"Darwin\""} - -[package.extras] -snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python[modin] (>=1.17.0)"] - -[[package]] -name = "streamlit-tags" -version = "1.2.8" -description = "Tags custom component for Streamlit" -optional = false -python-versions = ">=3.6" -files = [ - {file = "streamlit_tags-1.2.8-py3-none-any.whl", hash = "sha256:c71b10666f3fce67d8e0b3c089aa50dc48830d310223fb88005b08f157586f95"}, - {file = "streamlit_tags-1.2.8.tar.gz", hash = "sha256:9ea46b21f206dc73164e59e3c800a96c863c90af57afde20115f001a6d986583"}, -] - -[package.dependencies] -streamlit = ">=0.63" - [[package]] name = "structlog" version = "25.1.0" @@ -6353,53 +5758,6 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] -[[package]] -name = "watchdog" -version = "4.0.2" -description = "Filesystem events monitoring" -optional = false -python-versions = ">=3.8" -files = [ - {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, - {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, - {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, - {file = 
"watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, - {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, - {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, - {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, - {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, - {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, - {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, - {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, - {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, - {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, - {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, - {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, - {file = 
"watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, - {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, - {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, - {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, - {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - [[package]] name = "watchfiles" version = "1.0.4" @@ -7111,4 +6469,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.12.*, <3.13" -content-hash = "891a93be6c7064485fb82d2a1552c1f82be2337491e048167b5a4f5e8d11b5a6" +content-hash = "f6e2c0356d1362362662040eab0a8dea3d15730c692875ce3e2fa407f02076ad" diff --git a/wren-ai-service/pyproject.toml b/wren-ai-service/pyproject.toml index bdc3055dba..620127099e 100644 --- a/wren-ai-service/pyproject.toml +++ b/wren-ai-service/pyproject.toml @@ -39,26 +39,19 @@ qdrant-client = "==1.11.0" [tool.poetry.group.dev.dependencies] pre-commit = "^3.7.1" -streamlit = "^1.37.0" -watchdog = "^4.0.0" pandas = "^2.2.2" -matplotlib = "^3.9.2" -sseclient-py = "^1.8.0" -dspy-ai = "^2.5.26" requests = "^2.32.2" -extra-streamlit-components = "^0.1.71" -deepeval = "^1.0.6" -tomlkit = "^0.13.0" -nltk = "^3.9.1" [tool.poetry.group.eval.dependencies] -gitpython = "^3.1.43" plotly = "^5.24.1" nbformat = "^5.1.3" ipykernel = "^6.29.5" itables = "^2.2.1" gdown = "^5.2.0" -streamlit-tags = "^1.2.8" +nltk = "^3.9.1" +tomlkit = "^0.13.0" +deepeval = "^1.0.6" +dspy-ai = "^2.5.26" [tool.poetry.group.test.dependencies] locust = "^2.32.0" diff --git a/wren-ai-service/src/__main__.py b/wren-ai-service/src/__main__.py index 167ba88d07..eb77672936 100644 --- a/wren-ai-service/src/__main__.py +++ b/wren-ai-service/src/__main__.py @@ -18,6 +18,7 @@ setup_custom_logger, ) from src.web.v1 import routers +from src.web.v2 import routers as v2_routers setup_custom_logger( "wren-ai-service", level_str=settings.logging_level, is_dev=settings.development @@ -54,6 +55,7 @@ async def lifespan(app: FastAPI): allow_headers=["*"], ) app.include_router(routers.router, prefix="/v1", tags=["v1"]) +app.include_router(v2_routers.router, prefix="/v2", tags=["v2"]) # TODO: deprecated, it was used for load testing using locust only. 
should be removed in the future if settings.development: from src.web import development diff --git a/wren-ai-service/src/globals.py b/wren-ai-service/src/globals.py index dd62d50798..5ac1ffe0ab 100644 --- a/wren-ai-service/src/globals.py +++ b/wren-ai-service/src/globals.py @@ -9,6 +9,7 @@ from src.pipelines import generation, indexing, retrieval from src.utils import fetch_wren_ai_docs from src.web.v1 import services +from src.web.v2 import services as v2_services logger = logging.getLogger("wren-ai-service") @@ -27,6 +28,7 @@ class ServiceContainer: sql_question_service: services.SqlQuestionService instructions_service: services.InstructionsService sql_correction_service: services.SqlCorrectionService + conversation_service: v2_services.ConversationService @dataclass @@ -269,6 +271,87 @@ def create_service_container( }, **query_cache, ), + conversation_service=v2_services.ConversationService( + pipelines={ + "intent_classification": generation.IntentClassificationV2( + **pipe_components["intent_classification"], + wren_ai_docs=wren_ai_docs, + ), + "misleading_assistance": generation.MisleadingAssistance( + **pipe_components["misleading_assistance"], + ), + "data_assistance": generation.DataAssistance( + **pipe_components["data_assistance"] + ), + "user_guide_assistance": generation.UserGuideAssistance( + **pipe_components["user_guide_assistance"], + wren_ai_docs=wren_ai_docs, + ), + "data_exploration_assistance": generation.DataExplorationAssistance( + **pipe_components["data_exploration_assistance"], + ), + "db_schema_retrieval": retrieval.DbSchemaRetrieval( + **pipe_components["db_schema_retrieval"], + table_retrieval_size=settings.table_retrieval_size, + table_column_retrieval_size=settings.table_column_retrieval_size, + ), + "historical_question": retrieval.HistoricalQuestionRetrieval( + **pipe_components["historical_question_retrieval"], + historical_question_retrieval_similarity_threshold=settings.historical_question_retrieval_similarity_threshold, + ), + "sql_pairs_retrieval": retrieval.SqlPairsRetrieval( + **pipe_components["sql_pairs_retrieval"], + sql_pairs_similarity_threshold=settings.sql_pairs_similarity_threshold, + sql_pairs_retrieval_max_size=settings.sql_pairs_retrieval_max_size, + ), + "instructions_retrieval": retrieval.Instructions( + **pipe_components["instructions_retrieval"], + similarity_threshold=settings.instructions_similarity_threshold, + top_k=settings.instructions_top_k, + ), + "sql_generation": generation.SQLGeneration( + **pipe_components["sql_generation"], + engine_timeout=settings.engine_timeout, + ), + "sql_generation_reasoning": generation.SQLGenerationReasoning( + **pipe_components["sql_generation_reasoning"], + ), + "followup_sql_generation_reasoning": generation.FollowUpSQLGenerationReasoning( + **pipe_components["followup_sql_generation_reasoning"], + ), + "sql_correction": generation.SQLCorrection( + **pipe_components["sql_correction"], + engine_timeout=settings.engine_timeout, + ), + "followup_sql_generation": generation.FollowUpSQLGeneration( + **pipe_components["followup_sql_generation"], + engine_timeout=settings.engine_timeout, + ), + "sql_regeneration": generation.SQLRegeneration( + **pipe_components["sql_regeneration"], + engine_timeout=settings.engine_timeout, + ), + "sql_functions_retrieval": retrieval.SqlFunctions( + **pipe_components["sql_functions_retrieval"], + engine_timeout=settings.engine_timeout, + ), + "sql_executor": retrieval.SQLExecutor( + **pipe_components["sql_executor"], + engine_timeout=settings.engine_timeout, + ), + 
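+ # the remaining pipeline entries produce user-facing output: natural-language answers and charts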
"sql_answer": generation.SQLAnswer( + **pipe_components["sql_answer"], + engine_timeout=settings.engine_timeout, + ), + "chart_generation": generation.ChartGeneration( + **pipe_components["chart_generation"], + ), + "chart_adjustment": generation.ChartAdjustmentV2( + **pipe_components["chart_adjustment"], + ), + }, + max_histories=settings.max_histories, + ), ) diff --git a/wren-ai-service/src/pipelines/generation/__init__.py b/wren-ai-service/src/pipelines/generation/__init__.py index 6940643217..7d3d74f259 100644 --- a/wren-ai-service/src/pipelines/generation/__init__.py +++ b/wren-ai-service/src/pipelines/generation/__init__.py @@ -1,9 +1,12 @@ from .chart_adjustment import ChartAdjustment +from .chart_adjustment_v2 import ChartAdjustmentV2 from .chart_generation import ChartGeneration from .data_assistance import DataAssistance +from .data_exploration_assistance import DataExplorationAssistance from .followup_sql_generation import FollowUpSQLGeneration from .followup_sql_generation_reasoning import FollowUpSQLGenerationReasoning from .intent_classification import IntentClassification +from .intent_classification_v2 import IntentClassificationV2 from .misleading_assistance import MisleadingAssistance from .question_recommendation import QuestionRecommendation from .relationship_recommendation import RelationshipRecommendation @@ -20,9 +23,11 @@ __all__ = [ "ChartGeneration", "ChartAdjustment", + "ChartAdjustmentV2", "DataAssistance", "FollowUpSQLGeneration", "IntentClassification", + "IntentClassificationV2", "QuestionRecommendation", "RelationshipRecommendation", "SemanticsDescription", @@ -36,4 +41,5 @@ "FollowUpSQLGenerationReasoning", "MisleadingAssistance", "SQLTablesExtraction", + "DataExplorationAssistance", ] diff --git a/wren-ai-service/src/pipelines/generation/chart_adjustment_v2.py b/wren-ai-service/src/pipelines/generation/chart_adjustment_v2.py new file mode 100644 index 0000000000..0e7e0bd789 --- /dev/null +++ b/wren-ai-service/src/pipelines/generation/chart_adjustment_v2.py @@ -0,0 +1,182 @@ +import logging +import sys +from typing import Any, Dict + +import orjson +from hamilton import base +from hamilton.async_driver import AsyncDriver +from haystack.components.builders.prompt_builder import PromptBuilder +from langfuse.decorators import observe + +from src.core.pipeline import BasicPipeline +from src.core.provider import LLMProvider +from src.pipelines.generation.utils.chart import ( + ChartDataPreprocessor, + ChartGenerationPostProcessor, + ChartGenerationResults, + chart_generation_instructions, +) + +logger = logging.getLogger("wren-ai-service") + + +chart_adjustment_system_prompt = f""" +### TASK ### + +You are a data analyst great at visualizing data using vega-lite! Given the user's request, SQL, sample data, sample column values, original vega-lite schema, +you need to re-generate vega-lite schema in JSON and provide suitable chart type. +Besides, you need to give a concise and easy-to-understand reasoning to describe why you provide such vega-lite schema based on the question, SQL, sample data, sample column values, original vega-lite schema and adjustment options. + +{chart_generation_instructions} +- If you think the user's request are not suitable for the data or not suitable for generating the chart, you can return an empty string for the schema and chart type and give reasoning to explain why. + +### OUTPUT FORMAT ### + +Please provide your chain of thought reasoning, chart type and the vega-lite schema in JSON format. 
+ +{{ + "reasoning": <reasoning>, + "chart_type": "line" | "multi_line" | "bar" | "pie" | "grouped_bar" | "stacked_bar" | "area" | "", + "chart_schema": <vega-lite schema> +}} +""" + +chart_adjustment_user_prompt_template = """ +### INPUT ### +User's request: {{ query }} +User's SQL: {{ sql }} +User's Vega-Lite Schema: {{ chart_schema }} +Sample Data: {{ sample_data }} +Sample Column Values: {{ sample_column_values }} +Language: {{ language }} + +Please think step by step +""" + + +## Start of Pipeline +@observe(capture_input=False) +def preprocess_data( + data: Dict[str, Any], chart_data_preprocessor: ChartDataPreprocessor +) -> dict: + return chart_data_preprocessor.run(data) + + +@observe(capture_input=False) +def prompt( + query: str, + sql: str, + chart_schema: dict, + preprocess_data: dict, + language: str, + prompt_builder: PromptBuilder, +) -> dict: + sample_data = preprocess_data.get("sample_data") + sample_column_values = preprocess_data.get("sample_column_values") + + return prompt_builder.run( + query=query, + sql=sql, + chart_schema=chart_schema, + sample_data=sample_data, + sample_column_values=sample_column_values, + language=language, + ) + + +@observe(as_type="generation", capture_input=False) +async def generate_chart_adjustment(prompt: dict, generator: Any) -> dict: + return await generator(prompt=prompt.get("prompt")) + + +@observe(capture_input=False) +def post_process( + generate_chart_adjustment: dict, + vega_schema: Dict[str, Any], + preprocess_data: dict, + post_processor: ChartGenerationPostProcessor, +) -> dict: + return post_processor.run( + generate_chart_adjustment.get("replies"), + vega_schema, + preprocess_data["sample_data"], + ) + + +## End of Pipeline +CHART_ADJUSTMENT_MODEL_KWARGS = { + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "chart_adjustment_results", + "schema": ChartGenerationResults.model_json_schema(), + }, + } +} + + +class ChartAdjustmentV2(BasicPipeline): + def __init__( + self, + llm_provider: LLMProvider, + **kwargs, + ): + self._components = { + "prompt_builder": PromptBuilder( + template=chart_adjustment_user_prompt_template + ), + "generator": llm_provider.get_generator( + system_prompt=chart_adjustment_system_prompt, + generation_kwargs=CHART_ADJUSTMENT_MODEL_KWARGS, + ), + "chart_data_preprocessor": ChartDataPreprocessor(), + "post_processor": ChartGenerationPostProcessor(), + } + + with open("src/pipelines/generation/utils/vega-lite-schema-v5.json", "r") as f: + _vega_schema = orjson.loads(f.read()) + + self._configs = { + "vega_schema": _vega_schema, + } + super().__init__( + AsyncDriver({}, sys.modules[__name__], result_builder=base.DictResult()) + ) + + @observe(name="Chart Adjustment") + async def run( + self, + query: str, + sql: str, + chart_schema: dict, + data: dict, + language: str, + ) -> dict: + logger.info("Chart Adjustment pipeline is running...") + + return await self._pipe.execute( + ["post_process"], + inputs={ + "query": query, + "sql": sql, + "chart_schema": chart_schema, + "data": data, + "language": language, + **self._components, + **self._configs, + }, + ) + + +if __name__ == "__main__": + from src.pipelines.common import dry_run_pipeline + + dry_run_pipeline( + ChartAdjustmentV2, + "chart_adjustment", + query="show me the dataset", + sql="", + chart_schema={}, + data={}, + language="English", + ) diff --git a/wren-ai-service/src/pipelines/generation/data_exploration_assistance.py b/wren-ai-service/src/pipelines/generation/data_exploration_assistance.py new file mode 100644 index 0000000000..84d955022b ---
/dev/null +++ b/wren-ai-service/src/pipelines/generation/data_exploration_assistance.py @@ -0,0 +1,155 @@ +import asyncio +import logging +import sys +from typing import Any, Optional + +from hamilton import base +from hamilton.async_driver import AsyncDriver +from haystack.components.builders.prompt_builder import PromptBuilder +from langfuse.decorators import observe + +from src.core.pipeline import BasicPipeline +from src.core.provider import LLMProvider + +logger = logging.getLogger("wren-ai-service") + + +data_exploration_assistance_system_prompt = """ +You are a great data analyst, skilled at exploring data. +You are given a user question and SQL data. +You need to understand the user question and the SQL data, and then answer the user question. + +### INSTRUCTIONS ### +1. Your answer should be in the same language as the one the user provided. +2. You must base your answer on the SQL data. +3. You should provide your answer in Markdown format. +4. You have the following skills: +- explain the data in an easy-to-understand manner +- provide insights and trends in the data +- identify anomalies and outliers in the data +5. You only need to use the skills required to answer the user question, based on the user question and the SQL data. + +### OUTPUT FORMAT ### +Please provide your response in proper Markdown format without ```markdown``` tags. +""" + +data_exploration_assistance_user_prompt_template = """ +User Question: {{query}} +Language: {{language}} +SQL Data: +{{ sql_data }} + +Please think step by step. +""" + + +## Start of Pipeline +@observe(capture_input=False) +def prompt( + query: str, + language: str, + sql_data: dict, + prompt_builder: PromptBuilder, +) -> dict: + return prompt_builder.run( + query=query, + language=language, + sql_data=sql_data, + ) + + +@observe(as_type="generation", capture_input=False) +async def data_exploration_assistance( + prompt: dict, generator: Any, query_id: str +) -> dict: + return await generator(prompt=prompt.get("prompt"), query_id=query_id) + + +## End of Pipeline + + +class DataExplorationAssistance(BasicPipeline): + def __init__( + self, + llm_provider: LLMProvider, + **kwargs, + ): + self._user_queues = {} + self._components = { + "generator": llm_provider.get_generator( + system_prompt=data_exploration_assistance_system_prompt, + streaming_callback=self._streaming_callback, + ), + "prompt_builder": PromptBuilder( + template=data_exploration_assistance_user_prompt_template + ), + } + + super().__init__( + AsyncDriver({}, sys.modules[__name__], result_builder=base.DictResult()) + ) + + def _streaming_callback(self, chunk, query_id): + if query_id not in self._user_queues: + self._user_queues[ + query_id + ] = asyncio.Queue() # Create a new queue for the user if it doesn't exist + # Put the chunk content into the user's queue + asyncio.create_task(self._user_queues[query_id].put(chunk.content)) + if chunk.meta.get("finish_reason"): + asyncio.create_task(self._user_queues[query_id].put("<DONE>")) + + async def get_streaming_results(self, query_id): + async def _get_streaming_results(query_id): + return await self._user_queues[query_id].get() + + if query_id not in self._user_queues: + self._user_queues[query_id] = asyncio.Queue() + + while True: + try: + # Wait for an item from the user's queue + self._streaming_results = await asyncio.wait_for( + _get_streaming_results(query_id), timeout=120 + ) + if ( + self._streaming_results == "<DONE>" + ): # Check for end-of-stream signal + del self._user_queues[query_id] + break + if
self._streaming_results: # Check if there are results to yield + yield self._streaming_results + self._streaming_results = "" # Clear after yielding + except TimeoutError: + break + + @observe(name="Data Exploration Assistance") + async def run( + self, + query: str, + sql_data: dict, + language: str, + query_id: Optional[str] = None, + ): + logger.info("Data Exploration Assistance pipeline is running...") + return await self._pipe.execute( + ["data_exploration_assistance"], + inputs={ + "query": query, + "language": language, + "query_id": query_id or "", + "sql_data": sql_data, + **self._components, + }, + ) + + +if __name__ == "__main__": + from src.pipelines.common import dry_run_pipeline + + dry_run_pipeline( + DataExplorationAssistance, + "data_exploration_assistance", + query="what can Wren AI do?", + sql_data={}, + language="en", + ) diff --git a/wren-ai-service/src/pipelines/generation/intent_classification_v2.py b/wren-ai-service/src/pipelines/generation/intent_classification_v2.py new file mode 100644 index 0000000000..35167f81f2 --- /dev/null +++ b/wren-ai-service/src/pipelines/generation/intent_classification_v2.py @@ -0,0 +1,460 @@ +import ast +import logging +import sys +from typing import Any, Literal, Optional + +import orjson +from hamilton import base +from hamilton.async_driver import AsyncDriver +from haystack import Document +from haystack.components.builders.prompt_builder import PromptBuilder +from langfuse.decorators import observe +from pydantic import BaseModel + +from src.core.pipeline import BasicPipeline +from src.core.provider import DocumentStoreProvider, EmbedderProvider, LLMProvider +from src.pipelines.common import build_table_ddl +from src.pipelines.generation.utils.sql import construct_instructions +from src.web.v1.services import Configuration +from src.web.v1.services.ask import AskHistory + +logger = logging.getLogger("wren-ai-service") + + +intent_classification_system_prompt = """ +### Task ### +You are an expert detective specializing in intent classification. Combine the user's current question and previous questions to determine their true intent, based on the provided database schema or SQL data, if provided. +Classify the intent into one of these categories: `MISLEADING_QUERY`, `TEXT_TO_SQL`, `DATA_EXPLORATION`, `GENERAL`, `CHART`, or `USER_GUIDE`. Additionally, provide a concise reasoning (maximum 20 words) for your classification. + +### Instructions ### +- **Follow the user's previous questions:** If there are previous questions, try to understand the user's current question as a follow-up to the previous ones. +- **Consider Both Inputs:** Combine the user's current question and their previous questions to identify the user's true intent. +- **Rephrase Question:** Rewrite follow-up questions into full standalone questions using prior conversation context. +- **Concise Reasoning:** The reasoning must be clear, concise, and limited to 20 words. +- **Language Consistency:** Use the same language as specified in the user's output language for the rephrased question and reasoning. +- **Vague Queries:** If the question is vague or does not relate to a table or property from the schema, classify it as `MISLEADING_QUERY`. + +### Intent Definitions ### + +<DATA_EXPLORATION> +**When to Use:** +- The user's question is about data exploration, such as asking for data details, explanations of the data, insights, recommendations, or comparisons.
+ +**Requirements:** +- SQL DATA is provided and the user's question is about exploring the data. +- The user's question can be answered by the SQL DATA. +- The row size of the SQL DATA is less than 500. + +**Examples:** +- "Show me the part where the data appears abnormal" +- "Please explain the data in the table" +- "What's the trend of the data?" +</DATA_EXPLORATION> + +<TEXT_TO_SQL> +**When to Use:** +- The user's inputs are about modifying SQL from previous questions. +- The user's inputs are related to the database schema and require an SQL query. +- The question (or a related previous query) includes references to specific tables, columns, or data details. + +**Requirements:** +- Include specific table and column names from the schema in your reasoning, or modify SQL from previous questions. +- Reference phrases from the user's inputs that clearly relate to the schema. +- SQL DATA is not provided, or the SQL DATA cannot answer the user's question, and the user's question can be answered given the database schema. + +**Examples:** +- "What is the total sales for last quarter?" +- "Show me all customers who purchased product X." +- "List the top 10 products by revenue." +</TEXT_TO_SQL> + +<CHART> +**When to Use:** +- The user's question is about generating a chart or modifying a chart. + +**Requirements:** +- Should pick the last SQL from user query histories. + +**Examples:** +- "Show me the bar chart of the data" +- "Change the bar chart to a line chart" +- "Change the color of the bars to red" +</CHART> + +<GENERAL> +**When to Use:** +- The user seeks general information about the database schema or its overall capabilities. +- The combined queries do not provide enough detail to generate a specific SQL query. + +**Requirements:** +- Highlight phrases from the user's inputs that indicate a general inquiry not tied to specific schema details. + +**Examples:** +- "What is the dataset about?" +- "Tell me more about the database." +- "How can I analyze customer behavior with this data?" +</GENERAL> + +<USER_GUIDE> +**When to Use:** +- The user's inputs pertain to Wren AI's features, usage, or capabilities. +- The query relates directly to content in the user guide. + +**Examples:** +- "What can Wren AI do?" +- "How can I reset a project?" +- "How can I delete a project?" +- "How can I connect to other databases?" +- "How do I draw a chart?" +</USER_GUIDE> + +<MISLEADING_QUERY> +**When to Use:** +- The user's inputs are irrelevant to the database schema or include SQL code. +- The user's inputs lack specific details (like table names or columns) needed to generate an SQL query. +- The input appears off-topic or is simply a casual conversation starter. +- The user's question is about generating a chart but the SQL DATA is not provided. + +**Requirements:** +- For generating SQL: respond to users by incorporating phrases from the user's inputs that indicate the lack of relevance to the database schema. +- For generating charts: respond to users that we can generate a chart only if there is some data available. + +**Examples:** +- "How are you?" +- "What's the weather like today?" +- "Tell me a joke." +</MISLEADING_QUERY>
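For illustration, a minimal sketch (sample values invented) of how a reply matching the Output Format section below can be validated; it mirrors the IntentClassificationResult pydantic model defined later in this file:

import orjson
from typing import Literal, Optional

from pydantic import BaseModel


class IntentClassificationResult(BaseModel):  # mirrors the model defined later in this module
    rephrased_question: str
    intent: Literal[
        "MISLEADING_QUERY",
        "TEXT_TO_SQL",
        "GENERAL",
        "USER_GUIDE",
        "DATA_EXPLORATION",
        "CHART",
    ]
    reasoning: str
    sql: Optional[str] = ""


# invented sample reply, shaped like the JSON the prompt asks for
reply = b'{"rephrased_question": "What is the total sales for last quarter?", "intent": "TEXT_TO_SQL", "reasoning": "References sales tables in the schema.", "sql": ""}'
result = IntentClassificationResult(**orjson.loads(reply))
assert result.intent == "TEXT_TO_SQL"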
+ + +### Output Format ### +Return your response as a JSON object with the following structure: + +{ + "rephrased_question": "", + "reasoning": "", + "intent": "MISLEADING_QUERY" | "TEXT_TO_SQL" | "GENERAL" | "USER_GUIDE" | "DATA_EXPLORATION" | "CHART", + "sql": "" +} +""" + +intent_classification_user_prompt_template = """ +### DATABASE SCHEMA ### +{% for db_schema in db_schemas %} + {{ db_schema }} +{% endfor %} + +{% if sql_samples %} +### SQL SAMPLES ### +{% for sql_sample in sql_samples %} +Question: +{{sql_sample.question}} +SQL: +{{sql_sample.sql}} +{% endfor %} +{% endif %} + +{% if instructions %} +### INSTRUCTIONS ### +{{ instructions }} +{% endif %} + +### USER GUIDE ### +{% for doc in docs %} +- {{doc.path}}: {{doc.content}} +{% endfor %} + +{% if sql_data %} +### SQL DATA ### +{{ sql_data }} + +row size of SQL DATA: {{ sql_data_size }} +{% endif %} + +{% if chart_schema %} +### CHART SCHEMA ### +{{ chart_schema }} +{% endif %} + +### INPUT ### +{% if histories %} +User's previous questions: +{% for history in histories %} +User's Question: +{{ history.question }} +Assistant's Response: +{{ history.sql }} +{% endfor %} +{% endif %} + +User's current question: {{query}} +Current Time: {{ current_time }} +Output Language: {{ language }} + +Let's think step by step +""" + + +## Start of Pipeline +@observe(capture_input=False, capture_output=False) +async def embedding(query: str, embedder: Any, histories: list[AskHistory]) -> dict: + previous_query_summaries = ( + [history.question for history in histories] if histories else [] + ) + + query = "\n".join(previous_query_summaries) + "\n" + query + + return await embedder.run(query) + + +@observe(capture_input=False) +async def table_retrieval( + embedding: dict, project_id: str, table_retriever: Any +) -> dict: + filters = { + "operator": "AND", + "conditions": [ + {"field": "type", "operator": "==", "value": "TABLE_DESCRIPTION"}, + ], + } + + if project_id: + filters["conditions"].append( + {"field": "project_id", "operator": "==", "value": project_id} + ) + + return await table_retriever.run( + query_embedding=embedding.get("embedding"), + filters=filters, + ) + + +@observe(capture_input=False) +async def dbschema_retrieval( + table_retrieval: dict, embedding: dict, project_id: str, dbschema_retriever: Any +) -> list[Document]: + tables = table_retrieval.get("documents", []) + table_names = [] + for table in tables: + content = ast.literal_eval(table.content) + table_names.append(content["name"]) + + logger.info(f"dbschema_retrieval with table_names: {table_names}") + + table_name_conditions = [ + {"field": "name", "operator": "==", "value": table_name} + for table_name in table_names + ] + + filters = { + "operator": "AND", + "conditions": [ + {"field": "type", "operator": "==", "value": "TABLE_SCHEMA"}, + {"operator": "OR", "conditions": table_name_conditions}, + ], + } + + if project_id: + filters["conditions"].append( + {"field": "project_id", "operator": "==", "value": project_id} + ) + + results = await dbschema_retriever.run( + query_embedding=embedding.get("embedding"), filters=filters + ) + return results["documents"] + + +@observe() +def construct_db_schemas(dbschema_retrieval: list[Document]) -> list[str]: + db_schemas = {} + for document in dbschema_retrieval: + content = ast.literal_eval(document.content) + if content["type"] == "TABLE": + if document.meta["name"] not in db_schemas: + db_schemas[document.meta["name"]] = content + else: + db_schemas[document.meta["name"]] = { + **content, + "columns": 
db_schemas[document.meta["name"]].get("columns", []), + } + elif content["type"] == "TABLE_COLUMNS": + if document.meta["name"] not in db_schemas: + db_schemas[document.meta["name"]] = {"columns": content["columns"]} + else: + if "columns" not in db_schemas[document.meta["name"]]: + db_schemas[document.meta["name"]]["columns"] = content["columns"] + else: + db_schemas[document.meta["name"]]["columns"] += content["columns"] + + # remove incomplete schemas + db_schemas = {k: v for k, v in db_schemas.items() if "type" in v and "columns" in v} + + db_schemas_in_ddl = [] + for table_schema in list(db_schemas.values()): + if table_schema["type"] == "TABLE": + ddl, _ = build_table_ddl(table_schema) + db_schemas_in_ddl.append(ddl) + + return db_schemas_in_ddl + + +@observe(capture_input=False) +def prompt( + query: str, + wren_ai_docs: list[dict], + construct_db_schemas: list[str], + histories: list[AskHistory], + prompt_builder: PromptBuilder, + sql_data: dict, + chart_schema: dict, + sql_samples: Optional[list[dict]] = None, + instructions: Optional[list[dict]] = None, + configuration: Configuration | None = None, +) -> dict: + return prompt_builder.run( + query=query, + language=configuration.language, + db_schemas=construct_db_schemas, + histories=histories, + sql_samples=sql_samples, + instructions=construct_instructions( + instructions=instructions, + configuration=configuration, + ), + current_time=configuration.show_current_time(), + docs=wren_ai_docs, + sql_data=sql_data, + sql_data_size=len(sql_data.get("data", [])), + chart_schema=chart_schema, + ) + + +@observe(as_type="generation", capture_input=False) +async def classify_intent(prompt: dict, generator: Any) -> dict: + return await generator(prompt=prompt.get("prompt")) + + +@observe(capture_input=False) +def post_process(classify_intent: dict, construct_db_schemas: list[str]) -> dict: + try: + results = orjson.loads(classify_intent.get("replies")[0]) + return { + "rephrased_question": results["rephrased_question"], + "intent": results["intent"], + "reasoning": results["reasoning"], + "sql": results["sql"], + "db_schemas": construct_db_schemas, + } + except Exception: + return { + "rephrased_question": "", + "intent": "TEXT_TO_SQL", + "reasoning": "", + "sql": "", + "db_schemas": construct_db_schemas, + } + + +## End of Pipeline + + +class IntentClassificationResult(BaseModel): + rephrased_question: str + intent: Literal[ + "MISLEADING_QUERY", + "TEXT_TO_SQL", + "GENERAL", + "USER_GUIDE", + "DATA_EXPLORATION", + "CHART", + ] + reasoning: str + sql: Optional[str] = "" + + +INTENT_CLASSIFICAION_MODEL_KWARGS = { + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "intent_classification", + "schema": IntentClassificationResult.model_json_schema(), + }, + } +} + + +class IntentClassificationV2(BasicPipeline): + def __init__( + self, + llm_provider: LLMProvider, + embedder_provider: EmbedderProvider, + document_store_provider: DocumentStoreProvider, + wren_ai_docs: list[dict], + table_retrieval_size: Optional[int] = 50, + table_column_retrieval_size: Optional[int] = 100, + **kwargs, + ): + self._components = { + "embedder": embedder_provider.get_text_embedder(), + "table_retriever": document_store_provider.get_retriever( + document_store_provider.get_store(dataset_name="table_descriptions"), + top_k=table_retrieval_size, + ), + "dbschema_retriever": document_store_provider.get_retriever( + document_store_provider.get_store(), + top_k=table_column_retrieval_size, + ), + "generator": llm_provider.get_generator( + 
system_prompt=intent_classification_system_prompt, + generation_kwargs=INTENT_CLASSIFICAION_MODEL_KWARGS, + ), + "prompt_builder": PromptBuilder( + template=intent_classification_user_prompt_template + ), + } + + self._configs = { + "wren_ai_docs": wren_ai_docs, + } + + super().__init__( + AsyncDriver({}, sys.modules[__name__], result_builder=base.DictResult()) + ) + + @observe(name="Intent Classification") + async def run( + self, + query: str, + project_id: Optional[str] = None, + histories: Optional[list[AskHistory]] = None, + sql_samples: Optional[list[dict]] = None, + instructions: Optional[list[dict]] = None, + configuration: Configuration = Configuration(), + sql_data: Optional[dict] = None, + chart_schema: Optional[dict] = None, + ): + logger.info("Intent Classification pipeline is running...") + return await self._pipe.execute( + ["post_process"], + inputs={ + "query": query, + "project_id": project_id or "", + "histories": histories or [], + "sql_samples": sql_samples or [], + "instructions": instructions or [], + "configuration": configuration, + "sql_data": sql_data or {}, + "chart_schema": chart_schema or {}, + **self._components, + **self._configs, + }, + ) + + +if __name__ == "__main__": + from src.pipelines.common import dry_run_pipeline + + dry_run_pipeline( + IntentClassificationV2, + "intent_classification", + query="show me the dataset", + ) diff --git a/wren-ai-service/src/pipelines/generation/utils/sql.py b/wren-ai-service/src/pipelines/generation/utils/sql.py index d57cf6c1b6..3b52f0704f 100644 --- a/wren-ai-service/src/pipelines/generation/utils/sql.py +++ b/wren-ai-service/src/pipelines/generation/utils/sql.py @@ -403,7 +403,7 @@ def construct_instructions( ): _instructions = "" if configuration: - if configuration.fiscal_year: + if hasattr(configuration, "fiscal_year") and configuration.fiscal_year: _instructions += f"\n- For calendar year related computation, it should be started from {configuration.fiscal_year.start} to {configuration.fiscal_year.end}\n\n" if has_calculated_field: _instructions += calculated_field_instructions diff --git a/wren-ai-service/src/providers/engine/wren.py b/wren-ai-service/src/providers/engine/wren.py index 3c8965e1d5..18ee233fad 100644 --- a/wren-ai-service/src/providers/engine/wren.py +++ b/wren-ai-service/src/providers/engine/wren.py @@ -115,7 +115,7 @@ async def execute_sql( limit: int = 500, **kwargs, ) -> Tuple[bool, Optional[Dict[str, Any]]]: - api_endpoint = f"{self._endpoint}/v2/connector/{self._source}/query" + api_endpoint = f"{self._endpoint}/v3/connector/{self._source}/query" if dry_run: api_endpoint += "?dryRun=true&limit=1" else: @@ -147,14 +147,14 @@ async def execute_sql( return ( False, - None, + {}, { "error_message": res, "correlation_id": "", }, ) except asyncio.TimeoutError: - return False, None, f"Request timed out: {timeout} seconds" + return False, {}, f"Request timed out: {timeout} seconds" async def get_func_list( self, @@ -234,11 +234,11 @@ async def execute_sql( return ( False, - None, + {}, { "error_message": res, "correlation_id": "", }, ) except asyncio.TimeoutError: - return False, None, f"Request timed out: {timeout} seconds" + return False, {}, f"Request timed out: {timeout} seconds" diff --git a/wren-ai-service/src/web/v1/services/chart_adjustment.py b/wren-ai-service/src/web/v1/services/chart_adjustment.py index dff22fe010..67a56d55a9 100644 --- a/wren-ai-service/src/web/v1/services/chart_adjustment.py +++ b/wren-ai-service/src/web/v1/services/chart_adjustment.py @@ -28,8 +28,8 @@ class 
ChartAdjustmentRequest(BaseModel): _query_id: str | None = None query: str sql: str - adjustment_option: ChartAdjustmentOption chart_schema: dict + adjustment_option: Optional[ChartAdjustmentOption] = None project_id: Optional[str] = None thread_id: Optional[str] = None configurations: Optional[Configuration] = Configuration() @@ -91,6 +91,7 @@ class ChartAdjustmentResultResponse(BaseModel): error: Optional[ChartAdjustmentError] = None trace_id: Optional[str] = None + class ChartAdjustmentService: def __init__( self, diff --git a/wren-ai-service/src/web/v2/__init__.py b/wren-ai-service/src/web/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/wren-ai-service/src/web/v2/routers/__init__.py b/wren-ai-service/src/web/v2/routers/__init__.py new file mode 100644 index 0000000000..112182fa68 --- /dev/null +++ b/wren-ai-service/src/web/v2/routers/__init__.py @@ -0,0 +1,6 @@ +from fastapi import APIRouter + +from src.web.v2.routers import conversation + +router = APIRouter() +router.include_router(conversation.router) diff --git a/wren-ai-service/src/web/v2/routers/conversation.py b/wren-ai-service/src/web/v2/routers/conversation.py new file mode 100644 index 0000000000..3776c546f2 --- /dev/null +++ b/wren-ai-service/src/web/v2/routers/conversation.py @@ -0,0 +1,73 @@ +import uuid +from dataclasses import asdict + +from fastapi import APIRouter, BackgroundTasks, Depends, Request +from fastapi.responses import JSONResponse, StreamingResponse + +from src.globals import ( + ServiceContainer, + ServiceMetadata, + get_service_container, + get_service_metadata, +) +from src.web.v2.services import QueueNotFoundError +from src.web.v2.services.conversation import ( + ConversationRequest, + ConversationResponse, +) + +router = APIRouter() + + +@router.post("/conversations") +async def start_conversation( + conversation_request: ConversationRequest, + background_tasks: BackgroundTasks, + service_container: ServiceContainer = Depends(get_service_container), + service_metadata: ServiceMetadata = Depends(get_service_metadata), +) -> ConversationResponse: + query_id = str(uuid.uuid4()) + conversation_request.query_id = query_id + + background_tasks.add_task( + service_container.conversation_service.start_conversation, + conversation_request, + service_metadata=asdict(service_metadata), + ) + return ConversationResponse(query_id=query_id) + + +@router.post("/conversations/{query_id}/stop") +async def stop_conversation( + query_id: str, + service_container: ServiceContainer = Depends(get_service_container), +): + try: + service_container.conversation_service.stop_conversation(query_id) + except QueueNotFoundError: + return JSONResponse( + { + "stopped": False, + }, + status_code=404, + ) + + return {"stopped": True} + + +@router.get("/conversations/{query_id}/stream") +async def get_conversation_streaming_result( + query_id: str, + request: Request, + service_container: ServiceContainer = Depends(get_service_container), +) -> StreamingResponse: + event_generator = ( + await service_container.conversation_service.get_conversation_streaming_result( + query_id, request + ) + ) + + return StreamingResponse( + event_generator, + media_type="text/event-stream", + ) diff --git a/wren-ai-service/src/web/v2/services/__init__.py b/wren-ai-service/src/web/v2/services/__init__.py new file mode 100644 index 0000000000..5c2a4c19ec --- /dev/null +++ b/wren-ai-service/src/web/v2/services/__init__.py @@ -0,0 +1,215 @@ +import asyncio +from datetime import datetime +from typing import Callable, Literal, 
Optional + +import orjson +import pytz +from pydantic import BaseModel + + +async def ensure_async(iterable): + # if it's already async, just yield from it; + # otherwise wrap the sync iterable. + if hasattr(iterable, "__aiter__"): + async for item in iterable: + yield item + else: + for item in iterable: + yield item + + +class Configurations(BaseModel): + def show_current_time(self): + # Get the current time in the specified timezone + tz = pytz.timezone( + self.timezone + ) # Assuming timezone.name contains the timezone string + current_time = datetime.now(tz) + + return f"{current_time.strftime('%Y-%m-%d %A %H:%M:%S')}" # YYYY-MM-DD weekday_name HH:MM:SS, ex: 2024-10-23 Wednesday 12:00:00 + + language: Optional[str] = "English" + timezone: Optional[str] = "UTC" + + +class Error(BaseModel): + code: Literal["NO_RELEVANT_DATA", "NO_RELEVANT_SQL", "OTHERS"] + message: str + invalid_sql: Optional[str] = None + + +class QueueNotFoundError(Exception): + """Raised when someone tries to access or stop a non-existent queue.""" + + def __init__(self, query_id: str): + super().__init__(f"No result found for query_id: {query_id}") + self.query_id = query_id + + +class QueryEventManager: + def __init__(self): + # one queue per query_id + self._queues: dict[str, asyncio.Queue] = {} + + def get_queue(self, query_id: str) -> asyncio.Queue: + if query_id not in self._queues: + raise QueueNotFoundError(query_id) + return self._queues[query_id] + + def start_queue(self, query_id: str): + self._queues[query_id] = asyncio.Queue() + + def stop_queue(self, query_id: str): + q = self.get_queue(query_id) + _event = "message_stop" + _data = { + "type": "message_stop", + "message": { + "query_id": query_id, + }, + } + q.put_nowait((_event, _data)) + self.cleanup(query_id) + + async def _publish(self, query_id: str, event: str, data: dict): + q = self.get_queue(query_id) + await q.put((event, data)) + + def cleanup(self, query_id: str): + # remove the queue so it can be GC’d + self._queues.pop(query_id, None) + + async def emit_message_start( + self, + query_id: str, + trace_id: str, + ): + self.start_queue(query_id) + await self._publish( + query_id, + "message_start", + { + "type": "message_start", + "message": { + "query_id": query_id, + "trace_id": trace_id, + }, + }, + ) + + async def emit_message_stop( + self, + query_id: str, + trace_id: str, + ): + await self._publish( + query_id, + "message_stop", + { + "type": "message_stop", + "message": { + "query_id": query_id, + "trace_id": trace_id, + }, + }, + ) + + async def emit_error( + self, + query_id: str, + trace_id: str, + error: Error, + ): + await self._publish( + query_id, + "error", + { + "type": "error", + "message": { + "query_id": query_id, + "trace_id": trace_id, + "code": error.code, + "message": error.message, + "invalid_sql": error.invalid_sql or "", + }, + }, + ) + + async def emit_content_block( + self, + query_id: str, + trace_id: str, + index: int, + emit_content_func: Callable, + *, + emit_content_func_kwargs: Optional[dict] = {}, + content_block_label: Optional[str] = None, + block_type: Literal["tool_use", "text", "think"] = "tool_use", + stream: bool = False, + should_put_in_conversation_history: bool = False, + ): + """Emit a complete content block (start → delta → stop).""" + # 1) start + await self._publish( + query_id, + "content_block_start", + { + "type": "content_block_start", + "index": index, + "message": { + "type": block_type, + "content_block_label": content_block_label or "", + "trace_id": trace_id, + 
"should_put_in_conversation_history": should_put_in_conversation_history, + }, + }, + ) + + result = emit_content_func(**emit_content_func_kwargs) + if not stream: + result, result_for_pipeline = await result + final_result = result_for_pipeline + else: + final_result = "" + + async for chunk in ensure_async(result): + if stream and (block_type == "text" or block_type == "think"): + final_result += chunk + await self._publish( + query_id, + "content_block_delta", + { + "type": "content_block_delta", + "index": index, + "message": { + "type": ("json" if block_type == "tool_use" else "text") + + "_delta", + "content_block_label": content_block_label or "", + "content": ( + orjson.dumps(chunk) if block_type == "json" else chunk + ), + "trace_id": trace_id, + "should_put_in_conversation_history": should_put_in_conversation_history, + }, + }, + ) + + # 3) stop + await self._publish( + query_id, + "content_block_stop", + { + "type": "content_block_stop", + "index": index, + "message": { + "trace_id": trace_id, + }, + }, + ) + + return final_result, index + 1 + + +from .conversation import ConversationService # noqa: E402 + +__all__ = ["ConversationService"] diff --git a/wren-ai-service/src/web/v2/services/conversation.py b/wren-ai-service/src/web/v2/services/conversation.py new file mode 100644 index 0000000000..bde9934362 --- /dev/null +++ b/wren-ai-service/src/web/v2/services/conversation.py @@ -0,0 +1,1094 @@ +import asyncio +import logging +import random +import time +from typing import Dict, List, Literal, Optional + +import orjson +from fastapi import Request +from langfuse.decorators import observe +from pydantic import BaseModel, Field + +from src.core.pipeline import BasicPipeline +from src.utils import ( + trace_metadata, +) +from src.web.v1.services.ask import AskHistory +from src.web.v2.services import ( + Configurations, + Error, + QueryEventManager, +) + +logger = logging.getLogger("wren-ai-service") + + +class QuestionResult(BaseModel): + sql: str + type: Literal["llm", "view"] = "llm" + viewId: Optional[str] = None + + +class ConversationHistory(BaseModel): + class ConversationRequest(BaseModel): + query: str + additional_info: Optional[dict] = None + + request: ConversationRequest + response: str | dict + + +# POST /v2/conversations +class ConversationRequest(BaseModel): + _query_id: str | None = None + query: str + sql_data: Optional[Dict] = None + chart_schema: Optional[Dict] = None + project_id: Optional[str] = None + mdl_hash: Optional[str] = None + histories: Optional[List[ConversationHistory]] = Field(default_factory=list) + configurations: Optional[Configurations] = Configurations() + + @property + def query_id(self) -> str: + return self._query_id + + @query_id.setter + def query_id(self, query_id: str): + self._query_id = query_id + + +class ConversationResponse(BaseModel): + query_id: str + + +# although history.response may not be a sql, we still use AskHistory as the type +# because the AskHistory type is used in the Ask pipeline +def convert_conversation_history_to_ask_history( + conversation_history: list[ConversationHistory], +) -> list[AskHistory]: + return [ + AskHistory(question=history.request.query, sql=history.response["sql"]) + for history in conversation_history + if isinstance(history.response, dict) and history.response.get("sql") + ] + + +class ConversationService: + def __init__( + self, + pipelines: Dict[str, BasicPipeline], + max_histories: int = 5, + ): + self._pipelines = pipelines + self._query_event_manager = QueryEventManager() + 
self._max_histories = max_histories + + def _run_greetings(self): + greetings = [ + "Got your query! Let me look into that for you.", + "Thanks for your question — I'm working on it now.", + "Query received! I'll get back with insights shortly.", + "You're all set. Let me process that for you.", + "Thanks! Let me dig into that and find the best answer.", + "Appreciate your patience — working on your request!", + "Thanks for submitting your query. I'm on it!", + "Received and understood. Let's get you some answers.", + "Great question! Let me get the details for you.", + "Thanks! I'll get back to you with what I find.", + ] + + for text in random.choice(greetings): + yield text + + async def _run_historical_question_pipeline(self, query: str, project_id: str): + historical_question = await self._pipelines["historical_question"].run( + query=query, + project_id=project_id, + ) + + # we only return top 1 result + historical_question_result = historical_question.get( + "formatted_output", {} + ).get("documents", [])[:1] + + if historical_question_result: + return [ + { + "sql": historical_question_result[0].get("statement"), + "type": ( + "view" if historical_question_result[0].get("viewId") else "llm" + ), + "viewId": historical_question_result[0].get("viewId"), + } + ], { + "sql": historical_question_result[0].get("statement"), + "type": ( + "view" if historical_question_result[0].get("viewId") else "llm" + ), + "viewId": historical_question_result[0].get("viewId"), + } + else: + return [], {} + + async def _run_sql_pairs_retrieval( + self, + query: str, + project_id: str, + ): + sql_pairs_retrieval = await self._pipelines["sql_pairs_retrieval"].run( + query=query, + project_id=project_id, + ) + return ( + sql_pairs_retrieval.get("formatted_output", {}).get("documents", []), + sql_pairs_retrieval.get("formatted_output", {}).get("documents", []), + ) + + async def _run_instructions_retrieval( + self, + query: str, + project_id: str, + ): + instructions_retrieval = await self._pipelines["instructions_retrieval"].run( + query=query, + project_id=project_id, + ) + return ( + instructions_retrieval.get("formatted_output", {}).get("documents", []), + instructions_retrieval.get("formatted_output", {}).get("documents", []), + ) + + async def _run_intent_classification( + self, + query: str, + histories: List[ConversationHistory], + sql_samples: List[QuestionResult], + instructions: List[QuestionResult], + project_id: str, + configurations: Configurations, + sql_data: Optional[Dict] = None, + chart_schema: Optional[Dict] = None, + ): + intent_classification_result = ( + await self._pipelines["intent_classification"].run( + query=query, + histories=convert_conversation_history_to_ask_history(histories), + sql_samples=sql_samples, + instructions=instructions, + project_id=project_id, + configuration=configurations, + sql_data=sql_data, + chart_schema=chart_schema, + ) + ).get("post_process", {}) + + return [ + { + "intent": intent_classification_result.get("intent"), + "rephrased_question": intent_classification_result.get( + "rephrased_question" + ), + "reasoning": intent_classification_result.get("reasoning"), + } + ], intent_classification_result + + def _run_misleading_assistance( + self, + query: str, + histories: List[ConversationHistory], + db_schemas: List[str], + language: str, + query_id: str, + ): + asyncio.create_task( + self._pipelines["misleading_assistance"].run( + query=query, + histories=convert_conversation_history_to_ask_history(histories), + db_schemas=db_schemas, + 
+                language=language,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["misleading_assistance"].get_streaming_results(query_id)
+
+    def _run_data_assistance(
+        self,
+        query: str,
+        histories: List[ConversationHistory],
+        db_schemas: List[str],
+        language: str,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["data_assistance"].run(
+                query=query,
+                histories=convert_conversation_history_to_ask_history(histories),
+                db_schemas=db_schemas,
+                language=language,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["data_assistance"].get_streaming_results(query_id)
+
+    def _run_user_guide_assistance(
+        self,
+        query: str,
+        language: str,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["user_guide_assistance"].run(
+                query=query,
+                language=language,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["user_guide_assistance"].get_streaming_results(query_id)
+
+    def _run_data_exploration_assistance(
+        self,
+        query: str,
+        sql_data: Dict,
+        language: str,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["data_exploration_assistance"].run(
+                query=query,
+                sql_data=sql_data,
+                language=language,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["data_exploration_assistance"].get_streaming_results(
+            query_id
+        )
+
+    async def _run_chart_generation(
+        self,
+        query: str,
+        sql: str,
+        sql_data: Dict,
+        language: str,
+    ):
+        chart_generation_result = await self._pipelines["chart_generation"].run(
+            query=query,
+            sql=sql,
+            data=sql_data,
+            language=language,
+        )
+        results = chart_generation_result["post_process"]["results"]
+
+        return [
+            {
+                "chart_result": results,
+            }
+        ], {
+            "chart_result": results,
+            "sql": sql,
+        }
+
+    async def _run_chart_adjustment(
+        self,
+        query: str,
+        sql: str,
+        sql_data: Dict,
+        chart_schema: Dict,
+        language: str,
+    ):
+        chart_adjustment_result = await self._pipelines["chart_adjustment"].run(
+            query=query,
+            sql=sql,
+            chart_schema=chart_schema,
+            data=sql_data,
+            language=language,
+        )
+        results = chart_adjustment_result["post_process"]["results"]
+
+        return [
+            {
+                "chart_result": results,
+            }
+        ], {
+            "chart_result": results,
+            "sql": sql,
+        }
+
+    async def _run_db_schema_retrieval(
+        self,
+        query: str,
+        histories: List[ConversationHistory],
+        project_id: str,
+    ):
+        retrieval_results = (
+            await self._pipelines["db_schema_retrieval"].run(
+                query=query,
+                histories=convert_conversation_history_to_ask_history(histories),
+                project_id=project_id,
+            )
+        ).get("construct_retrieval_results", {})
+
+        return [
+            {
+                "retrieved_tables": [
+                    document.get("table_name")
+                    for document in retrieval_results.get("retrieval_results", [])
+                ],
+            }
+        ], retrieval_results
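For readability, a rough shape of the second value _run_db_schema_retrieval returns, inferred from how start_conversation consumes it further down; only the fields actually read there are listed:

    # Inferred-from-usage sketch of the retrieval payload; not an exhaustive schema.
    from typing import List, TypedDict


    class RetrievedDocument(TypedDict):
        table_name: str
        table_ddl: str


    class RetrievalResults(TypedDict, total=False):
        retrieval_results: List[RetrievedDocument]
        has_calculated_field: bool  # read via .get(..., False)
        has_metric: bool  # read via .get(..., False)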
+    def _run_followup_sql_generation_reasoning(
+        self,
+        query: str,
+        contexts: List[str],
+        histories: List[ConversationHistory],
+        sql_samples: List[QuestionResult],
+        instructions: List[QuestionResult],
+        configuration: Configurations,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["followup_sql_generation_reasoning"].run(
+                query=query,
+                contexts=contexts,
+                histories=convert_conversation_history_to_ask_history(histories),
+                sql_samples=sql_samples,
+                instructions=instructions,
+                configuration=configuration,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines[
+            "followup_sql_generation_reasoning"
+        ].get_streaming_results(query_id)
+
+    def _run_sql_generation_reasoning(
+        self,
+        query: str,
+        contexts: List[str],
+        sql_samples: List[QuestionResult],
+        instructions: List[QuestionResult],
+        configuration: Configurations,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["sql_generation_reasoning"].run(
+                query=query,
+                contexts=contexts,
+                sql_samples=sql_samples,
+                instructions=instructions,
+                configuration=configuration,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["sql_generation_reasoning"].get_streaming_results(
+            query_id
+        )
+
+    # no emit content block at the moment
+    async def _run_sql_functions_retrieval(
+        self,
+        project_id: str,
+    ):
+        sql_functions = await self._pipelines["sql_functions_retrieval"].run(
+            project_id=project_id,
+        )
+
+        return sql_functions
+
+    async def _run_followup_sql_generation(
+        self,
+        query: str,
+        contexts: List[str],
+        sql_generation_reasoning: str,
+        histories: List[ConversationHistory],
+        project_id: str,
+        configurations: Configurations,
+        sql_samples: List[QuestionResult],
+        instructions: List[QuestionResult],
+        has_calculated_field: bool,
+        has_metric: bool,
+        sql_functions: List[str],
+    ):
+        followup_sql_generation_results = await self._pipelines[
+            "followup_sql_generation"
+        ].run(
+            query=query,
+            contexts=contexts,
+            sql_generation_reasoning=sql_generation_reasoning,
+            histories=convert_conversation_history_to_ask_history(histories),
+            project_id=project_id,
+            configuration=configurations,
+            sql_samples=sql_samples,
+            instructions=instructions,
+            has_calculated_field=has_calculated_field,
+            has_metric=has_metric,
+            sql_functions=sql_functions,
+        )
+
+        if sql_valid_results := followup_sql_generation_results["post_process"][
+            "valid_generation_results"
+        ]:
+            return [
+                {
+                    "sql": sql_valid_results[0].get("sql"),
+                }
+            ], followup_sql_generation_results
+        else:
+            return [], followup_sql_generation_results
+
+    async def _run_sql_generation(
+        self,
+        query: str,
+        contexts: List[str],
+        sql_generation_reasoning: str,
+        project_id: str,
+        configurations: Configurations,
+        sql_samples: List[QuestionResult],
+        instructions: List[QuestionResult],
+        has_calculated_field: bool,
+        has_metric: bool,
+        sql_functions: List[str],
+    ):
+        sql_generation_results = await self._pipelines["sql_generation"].run(
+            query=query,
+            contexts=contexts,
+            sql_generation_reasoning=sql_generation_reasoning,
+            project_id=project_id,
+            configuration=configurations,
+            sql_samples=sql_samples,
+            instructions=instructions,
+            has_calculated_field=has_calculated_field,
+            has_metric=has_metric,
+            sql_functions=sql_functions,
+        )
+
+        if sql_valid_results := sql_generation_results["post_process"][
+            "valid_generation_results"
+        ]:
+            return [
+                {
+                    "sql": sql_valid_results[0].get("sql"),
+                }
+            ], sql_generation_results
+        else:
+            return [], sql_generation_results
+
+    async def _run_sql_correction(
+        self,
+        contexts: List[str],
+        invalid_generation_results: List[QuestionResult],
+        project_id: str,
+    ):
+        sql_correction_results = await self._pipelines["sql_correction"].run(
+            contexts=contexts,
+            invalid_generation_results=invalid_generation_results,
+            project_id=project_id,
+        )
+
+        if sql_valid_results := sql_correction_results["post_process"][
+            "valid_generation_results"
+        ]:
+            return [
+                {
+                    "sql": sql_valid_results[0].get("sql"),
+                }
+            ], sql_correction_results
+        else:
+            return [], sql_correction_results
+
+    # no emit content block at the moment
+    async def _run_sql_executor(
+        self,
+        sql: str,
+        project_id: str,
+    ):
+        sql_data = (
+            await self._pipelines["sql_executor"].run(
+                sql=sql,
+                project_id=project_id,
+            )
+        )["execute_sql"]["results"]
+
+        return sql_data
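The three SQL-producing helpers above share a convention: the pipeline's post_process output carries valid_generation_results and invalid_generation_results, and the first valid SQL wins. A hypothetical helper (not part of this PR) restating that convention:

    # Hypothetical restatement: surface the first valid SQL if any, otherwise
    # return an empty emit payload so the caller can inspect the invalid results.
    from typing import Dict, List, Tuple


    def first_valid_sql(pipeline_results: Dict) -> Tuple[List[Dict], Dict]:
        post_process = pipeline_results.get("post_process", {})
        if valid := post_process.get("valid_generation_results"):
            return [{"sql": valid[0].get("sql")}], pipeline_results
        return [], pipeline_results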
+    # passthrough step: wraps an already-built payload so it can be emitted
+    # as a DATA_PREVIEW tool_use content block
+    async def _run_DATA_PREVIEW(
+        self,
+        data: Dict,
+    ):
+        return [data], data
+
+    def _run_sql_answer(
+        self,
+        query: str,
+        sql: str,
+        sql_data: Dict,
+        configurations: Configurations,
+        query_id: str,
+    ):
+        asyncio.create_task(
+            self._pipelines["sql_answer"].run(
+                query=query,
+                sql=sql,
+                sql_data=sql_data,
+                language=configurations.language,
+                query_id=query_id,
+            )
+        )
+
+        return self._pipelines["sql_answer"].get_streaming_results(query_id)
+
+    @observe(name="Start Conversation")
+    @trace_metadata
+    async def start_conversation(
+        self,
+        conversation_request: ConversationRequest,
+        **kwargs,
+    ):
+        trace_id = kwargs.get("trace_id")
+        results = {
+            "conversation_result": {},
+            "metadata": {
+                "type": "",
+                "error_type": "",
+                "error_message": "",
+            },
+        }
+
+        query_id = conversation_request.query_id
+        user_query = conversation_request.query
+        project_id = conversation_request.project_id
+        histories = conversation_request.histories[: self._max_histories]
+        configurations = conversation_request.configurations
+        sql_data = conversation_request.sql_data
+        chart_schema = conversation_request.chart_schema
+        index = 0
+        has_sql = False
+        sql = ""
+
+        try:
+            await self._query_event_manager.emit_message_start(
+                query_id,
+                trace_id,
+            )
+
+            _, index = await self._query_event_manager.emit_content_block(
+                query_id,
+                trace_id,
+                index=index,
+                emit_content_func=self._run_greetings,
+                content_block_label="GREETINGS",
+                block_type="text",
+                stream=True,
+            )
+
+            (
+                historical_question_result,
+                index,
+            ) = await self._query_event_manager.emit_content_block(
+                query_id,
+                trace_id,
+                index=index,
+                emit_content_func=self._run_historical_question_pipeline,
+                emit_content_func_kwargs={
+                    "query": user_query,
+                    "project_id": project_id,
+                },
+                content_block_label="HISTORICAL_QUESTION_RETRIEVAL",
+                block_type="tool_use",
+                should_put_in_conversation_history=True,
+            )
+
+            if not historical_question_result:
+                sql_samples, index = await self._query_event_manager.emit_content_block(
+                    query_id,
+                    trace_id,
+                    index=index,
+                    emit_content_func=self._run_sql_pairs_retrieval,
+                    emit_content_func_kwargs={
+                        "query": user_query,
+                        "project_id": project_id,
+                    },
+                    content_block_label="SQL_PAIRS_RETRIEVAL",
+                    block_type="tool_use",
+                )
+
+                (
+                    instructions,
+                    index,
+                ) = await self._query_event_manager.emit_content_block(
+                    query_id,
+                    trace_id,
+                    index=index,
+                    emit_content_func=self._run_instructions_retrieval,
+                    emit_content_func_kwargs={
+                        "query": user_query,
+                        "project_id": project_id,
+                    },
+                    content_block_label="INSTRUCTIONS_RETRIEVAL",
+                    block_type="tool_use",
+                )
+
+                (
+                    intent_classification_result,
+                    index,
+                ) = await self._query_event_manager.emit_content_block(
+                    query_id,
+                    trace_id,
+                    index=index,
+                    emit_content_func=self._run_intent_classification,
+                    emit_content_func_kwargs={
+                        "query": user_query,
+                        "histories": histories,
+                        "sql_samples": sql_samples,
+                        "instructions": instructions,
+                        "project_id": project_id,
+                        "configurations": configurations,
+                        "sql_data": sql_data,
+                        "chart_schema": chart_schema,
+                    },
+                    content_block_label="INTENT_CLASSIFICATION",
+                    block_type="tool_use",
+                )
+
+                intent = intent_classification_result.get("intent")
+                rephrased_question = intent_classification_result.get(
+                    "rephrased_question"
+                )
+                db_schemas = intent_classification_result.get("db_schemas")
+                intent_sql = intent_classification_result.get("sql")
+
+                if rephrased_question:
+                    user_query = rephrased_question
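The branch ladder that follows routes on the classifier's intent label. As a reading aid only, the labels it handles and where each one goes; anything unrecognized falls through to text-to-SQL:

    # Reading aid, not executed anywhere in this PR.
    INTENT_ROUTES = {
        "MISLEADING_QUERY": "misleading_assistance (streamed text)",
        "GENERAL": "data_assistance (streamed text)",
        "USER_GUIDE": "user_guide_assistance (streamed text)",
        "DATA_EXPLORATION": "data_exploration_assistance (streamed text)",
        "CHART": "chart generation/adjustment (tool_use)",
    }
    DEFAULT_ROUTE = "text-to-SQL (retrieval -> reasoning -> generation)"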
+                if intent == "MISLEADING_QUERY":
+                    _, index = await self._query_event_manager.emit_content_block(
+                        query_id,
+                        trace_id,
+                        index=index,
+                        emit_content_func=self._run_misleading_assistance,
+                        emit_content_func_kwargs={
+                            "query": user_query,
+                            "histories": histories,
+                            "db_schemas": db_schemas,
+                            "language": configurations.language,
+                            "query_id": query_id,
+                        },
+                        content_block_label="MISLEADING_QUERY_ASSISTANCE",
+                        block_type="text",
+                        stream=True,
+                        should_put_in_conversation_history=True,
+                    )
+                elif intent == "GENERAL":
+                    _, index = await self._query_event_manager.emit_content_block(
+                        query_id,
+                        trace_id,
+                        index=index,
+                        emit_content_func=self._run_data_assistance,
+                        emit_content_func_kwargs={
+                            "query": user_query,
+                            "histories": histories,
+                            "db_schemas": db_schemas,
+                            "language": configurations.language,
+                            "query_id": query_id,
+                        },
+                        content_block_label="GENERAL_ASSISTANCE",
+                        block_type="text",
+                        stream=True,
+                        should_put_in_conversation_history=True,
+                    )
+                elif intent == "USER_GUIDE":
+                    _, index = await self._query_event_manager.emit_content_block(
+                        query_id,
+                        trace_id,
+                        index=index,
+                        emit_content_func=self._run_user_guide_assistance,
+                        emit_content_func_kwargs={
+                            "query": user_query,
+                            "language": configurations.language,
+                            "query_id": query_id,
+                        },
+                        content_block_label="USER_GUIDE_ASSISTANCE",
+                        block_type="text",
+                        stream=True,
+                        should_put_in_conversation_history=True,
+                    )
+                elif intent == "DATA_EXPLORATION":
+                    _, index = await self._query_event_manager.emit_content_block(
+                        query_id,
+                        trace_id,
+                        index=index,
+                        emit_content_func=self._run_data_exploration_assistance,
+                        emit_content_func_kwargs={
+                            "query": user_query,
+                            "sql_data": sql_data,
+                            "language": configurations.language,
+                            "query_id": query_id,
+                        },
+                        content_block_label="DATA_EXPLORATION",
+                        block_type="text",
+                        stream=True,
+                        should_put_in_conversation_history=True,
+                    )
+                elif intent == "CHART":
+                    sql_data = await self._run_sql_executor(
+                        sql=intent_sql,
+                        project_id=project_id,
+                    )
+
+                    if chart_schema:
+                        (
+                            chart_generation_result,
+                            index,
+                        ) = await self._query_event_manager.emit_content_block(
+                            query_id,
+                            trace_id,
+                            index=index,
+                            emit_content_func=self._run_chart_adjustment,
+                            emit_content_func_kwargs={
+                                "query": user_query,
+                                "sql": intent_sql,
+                                "sql_data": sql_data,
+                                "chart_schema": chart_schema,
+                                "language": configurations.language,
+                            },
+                            content_block_label="CHART_ADJUSTMENT",
+                            block_type="tool_use",
+                            should_put_in_conversation_history=True,
+                        )
+                    else:
+                        (
+                            chart_generation_result,
+                            index,
+                        ) = await self._query_event_manager.emit_content_block(
+                            query_id,
+                            trace_id,
+                            index=index,
+                            emit_content_func=self._run_chart_generation,
+                            emit_content_func_kwargs={
+                                "query": user_query,
+                                "sql": intent_sql,
+                                "sql_data": sql_data,
+                                "language": configurations.language,
+                            },
+                            content_block_label="CHART_GENERATION",
+                            block_type="tool_use",
+                            should_put_in_conversation_history=True,
+                        )
+
+                    if chart_schema := chart_generation_result.get(
+                        "chart_result", {}
+                    ).get("chart_schema"):
+                        (
+                            _,
+                            index,
+                        ) = await self._query_event_manager.emit_content_block(
+                            query_id,
+                            trace_id,
+                            index=index,
+                            emit_content_func=self._run_DATA_PREVIEW,
+                            emit_content_func_kwargs={
+                                "data": {
+                                    "type": "CHART",
+                                    "payload": {
+                                        "title": user_query,
+                                        "chart_schema": chart_schema,
+                                        "sql": chart_generation_result.get("sql"),
+                                    },
+                                },
+                            },
+                            content_block_label="DATA_PREVIEW",
+                            block_type="tool_use",
+                        )
+ "query": user_query, + "histories": histories, + "project_id": project_id, + }, + content_block_label="DB_SCHEMA_RETRIEVAL", + block_type="tool_use", + ) + + documents = retrieval_results.get("retrieval_results", []) + table_names = [document.get("table_name") for document in documents] + table_ddls = [document.get("table_ddl") for document in documents] + + if table_names: + if histories: + ( + sql_generation_reasoning, + index, + ) = await self._query_event_manager.emit_content_block( + query_id, + trace_id, + index=index, + emit_content_func=self._run_followup_sql_generation_reasoning, + emit_content_func_kwargs={ + "query": user_query, + "contexts": table_ddls, + "histories": histories, + "sql_samples": sql_samples, + "instructions": instructions, + "configuration": configurations, + "query_id": query_id, + }, + content_block_label="SQL_GENERATION_REASONING", + block_type="think", + stream=True, + ) + else: + ( + sql_generation_reasoning, + index, + ) = await self._query_event_manager.emit_content_block( + query_id, + trace_id, + index=index, + emit_content_func=self._run_sql_generation_reasoning, + emit_content_func_kwargs={ + "query": user_query, + "contexts": table_ddls, + "sql_samples": sql_samples, + "instructions": instructions, + "configuration": configurations, + "query_id": query_id, + }, + content_block_label="SQL_GENERATION_REASONING", + block_type="think", + stream=True, + ) + + sql_functions = await self._run_sql_functions_retrieval( + project_id + ) + + has_calculated_field = retrieval_results.get( + "has_calculated_field", False + ) + has_metric = retrieval_results.get("has_metric", False) + + if histories: + ( + text_to_sql_generation_results, + index, + ) = await self._query_event_manager.emit_content_block( + query_id, + trace_id, + index=index, + emit_content_func=self._run_followup_sql_generation, + emit_content_func_kwargs={ + "query": user_query, + "contexts": table_ddls, + "sql_generation_reasoning": sql_generation_reasoning, + "histories": histories, + "project_id": project_id, + "configurations": configurations, + "sql_samples": sql_samples, + "instructions": instructions, + "has_calculated_field": has_calculated_field, + "has_metric": has_metric, + "sql_functions": sql_functions, + }, + content_block_label="SQL_GENERATION", + block_type="tool_use", + should_put_in_conversation_history=True, + ) + else: + ( + text_to_sql_generation_results, + index, + ) = await self._query_event_manager.emit_content_block( + query_id, + trace_id, + index=index, + emit_content_func=self._run_sql_generation, + emit_content_func_kwargs={ + "query": user_query, + "contexts": table_ddls, + "sql_generation_reasoning": sql_generation_reasoning, + "project_id": project_id, + "configurations": configurations, + "sql_samples": sql_samples, + "instructions": instructions, + "has_calculated_field": has_calculated_field, + "has_metric": has_metric, + "sql_functions": sql_functions, + }, + content_block_label="SQL_GENERATION", + block_type="tool_use", + should_put_in_conversation_history=True, + ) + + if failed_dry_run_results := text_to_sql_generation_results[ + "post_process" + ]["invalid_generation_results"]: + if ( + failed_dry_run_results[0]["type"] != "TIME_OUT" + and failed_dry_run_results[0]["type"] != "ADD_QUOTES" + ): + ( + sql_correction_results, + index, + ) = await self._query_event_manager.emit_content_block( + query_id, + trace_id, + index=index, + emit_content_func=self._run_sql_correction, + emit_content_func_kwargs={ + "contexts": [], + "invalid_generation_results": 
+                        if failed_dry_run_results := text_to_sql_generation_results[
+                            "post_process"
+                        ]["invalid_generation_results"]:
+                            if (
+                                failed_dry_run_results[0]["type"] != "TIME_OUT"
+                                and failed_dry_run_results[0]["type"] != "ADD_QUOTES"
+                            ):
+                                (
+                                    sql_correction_results,
+                                    index,
+                                ) = await self._query_event_manager.emit_content_block(
+                                    query_id,
+                                    trace_id,
+                                    index=index,
+                                    emit_content_func=self._run_sql_correction,
+                                    emit_content_func_kwargs={
+                                        "contexts": [],
+                                        "invalid_generation_results": failed_dry_run_results,
+                                        "project_id": project_id,
+                                    },
+                                    content_block_label="SQL_CORRECTION",
+                                    block_type="tool_use",
+                                )
+
+                                if failed_dry_run_results := sql_correction_results[
+                                    "post_process"
+                                ]["invalid_generation_results"]:
+                                    await self._query_event_manager.emit_error(
+                                        query_id=query_id,
+                                        trace_id=trace_id,
+                                        error=Error(
+                                            code="NO_RELEVANT_SQL",
+                                            message=failed_dry_run_results[0]["error"],
+                                            invalid_sql=failed_dry_run_results[0][
+                                                "sql"
+                                            ],
+                                        ),
+                                    )
+                                else:
+                                    sql = sql_correction_results["post_process"][
+                                        "valid_generation_results"
+                                    ][0]["sql"]
+                                    has_sql = True
+                            else:
+                                await self._query_event_manager.emit_error(
+                                    query_id=query_id,
+                                    trace_id=trace_id,
+                                    error=Error(
+                                        code="NO_RELEVANT_SQL",
+                                        message=failed_dry_run_results[0]["error"],
+                                        invalid_sql=failed_dry_run_results[0]["sql"],
+                                    ),
+                                )
+                        else:
+                            sql = text_to_sql_generation_results["post_process"][
+                                "valid_generation_results"
+                            ][0]["sql"]
+                            has_sql = True
+            else:
+                sql = historical_question_result["sql"]
+                has_sql = True
+
+            if has_sql:
+                _, index = await self._query_event_manager.emit_content_block(
+                    query_id,
+                    trace_id,
+                    index=index,
+                    emit_content_func=self._run_DATA_PREVIEW,
+                    emit_content_func_kwargs={
+                        "data": {
+                            "type": "TABLE",
+                            "payload": {
+                                "title": user_query,
+                                "sql": sql,
+                            },
+                        },
+                    },
+                    content_block_label="DATA_PREVIEW",
+                    block_type="tool_use",
+                )
+
+                sql_data = await self._run_sql_executor(
+                    sql=sql,
+                    project_id=project_id,
+                )
+
+                _, index = await self._query_event_manager.emit_content_block(
+                    query_id,
+                    trace_id,
+                    index=index,
+                    emit_content_func=self._run_sql_answer,
+                    emit_content_func_kwargs={
+                        "query": user_query,
+                        "sql": sql,
+                        "sql_data": sql_data,
+                        "configurations": configurations,
+                        "query_id": query_id,
+                    },
+                    content_block_label="SQL_ANSWER",
+                    block_type="text",
+                    stream=True,
+                )
+
+            await self._query_event_manager.emit_message_stop(
+                query_id,
+                trace_id,
+            )
+        except Exception as e:
+            logger.exception(f"conversation pipeline - OTHERS: {e}")
+
+            await self._query_event_manager.emit_error(
+                query_id,
+                trace_id,
+                Error(
+                    code="OTHERS",
+                    message=str(e),
+                ),
+            )
+
+            results["metadata"]["error_type"] = "OTHERS"
+            results["metadata"]["error_message"] = str(e)
+
+        return results
+
+    def stop_conversation(self, query_id: str):
+        self._query_event_manager.stop_queue(query_id)
+
+    async def get_conversation_streaming_result(self, query_id: str, request: Request):
+        queue = self._query_event_manager.get_queue(query_id)
+
+        async def event_generator():
+            last_ping = time.monotonic()
+
+            while True:
+                # if the client disconnects, stop streaming
+                if await request.is_disconnected():
+                    break
+
+                try:
+                    event, data = await asyncio.wait_for(queue.get(), timeout=15)
+                except asyncio.TimeoutError:
+                    now = time.monotonic()
+                    if now - last_ping >= 10:
+                        # a line starting with a colon is the canonical way to emit an
+                        # SSE comment, which browsers and EventSource clients ignore
+                        # as data but which still resets idle timeouts
+                        yield ": keep-alive\n\n"
+                        last_ping = now
+                    continue
+
+                payload = orjson.dumps(data).decode()
+                yield f"event: {event}\n"
+                yield f"data: {payload}\n\n"
+
+                if event in ("message_stop", "error"):
+                    break
+
+            self._query_event_manager.cleanup(query_id)
+
+        return event_generator()
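A sketch of a client consuming the stream event_generator produces; the endpoint path is invented, but the wire format (event:/data: lines, ": keep-alive" comments, terminal message_stop or error events) comes straight from the code above:

    # Illustrative SSE client; the URL path is a hypothetical placeholder.
    import httpx
    import orjson


    def consume_conversation_stream(base_url: str, query_id: str) -> None:
        url = f"{base_url}/v2/conversations/{query_id}/streaming-result"  # hypothetical
        with httpx.stream("GET", url, timeout=None) as response:
            event = None
            for line in response.iter_lines():
                if line.startswith(":"):
                    continue  # SSE comment used as keep-alive; carries no data
                if line.startswith("event: "):
                    event = line[len("event: "):]
                elif line.startswith("data: "):
                    payload = orjson.loads(line[len("data: "):])
                    print(event, payload)
                    if event in ("message_stop", "error"):
                        return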