Merged
4 changes: 3 additions & 1 deletion deployment/kustomizations/base/cm.yaml
@@ -59,15 +59,17 @@ data:
kwargs:
temperature: 0
n: 1
seed: 0
max_tokens: 4096
response_format:
type: json_object
- model: gpt-4o
- model: gpt-4o-2024-08-06
api_base: https://api.openai.com/v1
api_key_name: LLM_OPENAI_API_KEY
kwargs:
temperature: 0
n: 1
seed: 0
max_tokens: 4096
response_format:
type: json_object
4 changes: 4 additions & 0 deletions docker/config.example.yaml
@@ -8,6 +8,8 @@ models:
kwargs:
temperature: 0
n: 1
# for better consistency of the LLM response, see: https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed
seed: 0
max_tokens: 4096
response_format:
type: json_object
@@ -17,6 +19,8 @@ models:
kwargs:
temperature: 0
n: 1
# for better consistency of the LLM response, see: https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed
seed: 0
max_tokens: 4096
response_format:
type: json_object
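As context for the new seed kwarg: below is a minimal sketch, not part of this diff, of how these kwargs map onto a chat completion request with the official openai Python SDK. The client setup, prompt, and printed fields are illustrative assumptions only.

# Sketch only: shows how the kwargs above (temperature, n, seed, max_tokens,
# response_format) would be passed to an OpenAI chat completion call.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

response = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": 'Return {"answer": 1} as a JSON object.'}],
    temperature=0,
    n=1,
    seed=0,  # same seed plus same inputs makes outputs more repeatable (best effort, not guaranteed)
    max_tokens=4096,
    response_format={"type": "json_object"},
)

# system_fingerprint helps detect backend changes that can affect reproducibility
print(response.system_fingerprint)
print(response.choices[0].message.content)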
5 changes: 4 additions & 1 deletion wren-ai-service/Justfile
@@ -26,7 +26,7 @@ up: prepare-wren-engine
down:
docker compose -f ./tools/dev/docker-compose-dev.yaml --env-file ./tools/dev/.env down

start:
start: use-wren-ui-as-engine
poetry run python -m src.__main__

curate_eval_data:
@@ -63,3 +63,6 @@ prepare-wren-engine:
mkdir -p tools/dev/etc/mdl
echo "{\"catalog\": \"test_catalog\", \"schema\": \"test_schema\", \"models\": []}" \\
> tools/dev/etc/mdl/sample.json

use-wren-ui-as-engine:
poetry run python -m src.force_update_config
4 changes: 0 additions & 4 deletions wren-ai-service/demo/utils.py
@@ -659,7 +659,6 @@ def display_sql_answer(query_id: str):
placeholder.markdown(markdown_content)


@st.cache_data
def get_sql_answer(
query: str,
sql: str,
@@ -708,7 +707,6 @@ def get_sql_answer(
)


@st.cache_data
def ask_details():
asks_details_response = requests.post(
f"{WREN_AI_SERVICE_BASE_URL}/v1/ask-details",
@@ -842,7 +840,6 @@ def fill_vega_lite_values(vega_lite_schema: dict, df: pd.DataFrame) -> dict:
return schema


@st.cache_data
def generate_chart(
query: str,
sql: str,
@@ -893,7 +890,6 @@ def adjust_chart(
return chart_response


@st.cache_data
def adjust_chart(
query: str,
sql: str,
1 change: 1 addition & 0 deletions wren-ai-service/src/__main__.py
@@ -92,6 +92,7 @@ def health():
port=settings.port,
reload=settings.development,
reload_includes=["src/**/*.py", ".env.dev", "config.yaml"],
reload_excludes=["demo/*.py", "tests/**/*.py", "eval/**/*.py"],
workers=1,
loop="uvloop",
http="httptools",
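For reference, a minimal standalone sketch of how uvicorn's reload filters behave; the module path "src.__main__:app" and the port are assumptions for illustration, not taken from this diff. Files matching reload_includes trigger a restart on change, while files matching reload_excludes are ignored by the reloader.

# Sketch only: standalone uvicorn launcher with reload include/exclude globs.
import uvicorn

if __name__ == "__main__":
    uvicorn.run(
        "src.__main__:app",  # assumed ASGI app path
        host="0.0.0.0",
        port=5555,           # assumed port
        reload=True,
        reload_includes=["src/**/*.py", ".env.dev", "config.yaml"],
        reload_excludes=["demo/*.py", "tests/**/*.py", "eval/**/*.py"],
    )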
26 changes: 26 additions & 0 deletions wren-ai-service/src/force_update_config.py
@@ -0,0 +1,26 @@
import yaml


def update_config():
# Read the config file
with open("config.yaml", "r") as file:
# Load all documents from YAML file (since it has multiple documents separated by ---)
documents = list(yaml.safe_load_all(file))

# Find the pipeline configuration document
for doc in documents:
if doc.get("type") == "pipeline":
# Update engine name in all pipelines
for pipe in doc.get("pipes", []):
if "engine" in pipe:
pipe["engine"] = "wren_ui"

# Write back to the file
with open("config.yaml", "w") as file:
yaml.safe_dump_all(documents, file, default_flow_style=False)

print("Successfully updated engine names to 'wren_ui' in all pipelines")


if __name__ == "__main__":
update_config()
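To illustrate what this script does, here is a small self-contained sketch that applies the same transformation to a made-up two-document config; every field except type, pipes, and engine is hypothetical and not taken from the real config.yaml. The llm document passes through unchanged, and only pipes that already declare an engine are rewritten.

# Sketch only: demonstrates the force_update_config.py transformation on a
# made-up multi-document YAML string.
import yaml

sample = """\
type: llm
provider: example_provider
---
type: pipeline
pipes:
  - name: sql_generation
    engine: some_other_engine
  - name: db_schema_indexing
"""

documents = list(yaml.safe_load_all(sample))

for doc in documents:
    if doc.get("type") == "pipeline":
        for pipe in doc.get("pipes", []):
            if "engine" in pipe:
                pipe["engine"] = "wren_ui"  # pipes without an engine key are left alone

print(yaml.safe_dump_all(documents, default_flow_style=False))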