diff --git a/crates/goose/src/providers/declarative/groq.json b/crates/goose/src/providers/declarative/groq.json
index 8578ed8af4a7..f2a7000abd6f 100644
--- a/crates/goose/src/providers/declarative/groq.json
+++ b/crates/goose/src/providers/declarative/groq.json
@@ -6,26 +6,56 @@
   "api_key_env": "GROQ_API_KEY",
   "base_url": "https://api.groq.com/openai/v1/chat/completions",
   "models": [
+    {
+      "name": "moonshotai/kimi-k2-instruct-0905",
+      "context_limit": 262144,
+      "max_tokens": 16384
+    },
     {
       "name": "openai/gpt-oss-120b",
-      "context_limit": 131072
+      "context_limit": 131072,
+      "max_tokens": 65536
     },
     {
-      "name": "llama-3.1-8b-instant",
-      "context_limit": 131072
+      "name": "openai/gpt-oss-20b",
+      "context_limit": 131072,
+      "max_tokens": 65536
+    },
+    {
+      "name": "meta-llama/llama-4-maverick-17b-128e-instruct",
+      "context_limit": 131072,
+      "max_tokens": 8192
+    },
+    {
+      "name": "meta-llama/llama-4-scout-17b-16e-instruct",
+      "context_limit": 131072,
+      "max_tokens": 8192
+    },
+    {
+      "name": "qwen/qwen3-32b",
+      "context_limit": 131072,
+      "max_tokens": 40960
     },
     {
       "name": "llama-3.3-70b-versatile",
-      "context_limit": 131072
+      "context_limit": 131072,
+      "max_tokens": 32768
     },
     {
-      "name": "meta-llama/llama-guard-4-12b",
-      "context_limit": 131072
+      "name": "llama-3.1-8b-instant",
+      "context_limit": 131072,
+      "max_tokens": 131072
     },
     {
-      "name": "openai/gpt-oss-20b",
-      "context_limit": 131072
+      "name": "openai/gpt-oss-safeguard-20b",
+      "context_limit": 131072,
+      "max_tokens": 65536
+    },
+    {
+      "name": "meta-llama/llama-guard-4-12b",
+      "context_limit": 131072,
+      "max_tokens": 1024
     }
   ],
   "supports_streaming": true
-}
\ No newline at end of file
+}