Commit 469269e

refactor: refactor core logic into modular helper functions
- Refactor token usage printing into a dedicated printTokenUsage function
- Refactor token usage output mapping into addTokenUsageToOutput function
- Move tool schema parsing and validation logic to prepareToolSchema
- Move chat completion request construction to buildChatRequest
- Centralize response extraction logic in extractResponse
- Simplify main run function by delegating responsibilities to new helper functions

Signed-off-by: Bo-Yi Wu <appleboy.tw@gmail.com>
1 parent c6c9eba commit 469269e

1 file changed: main.go

Lines changed: 128 additions & 87 deletions
@@ -27,6 +27,120 @@ func maskAPIKey(apiKey string) string {
 	return apiKey[:4] + "****" + apiKey[len(apiKey)-4:]
 }
 
+// printTokenUsage prints token usage statistics to stdout
+func printTokenUsage(usage openai.Usage) {
+	fmt.Println("--- Token Usage ---")
+	fmt.Printf("Prompt Tokens: %d\n", usage.PromptTokens)
+	fmt.Printf("Completion Tokens: %d\n", usage.CompletionTokens)
+	fmt.Printf("Total Tokens: %d\n", usage.TotalTokens)
+	if usage.PromptTokensDetails != nil {
+		fmt.Printf("Cached Tokens: %d\n", usage.PromptTokensDetails.CachedTokens)
+	}
+	if d := usage.CompletionTokensDetails; d != nil {
+		fmt.Printf("Reasoning Tokens: %d\n", d.ReasoningTokens)
+		fmt.Printf("Accepted Prediction Tokens: %d\n", d.AcceptedPredictionTokens)
+		fmt.Printf("Rejected Prediction Tokens: %d\n", d.RejectedPredictionTokens)
+	}
+	fmt.Println("--- End Token Usage ---")
+}
+
+// addTokenUsageToOutput adds token usage metrics to the output map
+func addTokenUsageToOutput(output map[string]string, usage openai.Usage) {
+	output["prompt_tokens"] = strconv.Itoa(usage.PromptTokens)
+	output["completion_tokens"] = strconv.Itoa(usage.CompletionTokens)
+	output["total_tokens"] = strconv.Itoa(usage.TotalTokens)
+
+	if usage.PromptTokensDetails != nil {
+		output["prompt_cached_tokens"] = strconv.Itoa(usage.PromptTokensDetails.CachedTokens)
+	}
+
+	if d := usage.CompletionTokensDetails; d != nil {
+		output["completion_reasoning_tokens"] = strconv.Itoa(d.ReasoningTokens)
+		output["completion_accepted_prediction_tokens"] = strconv.Itoa(d.AcceptedPredictionTokens)
+		output["completion_rejected_prediction_tokens"] = strconv.Itoa(d.RejectedPredictionTokens)
+	}
+}
+
+// extractResponse extracts the response content from the API response
+func extractResponse(
+	resp openai.ChatCompletionResponse,
+	toolMeta *ToolMeta,
+	debug bool,
+) (string, error) {
+	if len(resp.Choices) == 0 {
+		return "", fmt.Errorf("no response from LLM")
+	}
+
+	if toolMeta != nil {
+		// Extract function call arguments when tool schema is used
+		if len(resp.Choices[0].Message.ToolCalls) > 0 {
+			// Debug: Print tool call details if debug mode is enabled
+			if debug {
+				fmt.Println("=== Debug Mode: Tool Calls ===")
+				if err := godump.Dump(resp.Choices[0].Message.ToolCalls); err != nil {
+					fmt.Fprintf(os.Stderr, "Warning: failed to dump tool calls: %v\n", err)
+				}
+				fmt.Println("==============================")
+			}
+			return resp.Choices[0].Message.ToolCalls[0].Function.Arguments, nil
+		}
+		return "", fmt.Errorf("expected tool call response but got none")
+	}
+
+	return resp.Choices[0].Message.Content, nil
+}
+
+// prepareToolSchema parses and validates the tool schema if provided
+func prepareToolSchema(config *Config) (*ToolMeta, error) {
+	if config.ToolSchema == "" {
+		return nil, nil
+	}
+
+	toolMeta, err := ParseToolSchema(config.ToolSchema)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse tool schema: %v", err)
+	}
+
+	// Debug: Print tool schema if debug mode is enabled
+	if config.Debug {
+		fmt.Println("=== Debug Mode: Tool Schema ===")
+		if err := godump.Dump(toolMeta); err != nil {
+			fmt.Fprintf(os.Stderr, "Warning: failed to dump tool schema: %v\n", err)
+		}
+		fmt.Println("===============================")
+	}
+
+	return toolMeta, nil
+}
+
+// buildChatRequest creates a chat completion request with optional tool support
+func buildChatRequest(
+	config *Config,
+	messages []openai.ChatCompletionMessage,
+	toolMeta *ToolMeta,
+) openai.ChatCompletionRequest {
+	req := openai.ChatCompletionRequest{
+		Model:       config.Model,
+		Messages:    messages,
+		Temperature: float32(config.Temperature),
+		MaxTokens:   config.MaxTokens,
+	}
+
+	// Add tool if schema provided
+	if toolMeta != nil {
+		req.Tools = []openai.Tool{toolMeta.ToOpenAITool()}
+		// Force the model to use this specific function
+		req.ToolChoice = &openai.ToolChoice{
+			Type: openai.ToolTypeFunction,
+			Function: openai.ToolFunction{
+				Name: toolMeta.Name,
+			},
+		}
+	}
+
+	return req
+}
+
 func run() error {
 	// Load configuration
 	config, err := LoadConfig()
@@ -55,13 +169,10 @@ func run() error {
 	// Build messages
 	messages := BuildMessages(config)
 
-	// Parse tool schema if provided
-	var toolMeta *ToolMeta
-	if config.ToolSchema != "" {
-		toolMeta, err = ParseToolSchema(config.ToolSchema)
-		if err != nil {
-			return fmt.Errorf("failed to parse tool schema: %v", err)
-		}
+	// Parse and validate tool schema if provided
+	toolMeta, err := prepareToolSchema(config)
+	if err != nil {
+		return err
 	}
 
 	// Debug: Print messages if debug mode is enabled
@@ -71,35 +182,10 @@ func run() error {
 			fmt.Fprintf(os.Stderr, "Warning: failed to dump messages: %v\n", err)
 		}
 		fmt.Println("============================")
-
-		if toolMeta != nil {
-			fmt.Println("=== Debug Mode: Tool Schema ===")
-			if err := godump.Dump(toolMeta); err != nil {
-				fmt.Fprintf(os.Stderr, "Warning: failed to dump tool schema: %v\n", err)
-			}
-			fmt.Println("===============================")
-		}
 	}
 
-	// Create chat completion request
-	req := openai.ChatCompletionRequest{
-		Model:       config.Model,
-		Messages:    messages,
-		Temperature: float32(config.Temperature),
-		MaxTokens:   config.MaxTokens,
-	}
-
-	// Add tool if schema provided
-	if toolMeta != nil {
-		req.Tools = []openai.Tool{toolMeta.ToOpenAITool()}
-		// Force the model to use this specific function
-		req.ToolChoice = &openai.ToolChoice{
-			Type: openai.ToolTypeFunction,
-			Function: openai.ToolFunction{
-				Name: toolMeta.Name,
-			},
-		}
-	}
+	// Create chat completion request with optional tool support
+	req := buildChatRequest(config, messages, toolMeta)
 
 	fmt.Println("Sending request to LLM...")
 	fmt.Printf("Model: %s\n", config.Model)
@@ -111,50 +197,19 @@ func run() error {
 		return fmt.Errorf("chat completion error: %v", err)
 	}
 
-	// Extract response
-	if len(resp.Choices) == 0 {
-		return fmt.Errorf("no response from LLM")
-	}
-
-	var response string
-	if toolMeta != nil {
-		// Extract function call arguments when tool schema is used
-		if len(resp.Choices[0].Message.ToolCalls) > 0 {
-			// Debug: Print tool call details if debug mode is enabled
-			if config.Debug {
-				fmt.Println("=== Debug Mode: Tool Calls ===")
-				if err := godump.Dump(resp.Choices[0].Message.ToolCalls); err != nil {
-					fmt.Fprintf(os.Stderr, "Warning: failed to dump tool calls: %v\n", err)
-				}
-				fmt.Println("==============================")
-			}
-			response = resp.Choices[0].Message.ToolCalls[0].Function.Arguments
-		} else {
-			return fmt.Errorf("expected tool call response but got none")
-		}
-	} else {
-		response = resp.Choices[0].Message.Content
+	// Extract response content
+	response, err := extractResponse(resp, toolMeta, config.Debug)
+	if err != nil {
+		return err
 	}
 
 	// Print response for debugging
 	fmt.Println("--- LLM Response ---")
	fmt.Println(response)
 	fmt.Println("--- End Response ---")
 
-	// Print token usage
-	fmt.Println("--- Token Usage ---")
-	fmt.Printf("Prompt Tokens: %d\n", resp.Usage.PromptTokens)
-	fmt.Printf("Completion Tokens: %d\n", resp.Usage.CompletionTokens)
-	fmt.Printf("Total Tokens: %d\n", resp.Usage.TotalTokens)
-	if resp.Usage.PromptTokensDetails != nil {
-		fmt.Printf("Cached Tokens: %d\n", resp.Usage.PromptTokensDetails.CachedTokens)
-	}
-	if d := resp.Usage.CompletionTokensDetails; d != nil {
-		fmt.Printf("Reasoning Tokens: %d\n", d.ReasoningTokens)
-		fmt.Printf("Accepted Prediction Tokens: %d\n", d.AcceptedPredictionTokens)
-		fmt.Printf("Rejected Prediction Tokens: %d\n", d.RejectedPredictionTokens)
-	}
-	fmt.Println("--- End Token Usage ---")
+	// Print token usage statistics
+	printTokenUsage(resp.Usage)
 
 	// Set GitHub Actions output
 	var toolArgs map[string]string
@@ -177,22 +232,8 @@ func run() error {
 		)
 	}
 
-	// Add token usage to output
-	output["prompt_tokens"] = strconv.Itoa(resp.Usage.PromptTokens)
-	output["completion_tokens"] = strconv.Itoa(resp.Usage.CompletionTokens)
-	output["total_tokens"] = strconv.Itoa(resp.Usage.TotalTokens)
-
-	// Add prompt token details if available
-	if resp.Usage.PromptTokensDetails != nil {
-		output["prompt_cached_tokens"] = strconv.Itoa(resp.Usage.PromptTokensDetails.CachedTokens)
-	}
-
-	// Add completion token details if available
-	if d := resp.Usage.CompletionTokensDetails; d != nil {
-		output["completion_reasoning_tokens"] = strconv.Itoa(d.ReasoningTokens)
-		output["completion_accepted_prediction_tokens"] = strconv.Itoa(d.AcceptedPredictionTokens)
-		output["completion_rejected_prediction_tokens"] = strconv.Itoa(d.RejectedPredictionTokens)
-	}
+	// Add token usage metrics to output
+	addTokenUsageToOutput(output, resp.Usage)
 
 	if err := gh.SetOutput(output); err != nil {
 		return fmt.Errorf("failed to set output: %v", err)
