Skip to content

Commit 69b4a95

Browse files
committed
Fix: tool-call and usage handling for the AWS, GCP, and Claude /v1/messages streaming endpoints.
1 parent 997f670 commit 69b4a95

File tree

5 files changed

+110
-30
lines changed

5 files changed

+110
-30
lines changed

relay/channel/anthropic/adaptor.go

+16-4
Original file line numberDiff line numberDiff line change
@@ -58,8 +58,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
5858
if !meta.IsClaude {
5959
if meta.IsStream {
6060
var responseText string
61-
err, _, responseText = StreamHandler(c, resp)
62-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
61+
err, usage, responseText = StreamHandler(c, resp)
62+
if usage == nil {
63+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
64+
}
65+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
66+
usage.PromptTokens = meta.PromptTokens
67+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
68+
}
6369

6470
if usage.CompletionTokens == 0 {
6571
if config.BlankReplyRetryEnabled {
@@ -81,8 +87,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
8187
} else {
8288
if meta.IsStream {
8389
var responseText string
84-
err, _, responseText = ClaudeStreamHandler(c, resp)
85-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
90+
err, usage, responseText = ClaudeStreamHandler(c, resp)
91+
if usage == nil {
92+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
93+
}
94+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
95+
usage.PromptTokens = meta.PromptTokens
96+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
97+
}
8698
if usage.CompletionTokens == 0 {
8799
if config.BlankReplyRetryEnabled {
88100
return "", nil, &model.ErrorWithStatusCode{

relay/channel/anthropic/main.go

+53-9
Original file line numberDiff line numberDiff line change
@@ -467,9 +467,6 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
467467
},
468468
}
469469

470-
// 打印返回的 OpenAI 格式响应
471-
fmt.Printf("返回的 OpenAI 格式响应:\n%+v\n", fullTextResponse)
472-
473470
return &fullTextResponse
474471
}
475472

@@ -478,16 +475,35 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
478475
var response *Response
479476
var responseText string
480477
var stopReason string
478+
tools := make([]model.Tool, 0)
479+
481480
switch claudeResponse.Type {
482481
case "message_start":
483482
return nil, claudeResponse.Message
484483
case "content_block_start":
485484
if claudeResponse.ContentBlock != nil {
486485
responseText = claudeResponse.ContentBlock.Text
486+
if claudeResponse.ContentBlock.Type == "tool_use" {
487+
tools = append(tools, model.Tool{
488+
Id: claudeResponse.ContentBlock.Id,
489+
Type: "function",
490+
Function: model.Function{
491+
Name: claudeResponse.ContentBlock.Name,
492+
Arguments: "",
493+
},
494+
})
495+
}
487496
}
488497
case "content_block_delta":
489498
if claudeResponse.Delta != nil {
490499
responseText = claudeResponse.Delta.Text
500+
if claudeResponse.Delta.Type == "input_json_delta" {
501+
tools = append(tools, model.Tool{
502+
Function: model.Function{
503+
Arguments: claudeResponse.Delta.PartialJson,
504+
},
505+
})
506+
}
491507
}
492508
case "message_delta":
493509
if claudeResponse.Usage != nil {
@@ -501,6 +517,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
501517
}
502518
var choice openai.ChatCompletionsStreamResponseChoice
503519
choice.Delta.Content = responseText
520+
if len(tools) > 0 {
521+
choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
522+
choice.Delta.ToolCalls = tools
523+
}
504524
choice.Delta.Role = "assistant"
505525
finishReason := stopReasonClaude2OpenAI(&stopReason)
506526
if finishReason != "null" {
@@ -511,7 +531,6 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
511531
openaiResponse.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
512532
return &openaiResponse, response
513533
}
514-
515534
func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
516535
var responseText string
517536
if len(claudeResponse.Content) > 0 {
@@ -578,6 +597,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
578597
var modelName string
579598
var id string
580599
var streamError *model.ErrorWithStatusCode
600+
var lastToolCallChoice openai.ChatCompletionsStreamResponseChoice
581601
c.Stream(func(w io.Writer) bool {
582602
select {
583603
case data := <-dataChan:
@@ -593,15 +613,29 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
593613
if meta != nil {
594614
usage.PromptTokens += meta.Usage.InputTokens
595615
usage.CompletionTokens += meta.Usage.OutputTokens
596-
modelName = meta.Model
597-
id = fmt.Sprintf("chatcmpl-%s", meta.Id)
598-
return true
616+
if len(meta.Id) > 0 { // only message_start has an id, otherwise it's a finish_reason event.
617+
id = fmt.Sprintf("chatcmpl-%s", meta.Id)
618+
return true
619+
} else { // finish_reason case
620+
ProcessToolCalls(&lastToolCallChoice, response)
621+
}
599622
}
600623
if response == nil {
601624
return true
602625
}
603-
responsePart := response.Choices[0].Delta.Content.(string)
604-
responseTextBuilder.WriteString(responsePart)
626+
if response != nil && len(response.Choices) > 0 {
627+
choice := response.Choices[0]
628+
if choice.Delta.Content != nil {
629+
if content, ok := choice.Delta.Content.(string); ok {
630+
responseTextBuilder.WriteString(content)
631+
} else if content, ok := choice.Delta.Content.(map[string]interface{}); ok {
632+
// 处理其他可能的内容类型
633+
if textContent, exists := content["text"].(string); exists {
634+
responseTextBuilder.WriteString(textContent)
635+
}
636+
}
637+
}
638+
}
605639
response.Id = id
606640
response.Model = modelName
607641
response.Created = createdTime
@@ -816,3 +850,13 @@ func ClaudeHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
816850
_, err = c.Writer.Write(responseBody)
817851
return nil, &usage, aitext
818852
}
853+
func ProcessToolCalls(lastToolCallChoice *openai.ChatCompletionsStreamResponseChoice, response *openai.ChatCompletionsStreamResponse) {
854+
if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
855+
lastToolCall := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1]
856+
if lastToolCall != nil && lastToolCall.Function.Arguments == nil {
857+
lastToolCall.Function.Arguments = "{}"
858+
response.Choices[len(response.Choices)-1].Delta.Content = nil
859+
response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
860+
}
861+
}
862+
}

relay/channel/aws/adaptor.go

+16-4
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
7474
if !meta.IsClaude {
7575
if meta.IsStream {
7676
var responseText string
77-
err, _, responseText = StreamHandler(c, a.awsClient)
78-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
77+
err, usage, responseText = StreamHandler(c, a.awsClient)
78+
if usage == nil {
79+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
80+
}
81+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
82+
usage.PromptTokens = meta.PromptTokens
83+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
84+
}
7985

8086
if usage.CompletionTokens == 0 {
8187
if config.BlankReplyRetryEnabled {
@@ -97,8 +103,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
97103
} else {
98104
if meta.IsStream {
99105
var responseText string
100-
err, _, responseText = StreamClaudeHandler(c, a.awsClient)
101-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
106+
err, usage, responseText = StreamClaudeHandler(c, a.awsClient)
107+
if usage == nil {
108+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
109+
}
110+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
111+
usage.PromptTokens = meta.PromptTokens
112+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
113+
}
102114
if usage.CompletionTokens == 0 {
103115
if config.BlankReplyRetryEnabled {
104116
return "", nil, &model.ErrorWithStatusCode{

relay/channel/aws/main.go

+9-9
Original file line numberDiff line numberDiff line change
@@ -214,20 +214,20 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
214214
id = fmt.Sprintf("chatcmpl-%s", meta.Id)
215215
return true
216216
} else { // finish_reason case
217-
if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
218-
lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function
219-
if len(lastArgs.Arguments.(string)) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments.
220-
lastArgs.Arguments = "{}"
221-
response.Choices[len(response.Choices)-1].Delta.Content = nil
222-
response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
223-
}
224-
}
217+
anthropic.ProcessToolCalls(&lastToolCallChoice, response)
225218
}
226219
}
227220
if response == nil {
228221
return true
229222
}
230-
responseText += response.Choices[0].Delta.Content.(string)
223+
if response.Choices != nil && len(response.Choices) > 0 {
224+
choice := response.Choices[0]
225+
if choice.Delta.Content != nil {
226+
if content, ok := choice.Delta.Content.(string); ok {
227+
responseText += content
228+
}
229+
}
230+
}
231231
response.Id = id
232232
response.Model = c.GetString(ctxkey.OriginalModel)
233233
response.Created = createdTime

relay/channel/gcpclaude/adaptor.go

+16-4
Original file line numberDiff line numberDiff line change
@@ -112,8 +112,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
112112
if meta.IsStream {
113113

114114
var responseText string
115-
err, _, responseText = anthropic.StreamHandler(c, resp)
116-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
115+
err, usage, responseText = anthropic.StreamHandler(c, resp)
116+
if usage == nil {
117+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
118+
}
119+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
120+
usage.PromptTokens = meta.PromptTokens
121+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
122+
}
117123

118124
if usage.CompletionTokens == 0 {
119125
if config.BlankReplyRetryEnabled {
@@ -137,8 +143,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.Rel
137143
if meta.IsStream {
138144

139145
var responseText string
140-
err, _, responseText = anthropic.ClaudeStreamHandler(c, resp)
141-
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
146+
err, usage, responseText = anthropic.ClaudeStreamHandler(c, resp)
147+
if usage == nil {
148+
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
149+
}
150+
if usage.TotalTokens != 0 && usage.CompletionTokens == 0 || usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
151+
usage.PromptTokens = meta.PromptTokens
152+
usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
153+
}
142154
if usage.CompletionTokens == 0 {
143155
if config.BlankReplyRetryEnabled {
144156
return "", nil, &model.ErrorWithStatusCode{

0 commit comments

Comments (0)