Documentation
¶
Index ¶
- Constants
- Variables
- func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func EmbeddingHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func GetImageScannerBuffer() *[]byte
- func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func PutImageScannerBuffer(buf *[]byte)
- func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- type Adaptor
- func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
- func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)
- func (a *Adaptor) GetBaseURL() string
- func (a *Adaptor) GetChannelName() string
- func (a *Adaptor) GetModelList() []*model.ModelConfig
- func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
- func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
- type BatchEmbeddingRequest
- type ChatCandidate
- type ChatContent
- type ChatGenerationConfig
- type ChatPromptFeedback
- type ChatRequest
- type ChatResponse
- type ChatSafetyRating
- type ChatSafetySettings
- type ChatTools
- type CountTokensResponse
- type EmbeddingData
- type EmbeddingRequest
- type EmbeddingResponse
- type Error
- type FunctionCall
- type FunctionCallingConfig
- type FunctionResponse
- type InlineData
- type Part
- type PromptTokensDetail
- type ToolConfig
- type UsageMetadata
Constants ¶
View Source
const (
VisionMaxImageNum = 16
)
Variables ¶
View Source
var ModelList = []*model.ModelConfig{
	{
		Model: "gemini-1.5-pro",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.0025,
			OutputPrice: 0.01,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-1.5-flash",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.00015,
			OutputPrice: 0.0006,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-1.5-flash-8b",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.000075,
			OutputPrice: 0.0003,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-2.0-flash",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.0001,
			OutputPrice: 0.0004,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-2.0-flash-lite-preview",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.000075,
			OutputPrice: 0.0003,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-2.0-flash-thinking-exp",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.0001,
			OutputPrice: 0.0004,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "gemini-2.0-pro-exp",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice:  0.0025,
			OutputPrice: 0.01,
		},
		RPM: 600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model: "text-embedding-004",
		Type:  mode.Embeddings,
		Owner: model.ModelOwnerGoogle,
		Price: model.Price{
			InputPrice: 0.0001,
		},
		RPM: 1500,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2048),
			model.WithModelConfigMaxOutputTokens(768),
		),
	},
}
Functions ¶
func ConvertEmbeddingRequest ¶
func ConvertRequest ¶
ConvertRequest sets all Gemini safety-filter categories to their lowest blocking thresholds so that the upstream safety filters do not suppress responses.
func EmbeddingHandler ¶
func EmbeddingHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func GetImageScannerBuffer ¶ added in v0.1.5
func GetImageScannerBuffer() *[]byte
func Handler ¶
func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func PutImageScannerBuffer ¶ added in v0.1.5
func PutImageScannerBuffer(buf *[]byte)
func StreamHandler ¶
func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
Types ¶
type Adaptor ¶
type Adaptor struct{}
func (*Adaptor) ConvertRequest ¶
func (*Adaptor) DoResponse ¶
func (*Adaptor) GetBaseURL ¶
func (*Adaptor) GetChannelName ¶
func (*Adaptor) GetModelList ¶
func (a *Adaptor) GetModelList() []*model.ModelConfig
type BatchEmbeddingRequest ¶
type BatchEmbeddingRequest struct {
Requests []EmbeddingRequest `json:"requests"`
}
type ChatCandidate ¶
type ChatCandidate struct {
FinishReason string `json:"finishReason"`
Content ChatContent `json:"content"`
SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
Index int64 `json:"index"`
}
type ChatContent ¶
type ChatGenerationConfig ¶
type ChatGenerationConfig struct {
ResponseSchema any `json:"responseSchema,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
TopP *float64 `json:"topP,omitempty"`
ResponseMimeType string `json:"responseMimeType,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
ResponseModalities []string `json:"responseModalities,omitempty"`
}
type ChatPromptFeedback ¶
type ChatPromptFeedback struct {
SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}
type ChatRequest ¶
type ChatRequest struct {
Contents []*ChatContent `json:"contents"`
SystemInstruction *ChatContent `json:"system_instruction,omitempty"`
SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig *ChatGenerationConfig `json:"generation_config,omitempty"`
Tools []ChatTools `json:"tools,omitempty"`
ToolConfig *ToolConfig `json:"tool_config,omitempty"`
}
type ChatResponse ¶
type ChatResponse struct {
Candidates []*ChatCandidate `json:"candidates"`
PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
UsageMetadata *UsageMetadata `json:"usageMetadata"`
ModelVersion string `json:"modelVersion"`
}
func (*ChatResponse) GetResponseText ¶
func (g *ChatResponse) GetResponseText() string
type ChatSafetyRating ¶
type ChatSafetySettings ¶
type ChatTools ¶
type ChatTools struct {
FunctionDeclarations any `json:"function_declarations,omitempty"`
}
type CountTokensResponse ¶
type EmbeddingData ¶
type EmbeddingData struct {
Values []float64 `json:"values"`
}
type EmbeddingRequest ¶
type EmbeddingRequest struct {
Model string `json:"model"`
TaskType string `json:"taskType,omitempty"`
Title string `json:"title,omitempty"`
Content ChatContent `json:"content"`
OutputDimensionality int `json:"outputDimensionality,omitempty"`
}
type EmbeddingResponse ¶
type EmbeddingResponse struct {
Error *Error `json:"error,omitempty"`
Embeddings []EmbeddingData `json:"embeddings"`
}
type FunctionCall ¶
type FunctionCallingConfig ¶
type FunctionResponse ¶
type InlineData ¶
type Part ¶
type Part struct {
InlineData *InlineData `json:"inlineData,omitempty"`
FunctionCall *FunctionCall `json:"functionCall,omitempty"`
FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"`
Text string `json:"text,omitempty"`
}
type PromptTokensDetail ¶ added in v0.1.6
type ToolConfig ¶
type ToolConfig struct {
FunctionCallingConfig FunctionCallingConfig `json:"function_calling_config"`
}
type UsageMetadata ¶
type UsageMetadata struct {
PromptTokenCount int64 `json:"promptTokenCount"`
CandidatesTokenCount int64 `json:"candidatesTokenCount"`
TotalTokenCount int64 `json:"totalTokenCount"`
PromptTokensDetails []PromptTokensDetail `json:"promptTokensDetails"`
}
Click to show internal directories.
Click to hide internal directories.