diff --git a/models/gemini/manifest.yaml b/models/gemini/manifest.yaml index 9537e8bd..af5e3c48 100644 --- a/models/gemini/manifest.yaml +++ b/models/gemini/manifest.yaml @@ -33,4 +33,4 @@ resource: tool: enabled: true type: plugin -version: 0.0.2 +version: 0.0.3 diff --git a/models/gemini/models/llm/_position.yaml b/models/gemini/models/llm/_position.yaml index ab3081db..f2f4bd85 100644 --- a/models/gemini/models/llm/_position.yaml +++ b/models/gemini/models/llm/_position.yaml @@ -1,3 +1,9 @@ +- gemini-2.0-flash-001 +- gemini-2.0-flash-exp +- gemini-2.0-flash-lite-preview-02-05 +- gemini-2.0-pro-exp-02-05 +- gemini-2.0-flash-thinking-exp-1219 +- gemini-2.0-flash-thinking-exp-01-21 - gemini-1.5-pro - gemini-1.5-pro-latest - gemini-1.5-pro-001 diff --git a/models/gemini/models/llm/gemini-2.0-flash-001.yaml b/models/gemini/models/llm/gemini-2.0-flash-001.yaml new file mode 100644 index 00000000..bef7ca5e --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-flash-001.yaml @@ -0,0 +1,41 @@ +model: gemini-2.0-flash-001 +label: + en_US: Gemini 2.0 Flash 001 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call + - document + - video + - audio +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token.
+ required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/models/gemini/models/llm/gemini-2.0-flash-exp.yaml b/models/gemini/models/llm/gemini-2.0-flash-exp.yaml new file mode 100644 index 00000000..966617e9 --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-flash-exp.yaml @@ -0,0 +1,41 @@ +model: gemini-2.0-flash-exp +label: + en_US: Gemini 2.0 Flash Exp +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call + - document + - video + - audio +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token.
+ required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/models/gemini/models/llm/gemini-2.0-flash-lite-preview-02-05.yaml b/models/gemini/models/llm/gemini-2.0-flash-lite-preview-02-05.yaml new file mode 100644 index 00000000..702f052a --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-flash-lite-preview-02-05.yaml @@ -0,0 +1,42 @@ +model: gemini-2.0-flash-lite-preview-02-05 +label: + en_US: Gemini 2.0 Flash Lite Preview 0205 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call + - document + - video + - audio +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token.
+ required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD + diff --git a/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-01-21.yaml b/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-01-21.yaml new file mode 100644 index 00000000..71676264 --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-01-21.yaml @@ -0,0 +1,39 @@ +model: gemini-2.0-flash-thinking-exp-01-21 +label: + en_US: Gemini 2.0 Flash Thinking Exp 01-21 +model_type: llm +features: + - agent-thought + - vision + - document + - video + - audio +model_properties: + mode: chat + context_size: 32767 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token.
+ required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-1219.yaml b/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-1219.yaml new file mode 100644 index 00000000..dfcf8fd0 --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-flash-thinking-exp-1219.yaml @@ -0,0 +1,39 @@ +model: gemini-2.0-flash-thinking-exp-1219 +label: + en_US: Gemini 2.0 Flash Thinking Exp 1219 +model_type: llm +features: + - agent-thought + - vision + - document + - video + - audio +model_properties: + mode: chat + context_size: 32767 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token.
+ required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/models/gemini/models/llm/gemini-2.0-pro-exp-02-05.yaml b/models/gemini/models/llm/gemini-2.0-pro-exp-02-05.yaml new file mode 100644 index 00000000..fb571f08 --- /dev/null +++ b/models/gemini/models/llm/gemini-2.0-pro-exp-02-05.yaml @@ -0,0 +1,41 @@ +model: gemini-2.0-pro-exp-02-05 +label: + en_US: Gemini 2.0 pro exp 02-05 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call + - document + - video + - audio +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_output_tokens + use_template: max_tokens + default: 8192 + min: 1 + max: 8192 + - name: json_schema + use_template: json_schema +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/models/gemini/requirements.txt b/models/gemini/requirements.txt index 6dc12270..12ec24ca 100644 --- a/models/gemini/requirements.txt +++ b/models/gemini/requirements.txt @@ -1,4 +1,4 @@ -dify_plugin~=0.0.1b61 +dify_plugin~=0.0.1b64 google-ai-generativelanguage~=0.6.9 google-api-python-client~=2.90.0 google-api-core~=2.18.0