From 6b05d731383c4fce72d96714515fd091cd96a5d7 Mon Sep 17 00:00:00 2001
From: Takashi Kawachi
Date: Sun, 26 Mar 2023 17:46:05 +0900
Subject: [PATCH] Add short option -m for the model flag and remove it from
 the max-tokens flag

This commit gives the model flag the short option -m and removes the short
option from the max-tokens flag, leaving it long-only (a short rune of 0
means no short option). The default value of max-tokens itself is unchanged.
---
 aichat.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aichat.go b/aichat.go
index 7d50139..50f59d0 100644
--- a/aichat.go
+++ b/aichat.go
@@ -230,12 +230,12 @@ func main() {
 	var split = false
 	var model = gogpt.GPT4
 	getopt.FlagLong(&temperature, "temperature", 't', "temperature")
-	getopt.FlagLong(&maxTokens, "max-tokens", 'm', "max tokens, 0 to use default")
+	getopt.FlagLong(&maxTokens, "max-tokens", 0, "max tokens, 0 to use default")
 	getopt.FlagLong(&verbose, "verbose", 'v', "verbose output")
 	getopt.FlagLong(&listPrompts, "list-prompts", 'l', "list prompts")
 	getopt.FlagLong(&nonStreaming, "non-streaming", 0, "non streaming mode")
 	getopt.FlagLong(&split, "split", 0, "split input")
-	getopt.FlagLong(&model, "model", 0, "model")
+	getopt.FlagLong(&model, "model", 'm', "model")
 	getopt.Parse()

 	if listPrompts {
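
Editor's note (not part of the patch): the getopt package here is assumed to be
github.com/pborman/getopt/v2, where the third argument to FlagLong is the
short-option rune and passing 0 registers a long-only flag. A minimal,
self-contained sketch under that assumption, using placeholder defaults
instead of the program's gogpt.GPT4 and maxTokens variables:

package main

import (
	"fmt"

	"github.com/pborman/getopt/v2"
)

func main() {
	// Placeholder defaults for illustration only.
	model := "gpt-4"
	maxTokens := 0

	// After this patch: --model also accepts -m, while --max-tokens is
	// long-only because its short rune is 0.
	getopt.FlagLong(&model, "model", 'm', "model")
	getopt.FlagLong(&maxTokens, "max-tokens", 0, "max tokens, 0 to use default")
	getopt.Parse()

	fmt.Println("model:", model, "maxTokens:", maxTokens)
}

With this layout, -m selects the model, and max-tokens can only be set via its
long form --max-tokens.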