Commit Diff


commit - 0ea3f8734f94c843e8eecbb49b317f2c39e127f0
commit + 6e8bd96c92d7cec89fa519b4ed03856f0fb2838e
blob - d7d2ff9e7f32aeeb5e468970e9afab5961e4defa
blob + 339b222c2242873bf6c3f17666aa76e86da248c6
--- man/llm.1
+++ man/llm.1
@@ -8,7 +8,6 @@
 .Op Fl c
 .Op Fl m Ar model
 .Op Fl s Ar prompt
-.Op Fl u Ar url
 .Sh DESCRIPTION
 .Nm
 starts a chat with a large language model.
@@ -27,19 +26,35 @@ a line consisting of just a literal dot character
 sends the prompt.
 Subsequent replies and prompts are included as context for the model's responses.
 .Pp
-An API key written to
-.Pa $HOME/.config/openai/key
-will be included with each request for authentication.
+A configuration file at
+.Pa $HOME/.config/openai
+is required and directs
+.Nm
+how to connect to the chat completion HTTP API
+and set any completion options.
+The file consists of key-value pairs separated by a space,
+one per line.
+Empty lines and lines beginning with "#" are ignored.
+The following options may be set:
+.Bl -tag -width Ds
+.It Ic url Ar url
+The base URL where an OpenAI-compatible HTTP API is served.
+The default is
+.Ar http://127.0.0.1:8080 .
+.It Ic token Ar string
+The bearer token used to authenticate requests against the HTTP API.
+.It Ic model Ar name
+Request that prompts be completed by model
+.Ar name .
+.El
 .Pp
-The following flags are understood:
+The following command-line flags are understood:
 .Bl -tag -width Ds
 .It Fl c
 Start a back-and-forth chat.
 .It Fl m Ar model
 Prompt
 .Ar model .
-The default is
-.Ar ministral-8b-latest .
 Note that
 .Xr llama-server 1
 from llama.cpp ignores this value.
@@ -47,17 +62,23 @@ from llama.cpp ignores this value.
 Set
 .Ar prompt
 as the system prompt.
-.It Fl u Ar url
-Connect to the OpenAI API root at
-.Ar url .
-The default is
-.Ar http://127.0.0.1:8080 .
-.Sh EXAMPLE
+.El
+.Sh EXAMPLES
 .Pp
 Chat with a locally-hosted Mistral NeMo model:
 .Bd -literal -offset Ds
 llama-server -m models/Mistral-Nemo-Instruct-2407-Q6_K.gguf -c 16384 -fa &
 echo "Hello, world!" | llm
 .Ed
+.Pp
+A configuration file to use Mistral's hosted language models,
+using a small model if none is specified at the command line:
+.Bd -literal -offset Ds
+url https://api.mistral.ai
+token abcdef12345678
+# use smaller models to boil oceans a bit more slowly
+model ministral-8b-latest
+# model mistral-small-latest
+.Ed
 .Sh EXIT STATUS
 .Ex
blob - 7ef90797d8b9ad16985f6e289e849e9ef889e5a7
blob + 16f8e0a576c5242d51537d4326a3084387ea4c6f
--- src/llm/llm.go
+++ src/llm/llm.go
@@ -15,20 +15,10 @@ import (
 	"olowe.co/x/openai"
 )
 
-var model = flag.String("m", "ministral-8b-latest", "model")
-var baseURL = flag.String("u", "http://127.0.0.1:8080", "openai API base URL")
+var model = flag.String("m", "", "model")
 var sysPrompt = flag.String("s", "", "system prompt")
 var converse = flag.Bool("c", false, "start a back-and-forth chat")
 
-func readToken() (string, error) {
-	confDir, err := os.UserConfigDir()
-	if err != nil {
-		return "", err
-	}
-	b, err := os.ReadFile(path.Join(confDir, "openai/token"))
-	return string(bytes.TrimSpace(b)), err
-}
-
 func copyAll(w io.Writer, paths []string) (n int64, err error) {
 	if len(paths) == 0 {
 		return io.Copy(w, os.Stdin)
@@ -56,15 +46,25 @@ func init() {
 }
 
 func main() {
-	token, err := readToken()
+	confDir, err := os.UserConfigDir()
 	if err != nil {
-		log.Fatalf("read auth token: %v", err)
+		log.Fatal(err)
 	}
-	client := &openai.Client{http.DefaultClient, token, *baseURL}
+	config, err := readConfig(path.Join(confDir, "openai"))
+	if err != nil {
+		log.Fatalf("read configuration: %v", err)
+	}
+	if *model == "" {
+		*model = config.DefaultModel
+	}
+	if config.BaseURL == "" {
+		config.BaseURL = "http://127.0.0.1:8080"
+	}
+	client := &openai.Client{http.DefaultClient, config.Token, config.BaseURL}
 
 	chat := openai.Chat{Model: *model}
 	if *sysPrompt != "" {
-		chat.Messages =  []openai.Message{
+		chat.Messages = []openai.Message{
 			{openai.RoleSystem, *sysPrompt},
 		}
 	}
blob - /dev/null
blob + e02ba987eb04c9f18b689ebc3ef7990651f25392 (mode 644)
--- /dev/null
+++ src/llm/config.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+)
+
+type Config struct {
+	BaseURL      string
+	Token        string
+	DefaultModel string
+}
+
+func readConfig(name string) (*Config, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var conf Config
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		if strings.HasPrefix(sc.Text(), "#") {
+			continue
+		} else if sc.Text() == "" {
+			continue
+		}
+
+		k, v, ok := strings.Cut(strings.TrimSpace(sc.Text()), " ")
+		if !ok {
+			return nil, fmt.Errorf("key %q: expected space after key", k)
+		}
+		v = strings.TrimSpace(v)
+		switch k {
+		case "token":
+			conf.Token = v
+		case "url":
+			conf.BaseURL = v
+		case "model":
+			conf.DefaultModel = v
+		default:
+			return nil, fmt.Errorf("unknown configuration key %q", k)
+		}
+	}
+	return &conf, sc.Err()
+}