Commit Diff


commit - 1e0f74802b07d3bb880dbd00370afb5584c97496
commit + 3037a80d2ff3693e0ce53a11bdbcea3b33145433
blob - a1be4d332d42e23347d20b56672911e56fb1d11b
blob + 27180db849cb325cddfa1178a350ae7103990d8e
--- README
+++ README
@@ -8,5 +8,6 @@ hits - count web traffic
 hlsget - download the contents of a HLS playlist
 jsfmt - format javascript source code
 lemmyverse - find lemmy communities
+llama - prompt a large language model
 webpaste - create a web paste on webpaste.olowe.co
 xstream - stream X display over the network
blob - /dev/null
blob + 0154aacfc12328ee609fd5adb4a8e439e8c10bf3 (mode 755)
--- /dev/null
+++ bin/llama
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import json
+import os
+import sys
+import urllib.request
+
+# these models hallucinate
+# model = "llama3.1-8b-instant"
+# model = "llama-3.1-70b-versatile"
+
+url = "https://api.groq.com/openai/v1/chat/completions"
+model = "llama3-8b-8192"
+big =  "llama3-70b-8192"
+
+def read_token(name):
+	with open(name) as f:
+		return f.read().strip()
+
+tpath = os.path.join(os.getenv("HOME"), ".config/groq/token")
+token = read_token(tpath)
+
+if len(sys.argv) > 1 and sys.argv[1] == "-b":
+	model = big
+prompt = sys.stdin.read()
+message = {"messages": [{"role": "user","content": prompt}], "model": model}
+
+req = urllib.request.Request(url, json.dumps(message).encode())
+req.add_header("Content-Type", "application/json")
+req.add_header("Authorization", "Bearer "+token)
+# groq blocks urllib's user agent?!
+req.add_header("User-Agent", "curl/8.9.0")
+
+with urllib.request.urlopen(req) as resp:
+	reply = json.load(resp)
+	print(reply["choices"][0]["message"]["content"])
blob - /dev/null
blob + 522c0479bd640805b911a31bca763d9286d4b450 (mode 644)
--- /dev/null
+++ man/llama.1
@@ -0,0 +1,27 @@
+.Dd $Mdocdate$
+.Dt LLAMA 1
+.Sh NAME
+.Nm llama
+.Nd prompt a large language model
+.Sh SYNOPSIS
+.Nm
+.Op Fl b
+.Sh DESCRIPTION
+.Nm
+reads a prompt from the standard input
+and sends it to a large language model hosted by Groq.
+The reply is written to the standard output.
+The default model is Llama 3 8B.
+.Pp
+A Groq API token must be written to
+.Pa $HOME/.config/groq/token .
+.Pp
+The following flags are understood:
+.Bl -tag -width Ds
+.It Fl b
+Prompt the "bigger" 70B model.
+.El
+.Sh EXAMPLES
+.Dl echo 'What is LLM slop?' | llama
+.Sh EXIT STATUS
+.Ex -std