Blame


1 3037a80d 2024-08-18 o #!/usr/bin/env python
2 3037a80d 2024-08-18 o
3 3037a80d 2024-08-18 o import json
4 3037a80d 2024-08-18 o import os
5 3037a80d 2024-08-18 o import sys
6 3037a80d 2024-08-18 o import urllib.request
7 3037a80d 2024-08-18 o
# Groq's OpenAI-compatible chat-completions endpoint.
url = "https://api.groq.com/openai/v1/chat/completions"
# Small models available on Groq include:
#   llama-3.1-8b-instant
#   llama-3.2-3b-preview
#   llama-3.2-1b-preview
model = "llama-3.1-8b-instant"   # default: fast/cheap model
big = "llama-3.3-70b-versatile"  # selected with the -b flag below
15 3037a80d 2024-08-18 o
def read_token(name):
    """Return the API token stored in file *name*, with surrounding whitespace stripped.

    Raises OSError (e.g. FileNotFoundError) if the file cannot be read.
    """
    # Explicit encoding: token files are plain ASCII/UTF-8 text, and relying
    # on the locale default encoding is fragile across systems.
    with open(name, encoding="utf-8") as f:
        return f.read().strip()
19 3037a80d 2024-08-18 o
# Token lives at ~/.config/groq/token.  Use expanduser("~") rather than
# os.getenv("HOME"): getenv returns None when HOME is unset, which would
# make os.path.join raise a confusing TypeError.
tpath = os.path.join(os.path.expanduser("~"), ".config/groq/token")
token = read_token(tpath)

# "-b" as the first argument switches to the big model for harder prompts.
if len(sys.argv) > 1 and sys.argv[1] == "-b":
    model = big

# The whole of stdin is the user prompt.
prompt = sys.stdin.read()
message = {"messages": [{"role": "user", "content": prompt}], "model": model}
27 3037a80d 2024-08-18 o
# Build the POST request.  Headers are supplied up front as a dict;
# the User-Agent is spoofed because groq blocks urllib's default agent.
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer " + token,
    "User-Agent": "curl/8.9.0",
}
req = urllib.request.Request(url, data=json.dumps(message).encode(), headers=headers)
33 3037a80d 2024-08-18 o
# Send the request, decode the JSON reply, and print the first
# completion choice's message text.
with urllib.request.urlopen(req) as resp:
    reply = json.loads(resp.read())
    print(reply["choices"][0]["message"]["content"])