• R/O
  • HTTP
  • SSH
  • HTTPS

Commit

Tags
Keine Tags

Frequently used words (click to add to your profile)

java, c++, android, linux, c#, windows, objective-c, cocoa, 誰得, qt, python, php, ruby, game, gui, bathyscaphe, c, 計画中(planning stage), 翻訳, omegat, framework, twitter, dom, test, vb.net, directx, ゲームエンジン, btron, arduino, previewer

Emergent generative agents


Commit MetaInfo

Revision c85f6b41fc7a2feabf601e41537d7cefa0f4c4fa (tree)
Zeit 2023-04-10 03:57:49
Autor Corbin <cds@corb...>
Commiter Corbin

Log Message

IRC stuff.

Multiple channels, timestamps, better formatting of log lines, log
storage, and small tweaks.

Zusammenfassung ändern

Diff

--- a/agent.py
+++ b/agent.py
@@ -1,7 +1,11 @@
11 #!/usr/bin/env nix-shell
22 #! nix-shell -i python3 -p python3Packages.irc python3Packages.transformers python3Packages.torch
33
4+from concurrent.futures import ThreadPoolExecutor
5+from datetime import datetime
46 import json
7+import os.path
8+import random
59 import sys
610
711 from irc.bot import SingleServerIRCBot
@@ -52,58 +56,95 @@ Goals, working memory, notes: {allGoals}
5256 def load_character(path):
5357 with open(path) as handle: return json.load(handle)
5458
55-characters = [load_character(arg) for arg in sys.argv[1:]]
56-prompts = [build_prompt(**character) for character in characters]
57-prompt, title = prompts.pop(0)
59+character = load_character(sys.argv[2])
60+startingChannels = character.pop("startingChannels")
61+prompt, title = build_prompt(**character)
5862
5963 MAX_NEW_TOKENS = 128
60-gen = CamelidGen()
61-gen = HFGen(Flavor.GPTNeo, MAX_NEW_TOKENS)
64+if sys.argv[1] == "llama":
65+ print("~ Initializing camelid adapter…")
66+ gen = CamelidGen()
67+else:
68+ print("~ Initializing GPT-Neo on HF…")
69+ gen = HFGen(Flavor.GPTNeo, MAX_NEW_TOKENS)
6270 max_context_length = gen.contextLength()
6371
72+executor = ThreadPoolExecutor(max_workers=1)
73+
6474 class Agent(SingleServerIRCBot):
65- def __init__(self, host, title):
75+ def __init__(self, host, title, startingChannels, logpath):
6676 super(Agent, self).__init__([(host, 6667)], title_to_nick(title), title)
67- self.log = Log([])
77+ self.startingChannels = startingChannels
78+ self.logpath = logpath
79+ self.logs = {}
80+
81+ def on_join(self, c, e):
82+ channel = e.target
83+ c.topic(channel)
84+ try:
85+ with open(os.path.join(self.logpath, channel + ".txt"), "r") as f:
86+ self.logs[channel] = Log(f.read().strip().split("\n"))
87+ except IOError: self.logs[channel] = Log([])
6888
69- def on_join(self, c, e): c.topic(e.target)
7089 def on_currenttopic(self, c, e):
7190 self.channels[e.arguments[0]].topic = e.arguments[1]
7291
73- def on_welcome(self, c, e): c.join("#treehouse")
92+ def on_welcome(self, c, e):
93+ for channel in self.startingChannels: c.join(channel)
7494
7595 def on_pubmsg(self, c, e):
7696 line = e.arguments[0]
77- self.log.push(e.source.nick, line)
97+ channel = e.target
98+ log = self.logs[channel]
99+ log.irc(datetime.now(), e.source.nick, line)
78100 # Dispatch in the style of
79101 # https://github.com/jaraco/irc/blob/main/scripts/testbot.py
80- if ":" in line:
81- prefix = lower(self.connection.get_nickname()) + ":"
82- if lower(line).startswith(prefix):
83- self.do_command(c, e, line[len(prefix):].strip())
84-
85- def do_command(self, c, e, inst):
86- channel = e.target
87- nick = self.connection.get_nickname()
88- users = self.channels[channel].users()
89- prefix = nick + ":"
90- fullPrompt = prompt + self.chatPrompt(channel)
91- self.log.bumpCutoff(max_context_length, gen.countTokens, fullPrompt, prefix)
92- s = self.log.finishPrompt(fullPrompt, prefix)
93- print("~ log cutoff:", self.log.cutoff,
94- "prompt length (tokens):", gen.countTokens(s))
95- # Hack: GPT-Neo tries to smuggle messages, so forbid it.
96- forbidden = [prefix] + ["." + user for user in users]
97- reply = parseLine(gen.complete(s), forbidden)
98- self.log.push(nick, reply)
99- c.privmsg(channel, reply)
102+ nick = lower(self.connection.get_nickname())
103+ colon = nick + ":"
104+ comma = nick + ","
105+ lowered = lower(line)
106+ if colon in lowered or comma in lowered:
107+ self.do_command(c, e, line[len(colon):].strip())
108+ elif nick in lowered and random.random() <= 0.875:
109+ self.generateReply(c, channel)
110+ elif random.random() <= 0.125: self.generateReply(c, channel)
111+
112+ def do_command(self, c, e, inst): self.generateReply(c, e.target)
100113
101114 def chatPrompt(self, channel):
102- topic = getattr(self.channels[channel], "topic", "no topic")
115+ c = self.channels[channel]
116+ topic = getattr(c, "topic", "no topic")
117+ users = ", ".join(c.users())
103118 return f"""
104119 IRC channel: {channel}
105120 Channel topic: {topic}
121+Channel users: {users}
106122 """
107123
108-agent = Agent("june.local", title)
109-agent.start()
124+ def generateReply(self, c, channel):
125+ log = self.logs[channel]
126+ nick = self.connection.get_nickname()
127+ users = self.channels[channel].users()
128+ prefix = nick + ":"
129+ fullPrompt = prompt + self.chatPrompt(channel)
130+ log.bumpCutoff(max_context_length, gen.countTokens, fullPrompt, prefix)
131+ s = log.finishPrompt(fullPrompt, prefix)
132+ print("~ log cutoff:", log.cutoff,
133+ "prompt length (tokens):", gen.countTokens(s))
134+ forbidden = [prefix] + ["." + user for user in users]
135+ # NB: At this point, execution is kicked out to a thread.
136+ def cb(completion):
137+ reply = parseLine(completion.result(), forbidden)
138+ log.irc(datetime.now(), nick, reply)
139+ c.privmsg(channel, reply)
140+ executor.submit(lambda: gen.complete(s)).add_done_callback(cb)
141+
142+logpath = sys.argv[3]
143+agent = Agent("june.local", title, startingChannels, logpath)
144+try: agent.start()
145+except KeyboardInterrupt:
146+ print("~ Saving logs…")
147+ for channel, log in agent.logs.items():
148+ with open(os.path.join(agent.logpath, channel + ".txt"), "w") as f:
149+ f.write("\n".join(log.l))
150+print("~ Quitting, bye!")
--- a/common.py
+++ b/common.py
@@ -22,6 +22,8 @@ class Log:
2222 self.stamp += 1
2323
2424 def push(self, speaker, entry): self.raw(speaker + ": " + entry)
25+ def irc(self, t, speaker, entry):
26+ self.raw(f"{t:%H:%M:%S} <{speaker}> {entry}")
2527
2628 def finishPrompt(self, s, prefix):
2729 return self.finishPromptAtCutoff(self.cutoff, s, prefix)
--- a/gens/camelid.py
+++ b/gens/camelid.py
@@ -4,9 +4,10 @@ from common import Timer
44
55 llama = [
66 "/home/simpson/models/result/bin/llama-cpp",
7- "-m", "/home/simpson/models/13b-4bit.bin",
8- "-f", "/dev/stdin",
9- "-c", "2048",
7+ "-t", "3",
8+ "-m", "/home/simpson/models/7b-4bit.bin",
9+ "-f", "/dev/stdin",
10+ "-c", "2048",
1011 ]
1112
1213 class CamelidGen: