forked from CarletonCognitiveModelingLab/python_actr
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: Example1Hello.py
More file actions
42 lines (36 loc) · 1.07 KB
/
Example1Hello.py
File metadata and controls
42 lines (36 loc) · 1.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
#!pip install python_actr #uncomment this if needed
from python_actr import *
class MyEnv(Model):
    """Minimal python_actr environment: an empty Model that only hosts the agent."""
    pass
class MyAgent(ACTR):
    """ACT-R agent that prints "Hello World!" once.

    NOTE(review): python_actr derives production rules from method
    signatures — a keyword default such as goal="helloworld" is the
    rule's firing condition, not an ordinary argument. Do not
    restructure these method bodies casually.
    """
    goal = Buffer()  # Creating the goal buffer for the agent

    def init():  # this rule fires once, when the agent is instantiated
        goal.set("helloworld")  # set goal buffer to direct program flow

    def bread_bottom(goal="helloworld"):  # fires when goal == "helloworld"
        print ("Hello World!")
        goal.set("stop")  # set goal buffer to direct program flow

    # Commented-out stop rule kept as in the original: with it disabled,
    # no production matches goal="stop", so the model idles after printing.
    #def stop_production(goal="stop"):
        #self.stop() # stop the agent
# Follower counts on X for each tracked celebrity-scientist handle.
celscilist = {
    handle: {'x': {'followers': count}}
    for handle, count in (('dd', 400), ('jp', 300), ('sh', 200), ('gs', 100))
}
class celsci():
    """Namespace for classifying a celebrity scientist's fame."""

    @staticmethod  # no instance state is used; also keeps instance calls safe
    def fame(x, threshold=300):
        """Classify a follower count as 'high' or 'low'.

        Args:
            x: follower count to classify.
            threshold: count that must be strictly exceeded to rate
                'high' (default 300, matching the original behavior —
                exactly 300 followers is 'low').

        Returns:
            'high' if x > threshold, else 'low'.
        """
        return 'high' if x > threshold else 'low'
def checkfame(x):
    """Return the fame rating ('high'/'low') for the handle *x* in celscilist."""
    followers = celscilist[x]['x']['followers']
    return celsci.fame(followers)
# Print the fame classification for every tracked handle.
for handle in celscilist:
    print(handle, checkfame(handle))
# Wire the model together: one agent placed inside one environment.
tim = MyAgent()
subway=MyEnv()
subway.agent=tim  # register the agent with the environment
log_everything(subway)  # python_actr helper: trace all model events
subway.run()  # start the simulation