# To run Python ACT-R in a notebook, the python_actr package must be installed.
# Once it is installed, a single `from python_actr import *` would bring in every
# module at once; the explicit imports below are listed one by one instead, to
# make clear which parts of the package this model actually uses.
#from python_actr import *
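
# A minimal sketch of the install-if-missing step described above (this assumes
# the package is published on PyPI under the name "python_actr"):
import importlib.util
import subprocess
import sys

if importlib.util.find_spec("python_actr") is None:  # not installed yet?
    subprocess.check_call([sys.executable, "-m", "pip", "install", "python_actr"])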
import python_actr
from python_actr import log
from python_actr import ACTR
from python_actr import Model
from python_actr import Buffer
from python_actr import Memory
from python_actr import DMSpreading
from python_actr import log_everything

#log(html=False)  # optional: create a plain-text logger (log's html flag controls HTML output)

class MyEnv(Model):  # a minimal (empty) environment for the agent to run in
  pass
class MyAgent(ACTR):
  production_time = 0.05      # mean time (s) for a production to fire; 50 ms is the ACT-R default
  production_sd = 0.01        # standard deviation of the production firing time
  production_threshold = -20  # utility threshold below which a production will not fire
  
  goal = Buffer() # Creating the goal buffer for the agent
  
  def init(): # this rule fires when the agent is instantiated.
    goal.set("sandwich bread") # set goal buffer to direct program flow
  def bread_bottom(goal="sandwich bread"): # fires when the goal buffer matches "sandwich bread"
    print("I have a piece of bread")
    goal.set("stop") # set the goal buffer to direct program flow
  def stop_production(goal="stop"): # fires when the goal buffer matches "stop"
    self.stop() # stop the agent

tim = MyAgent()          # instantiate the agent
subway = MyEnv()         # instantiate the environment
subway.agent = tim       # place the agent inside the environment
#log_everything(subway)  # optional: print a trace of every event in the model
subway.run()             # run the simulation until self.stop() is called
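
# Expected behaviour (a sketch, assuming the standard python_actr production
# cycle): init() sets the goal to "sandwich bread", bread_bottom fires and
# prints "I have a piece of bread", then stop_production halts the run.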