CognitiveScience committed
Commit d31e9b7
1 Parent(s): efae330

Create dcogsphere.py

Files changed (1)
  1. dcogsphere.py +38 -0
dcogsphere.py ADDED
@@ -0,0 +1,38 @@
+ # To run Python-ACT-R in a notebook, install the package first if it is not installed already (a short install sketch follows this file):
+ from python_actr import *
+
+ # The wildcard import above assumes python_actr is already installed and brings in
+ # every name the model needs: from python_actr import *
+ # There is therefore no need to import the following modules one by one, but you can
+ # uncomment and use them individually, when needed, instead of importing everything at once:
+ #import python_actr
+ #from python_actr import log
+ #from python_actr import ACTR
+ #from python_actr import Model
+ #from python_actr import Buffer
+ #from python_actr import Memory
+ #from python_actr import DMSpreading
+ #from python_actr import log_everything
+ class MyEnv(Model):                              # minimal environment for the agent to live in
+     pass
+ class MyAgent(ACTR):
+     production_time = 0.05                       # mean time (s) for a production to fire
+     production_sd = 0.01                         # noise (standard deviation) on the firing time
+     production_threshold = -20                   # utility threshold below which productions do not fire
+
+     goal = Buffer()                              # create the goal buffer for the agent
+
+     def init():                                  # this rule fires when the agent is instantiated
+         goal.set("sandwich bread")               # set the goal buffer to direct program flow
+     def bread_bottom(goal="sandwich bread"):     # fires when the goal buffer matches "sandwich bread"
+         print("I have a piece of bread")
+         #logging.warning("I have a piece of bread")
+         goal.set("stop")                         # set the goal buffer to direct program flow
+     def stop_production(goal="stop"):
+         self.stop()                              # stop the agent
+
+ tim = MyAgent()                                  # create the agent
+ subway = MyEnv()                                 # create the environment
+ subway.agent = tim                               # place the agent in the environment
+ log_everything(subway)                           # log all buffer and production activity
+ subway.run()                                     # run the simulation
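
The comment at the top of dcogsphere.py mentions installing the package in a notebook, but the commit itself only imports it. A minimal sketch of that install-then-import step, assuming the package is published on PyPI under the name python_actr and that this runs in a notebook or bootstrap cell:

    # Install python_actr if it is missing, then do the same wildcard import as dcogsphere.py.
    import importlib.util
    import subprocess
    import sys

    if importlib.util.find_spec("python_actr") is None:      # package not installed yet
        subprocess.check_call([sys.executable, "-m", "pip", "install", "python_actr"])

    from python_actr import *                                 # same import as in dcogsphere.py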
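
The model sequences its behaviour through the goal buffer: each production fires when the goal matches its pattern and then rewrites the goal to hand control to the next production, until stop_production halts the agent. A sketch of how an extra step could be chained into the same pattern, using only the calls already present in this file; the added production name and goal strings are hypothetical:

    from python_actr import *

    class MyAgentTwoSteps(ACTR):
        goal = Buffer()                              # goal buffer drives the flow of control

        def init():                                  # fires when the agent is instantiated
            goal.set("sandwich bread")

        def bread_bottom(goal="sandwich bread"):     # step 1: matches the initial goal
            print("I have a piece of bread")
            goal.set("sandwich cheese")              # hand off to the next production

        def add_cheese(goal="sandwich cheese"):      # step 2: hypothetical extra production
            print("I have a slice of cheese")
            goal.set("stop")

        def stop_production(goal="stop"):            # final step: halt the agent
            self.stop()

Because each production only fires on an exact goal match, the goal strings act as a small state machine that determines the order of the steps.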