repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
Themis | Themis-master/Themis2.0/loan_3.py | import sys
sex = sys.argv[1]
race = sys.argv[2]
income = sys.argv[3]
# third case
if income == "0...50000":
print ("0")
else:
print ("1") | 140 | 13.1 | 25 | py |
Themis | Themis-master/Themis2.0/themis.py | # Themis 2.0
#
# By: Rico Angell
from __future__ import division
import argparse
import subprocess
from itertools import chain, combinations, product
import math
import random
import scipy.stats as st
import xml.etree.ElementTree as ET
class Input:
"""
Class to define an input characteristic to the software.
Attributes
----------
name : str
Name of the input.
values : list
List of the possible values for this input.
Methods
-------
get_random_input()
Returns a random element from the values list.
"""
def __init__(self, name="", values=[]):
try:
self.name = name
self.values = [str(v) for v in values]
except:
print("Themis input initialization corrupted!")
def get_random_input(self):
"""
Return a random value from self.values
"""
try:
return random.choice(self.values)
except:
print("Error in get_random_input")
def __str__(self):
try:
s = "\nInput\n"
s += "-----\n"
s += "Name: " + self.name + "\n"
s += "Values: " + ", ".join(self.values)
return s
except:
print("Issue with returning a string representation of the input")
__repr__ = __str__
class Test:
"""
Data structure for storing tests.
Attributes
----------
function : str
Name of the function to call
i_fields : list of `Input.name`
        The inputs of interest, i.e. compute the causal discrimination wrt
these fields.
threshold : float in [0,1]
At least this level of discrimination to be considered.
conf : float in [0, 1]
        The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
group : bool
Search for group discrimination if `True`.
causal : bool
Search for causal discrimination if `True`.
"""
def __init__(self, function="", i_fields=[], conf=0.999, margin=0.0001,
group=False, causal=False, threshold=0.2):
try:
self.function = function
self.i_fields = i_fields
self.conf = conf
self.margin = margin
self.group = group
self.causal = causal
self.threshold = threshold
except:
print("Themis test initialization input")
def __str__(self):
try:
s = "Test: " + self.function + "\n"
if self.function != "discrimination_search":
s += "Characteristics: " + ", ".join(self.i_fields) + "\n"
else:
s += "Threshold: " + str(self.threshold) + "\n"
s += "Group: " + str(self.group) + "\n"
s += "Causal: " + str(self.causal) + "\n"
s += "Confidence: " + str(self.conf) + "\n"
s += "Margin: " + str(self.margin) + "\n"
return s
except:
print("Issue with returning a string version of the test dettails")
__repr__ = __str__
class Themis:
"""
Compute discrimination for a piece of software.
Attributes
----------
Methods
-------
"""
def __init__(self, xml_fname=""):
"""
Initialize Themis from xml file.
Parameters
----------
xml_fname : string
name of the xml file we want to import settings from.
"""
assert xml_fname != ""
try:
tree = ET.parse(xml_fname)
root = tree.getroot()
self.max_samples = int(root.find("max_samples").text)
self.min_samples = int(root.find("min_samples").text)
self.rand_seed = int(root.find("seed").text)
self.software_name = root.find("name").text
self.command = root.find("command").text.strip()
self._build_input_space(args=root.find("inputs"))
self._load_tests(args=root.find("tests"))
self._cache = {}
except:
print("issue with reading xml file and initializing Themis")
def run(self):
"""
Run Themis tests specified in the configuration file.
"""
try:
print ("\nTHEMIS 2.0")
print ("----------")
print("SOFTWARE NAME: " + self.software_name)
print("MAX SAMPLES: " + str(self.max_samples))
print("MIN SAMPLES: " + str(self.min_samples))
print("COMMAND: " + self.command)
print ("RANDOM SEED: " + str(self.rand_seed))
for test in self.tests:
random.seed(self.rand_seed)
print ("--------------------------------------------------")
if test.function == "causal_discrimination":
suite, p = self.causal_discrimination(i_fields=test.i_fields,
conf=test.conf,
margin=test.margin)
print (test)
print ("Score: ", p)
elif test.function == "group_discrimination":
suite, p = self.group_discrimination(i_fields=test.i_fields,
conf=test.conf,
margin=test.margin)
print (test)
print ("Score: ", p)
elif test.function == "discrimination_search":
g, c = self.discrimination_search(threshold=test.threshold,
conf=test.conf,
margin=test.margin,
group=test.group,
causal=test.causal)
print (test)
if g:
print ("Group")
print ("-----")
for key, value in g.items():
print (", ".join(key) + " --> " + str(value))
print ("")
if c:
print ("Causal")
print ("------")
for key, value in c.items():
print (", ".join(key) + " --> " + str(value))
print ("--------------------------------------------------")
except:
print("Issue in main Themis run")
def group_discrimination(self, i_fields=None, conf=0.999, margin=0.0001):
"""
Compute the group discrimination for characteristics `i_fields`.
Parameters
----------
i_fields : list of `Input.name`
        The inputs of interest, i.e. compute the causal discrimination wrt
these fields.
conf : float in [0, 1]
        The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
Returns
-------
tuple
* list of dict
The test suite used to compute group discrimination.
* float
The percentage of group discrimination
"""
assert i_fields != None
try:
min_group, max_group, test_suite, p = float("inf"), 0, [], 0
rand_fields = self._all_other_fields(i_fields)
for fixed_sub_assign in self._gen_all_sub_inputs(args=i_fields):
count = 0
for num_sampled in range(1, self.max_samples):
assign = self._new_random_sub_input(args=rand_fields)
assign.update(fixed_sub_assign)
self._add_assignment(test_suite, assign)
count += self._get_test_result(assign=assign)
p, end = self._end_condition(count, num_sampled, conf, margin)
if end:
break
min_group = min(min_group, p)
max_group = max(max_group, p)
return test_suite, (max_group - min_group)
except:
print("Issue in group_discrimination")
def causal_discrimination(self, i_fields=None, conf=0.999, margin=0.0001):
"""
Compute the causal discrimination for characteristics `i_fields`.
Parameters
----------
i_fields : list of `Input.name`
            The inputs of interest, i.e. compute the causal discrimination wrt
these fields.
conf : float in [0, 1]
            The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
Returns
-------
tuple
* list of dict
The test suite used to compute causal discrimination.
* float
The percentage of causal discrimination.
"""
try:
assert i_fields != None
count, test_suite, p = 0, [], 0
f_fields = self._all_other_fields(i_fields) # fixed fields
for num_sampled in range(1, self.max_samples):
fixed_assign = self._new_random_sub_input(args=f_fields)
singular_assign = self._new_random_sub_input(args=i_fields)
assign = self._merge_assignments(fixed_assign, singular_assign)
self._add_assignment(test_suite, assign)
result = self._get_test_result(assign=assign)
for dyn_sub_assign in self._gen_all_sub_inputs(args=i_fields):
if dyn_sub_assign == singular_assign:
continue
assign.update(dyn_sub_assign)
self._add_assignment(test_suite, assign)
if self._get_test_result(assign=assign) != result:
count += 1
break
p, end = self._end_condition(count, num_sampled, conf, margin)
if end:
break
return test_suite, p
except:
print("Issue in causal discrimination")
def discrimination_search(self, threshold=0.2, conf=0.99, margin=0.01,
group=False, causal=False):
"""
        Find all minimal subsets of characteristics that discriminate.
Choose to search by group or causally and set a threshold for
discrimination.
Parameters
----------
threshold : float in [0,1]
At least this level of discrimination to be considered.
conf : float in [0, 1]
            The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
group : bool
Search for group discrimination if `True`.
causal : bool
Search for causal discrimination if `True`.
Returns
-------
tuple of dict
The lists of subsets of the input characteristics that discriminate.
"""
try:
assert group or causal
group_d_scores, causal_d_scores = {}, {}
for sub in self._all_relevant_subs(self.input_order):
if self._supset(list(set(group_d_scores.keys())|
set(causal_d_scores.keys())), sub):
continue
if group:
_, p = self.group_discrimination(i_fields=sub, conf=conf,
margin=margin)
if p > threshold:
group_d_scores[sub] = p
if causal:
_, p = self.causal_discrimination(i_fields=sub, conf=conf,
margin=margin)
if p > threshold:
causal_d_scores[sub] = p
return group_d_scores, causal_d_scores
except:
print("Issue in trying to search for discrimination")
def _all_relevant_subs(self, xs):
try:
return chain.from_iterable(combinations(xs, n) \
for n in range(1, len(xs)))
except:
print("Issue in returning reltive ssubsets. Possible a divide by zer error")
def _supset(self, list_of_small, big):
try:
for small in list_of_small:
next_subset = False
for x in small:
if x not in big:
next_subset = True
break
if not next_subset:
return True
except:
print("Issue in finding superset")
def _new_random_sub_input(self, args=[]):
try:
assert args
return {name : self.inputs[name].get_random_input() for name in args}
except:
print("Issue in getting a random subset")
def _gen_all_sub_inputs(self, args=[]):
assert args
try:
vals_of_args = [self.inputs[arg].values for arg in args]
combos = [list(elt) for elt in list(product(*vals_of_args))]
return ({arg : elt[idx] for idx, arg in enumerate(args)} \
for elt in combos)
except:
print("Issue in generate all")
def _get_test_result(self, assign=None):
assert assign != None
try:
tupled_args = self._tuple(assign)
if tupled_args in self._cache.keys():
return self._cache[tupled_args]
cmd = self.command + " " + " ".join(tupled_args)
output = subprocess.getoutput(cmd).strip()
            self._cache[tupled_args] = (output == "1")
return self._cache[tupled_args]
except:
print("Issue in getting the results of the tests")
def _add_assignment(self, test_suite, assign):
try:
if assign not in test_suite:
test_suite.append(assign)
except:
print("Issue in assigining to the test_suite")
def _all_other_fields(self, i_fields):
try:
return [f for f in self.input_order if f not in i_fields]
except:
print("Issue in _all_other_fields")
def _end_condition(self, count, num_sampled, conf, margin):
try:
p = 0
if num_sampled > self.min_samples:
p = count / num_sampled
error = st.norm.ppf(conf)*math.sqrt((p*(1-p))/num_sampled)
return p, error < margin
return p, False
except:
print("Issue in _end_condition. Possibly a divide by zero error")
def _merge_assignments(self, assign1, assign2):
try:
merged = {}
merged.update(assign1)
merged.update(assign2)
return merged
except:
print("Issue in merging assigments")
def _tuple(self, assign=None):
try:
assert assign != None
return tuple(str(assign[name]) for name in self.input_order)
except:
print("Issue in generating tuples for tests")
def _untuple(self, tupled_args=None):
assert tupled_args != None
try:
listed_args = list(tupled_args)
return {name : listed_args[idx] \
for idx, name in enumerate(self.input_order)}
except:
print("Issue in untupling")
def _build_input_space(self, args=None):
assert args != None
try:
self.inputs = {}
self.input_order = []
for obj in args.findall("input"):
name = obj.find("name").text
values = []
t = obj.find("type").text
if t == "categorical":
values = [elt.text
for elt in obj.find("values").findall("value")]
elif t == "continuousInt":
values = range(int(obj.find("bounds").find("lowerbound").text),
int(obj.find("bounds").find("upperbound").text)+1)
else:
assert False
self.inputs[name] = Input(name=name, values=values)
self.input_order.append(name)
except:
print("Issue in building the input space/scope. Major problem")
def _load_tests(self, args=None):
assert args != None
try:
self.tests = []
for obj in args.findall("test"):
test = Test()
test.function = obj.find("function").text
if test.function == "causal_discrimination" or \
test.function == "group_discrimination":
test.i_fields = [elt.text
for elt in obj.find("i_fields").findall("input_name")]
if test.function == "discrimination_search":
test.group = bool(obj.findall("group"))
test.causal = bool(obj.findall("causal"))
test.threshold = float(obj.find("threshold").text)
test.conf = float(obj.find("conf").text)
test.margin = float(obj.find("margin").text)
self.tests.append(test)
except:
print("Issue in loading the tests")
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description="Run Themis.")
parser.add_argument("XML_FILE", type=str, nargs=1,
help="XML configuration file")
args = parser.parse_args()
t = Themis(xml_fname=args.XML_FILE[0])
t.run()
except:
print("Issue in the main call to Themis i.e. Driver")
| 18,139 | 35.720648 | 88 | py |
Themis | Themis-master/Themis2.0/test/test_input.py | import pytest
import themis
def test_init():
_input = themis.Input(name="Sex", values=["Male", "Female"])
assert _input.name == "Sex"
assert _input.values == ["Male", "Female"]
    assert len(_input.values) == 2
def test_get_random_input():
_input = themis.Input(name="Sex", values=["Male", "Female"])
for i in range(10):
r_input = _input.get_random_input()
assert r_input == "Male" or r_input == "Female"
| 443 | 28.6 | 64 | py |
Themis | Themis-master/Themis2.0/test/software.py | import sys
sex = sys.argv[1]
race = sys.argv[3]
if(sex=="Male" and race=="Red"):
print "1"
else:
print "0"
| 116 | 12 | 32 | py |
Themis | Themis-master/Themis2.0/test/test_themis.py | from itertools import chain, combinations
import pytest
import themis
def test_init():
t = themis.Themis(xml_fname="settings.xml")
assert t.input_order == ["Sex", "Age", "Race", "Income"]
assert t.command == "python software.py"
def test_tuple():
t = themis.Themis(xml_fname="settings.xml")
    assign = t._new_random_sub_input(args=t.input_order)
tupled_args = t._tuple(assign=assign)
assert assign == t._untuple(tupled_args = tupled_args)
def test_gen_all_sub_inputs():
t = themis.Themis(xml_fname="settings.xml")
    t._gen_all_sub_inputs(args=t.input_order)
def test_get_test_result():
t = themis.Themis(xml_fname="settings.xml")
    assign = t._new_random_sub_input(args=t.input_order)
    if assign["Sex"] == "Male" and assign["Race"] == "Red":
        assert t._get_test_result(assign=assign)
    else:
        assert not t._get_test_result(assign=assign)
def test_group_discrimination():
t = themis.Themis(xml_fname="settings.xml")
print "\nGroup:"
for f in t._all_relevant_subs(["Sex", "Race", "Age", "Income"]):
_, p = t.group_discrimination(i_fields=f)
print f, "--> ", p
def test_causal_discrimination():
t = themis.Themis(xml_fname="settings.xml")
print "\nCausal:"
for f in t._all_relevant_subs(["Sex", "Race", "Age", "Income"]):
_, p = t.causal_discrimination(i_fields=f)
print f, "--> ", p
def test_discrimination_search():
t = themis.Themis(xml_fname="settings.xml")
group_subs, causal_subs = t.discrimination_search(group=True, causal=True)
print "\n"
print "Group: ", group_subs
print "Causal: ", causal_subs
| 1,640 | 31.82 | 78 | py |
LambdaMart | LambdaMart-master/test.py | from lambdamart import LambdaMART
import numpy as np
import pandas as pd
def get_data(file_loc):
f = open(file_loc, 'r')
data = []
for line in f:
new_arr = []
arr = line.split(' #')[0].split()
score = arr[0]
q_id = arr[1].split(':')[1]
new_arr.append(int(score))
new_arr.append(int(q_id))
arr = arr[2:]
for el in arr:
new_arr.append(float(el.split(':')[1]))
data.append(new_arr)
f.close()
return np.array(data)
def group_queries(data):
query_indexes = {}
index = 0
for record in data:
query_indexes.setdefault(record[1], [])
query_indexes[record[1]].append(index)
index += 1
return query_indexes
def main():
total_ndcg = 0.0
for i in [1,2,3,4,5]:
print 'start Fold ' + str(i)
training_data = get_data('Fold%d/train.txt' % (i))
test_data = get_data('Fold%d/test.txt' % (i))
model = LambdaMART(training_data, 300, 0.001, 'sklearn')
model.fit()
model.save('lambdamart_model_%d' % (i))
# model = LambdaMART()
# model.load('lambdamart_model.lmart')
average_ndcg, predicted_scores = model.validate(test_data, 10)
print average_ndcg
total_ndcg += average_ndcg
total_ndcg /= 5.0
print 'Original average ndcg at 10 is: ' + str(total_ndcg)
total_ndcg = 0.0
for i in [1,2,3,4,5]:
print 'start Fold ' + str(i)
training_data = get_data('Fold%d/train.txt' % (i))
test_data = get_data('Fold%d/test.txt' % (i))
model = LambdaMART(training_data, 300, 0.001, 'original')
model.fit()
model.save('lambdamart_model_sklearn_%d' % (i))
# model = LambdaMART()
# model.load('lambdamart_model.lmart')
average_ndcg, predicted_scores = model.validate(test_data, 10)
print average_ndcg
total_ndcg += average_ndcg
total_ndcg /= 5.0
print 'Sklearn average ndcg at 10 is: ' + str(total_ndcg)
# print 'NDCG score: %f' % (average_ndcg)
# query_indexes = group_queries(test_data)
# index = query_indexes.keys()[0]
# testdata = [test_data[i][0] for i in query_indexes[index]]
# pred = [predicted_scores[i] for i in query_indexes[index]]
# output = pd.DataFrame({"True label": testdata, "prediction": pred})
# output = output.sort('prediction',ascending = False)
# output.to_csv("outdemo.csv", index =False)
# print output
# # for i in query_indexes[index]:
# # print test_data[i][0], predicted_scores[i]
if __name__ == '__main__':
main() | 2,310 | 28.253165 | 70 | py |
LambdaMart | LambdaMart-master/RegressionTree.py | # regression tree
# input is a dataframe of features
# the corresponding y value(called labels here) is the scores for each document
import pandas as pd
import numpy as np
from multiprocessing import Pool
from itertools import repeat
import scipy
import scipy.optimize
node_id = 0
def get_splitting_points(args):
# given a list
# return a list of possible splitting values
attribute, col = args
attribute.sort()
possible_split = []
for i in range(len(attribute)-1):
if attribute[i] != attribute[i+1]:
possible_split.append(np.mean((attribute[i],attribute[i+1])))
return possible_split, col
# create a dictionary, key is the attribute number, value is whole list of possible splits for that column
def find_best_split_parallel(args):
best_ls = 1000000
best_split = None
best_children = None
split_point, data, label = args
key,possible_split = split_point
for split in possible_split:
children = split_children(data, label, key, split)
#weighted average of left and right ls
ls = len(children[1])*least_square(children[1])/len(label) + len(children[3])*least_square(children[3])/len(label)
if ls < best_ls:
best_ls = ls
best_split = (key, split)
best_children = children
return best_ls, best_split, best_children
def find_best_split(data, label, split_points):
# split_points is a dictionary of possible splitting values
# return the best split
best_ls = 1000000
best_split = None
best_children = None
pool = Pool()
for ls, split, children in pool.map(find_best_split_parallel, zip(split_points.items(), repeat(data), repeat(label))):
if ls < best_ls:
best_ls = ls
best_split = split
best_children = children
pool.close()
return best_split, best_children # return a tuple(attribute, value)
def split_children(data, label, key, split):
left_index = [index for index in xrange(len(data.iloc[:,key])) if data.iloc[index,key] < split]
right_index = [index for index in xrange(len(data.iloc[:,key])) if data.iloc[index,key] >= split]
left_data = data.iloc[left_index,:]
right_data = data.iloc[right_index,:]
left_label = [label[i] for i in left_index]
right_label =[label[i] for i in right_index]
return left_data, left_label, right_data, right_label
def least_square(label):
if not len(label):
return 0
return (np.sum(label)**2)/len(set(label))
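# Note: the criterion implemented above is sum(label)**2 / number_of_distinct_labels,
# e.g. least_square([2, 2, 4]) == (2 + 2 + 4)**2 / 2 == 32.0.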
def create_leaf(label):
global node_id
node_id += 1
leaf = {'splittng_feature': None,
'left': None,
'right':None,
'is_leaf':True,
'index':node_id}
leaf['value'] = round(np.mean(label),3)
return leaf
def find_splits_parallel(args):
var_space, label, col = args
# var_space = data.iloc[:,col].tolist()
return scipy.optimize.fminbound(error_function, min(var_space), max(var_space), args = (col, var_space, label), full_output = 1)
# return,
# if not min_error or error < min_error:
# min_error = error
# split_var = col
# min_split = split
def create_tree(data, all_pos_split, label, max_depth, ideal_ls, current_depth = 0):
remaining_features = all_pos_split
#stopping conditions
if sum([len(v)!= 0 for v in remaining_features.values()]) == 0:
# If there are no remaining features to consider, make current node a leaf node
return create_leaf(label)
# #Additional stopping condition (limit tree depth)
elif current_depth > max_depth:
return create_leaf(label)
#######
min_error = None
split_var = None
min_split = None
var_spaces = [data.iloc[:,col].tolist() for col in xrange(data.shape[1])]
cols = [col for col in xrange(data.shape[1])]
pool = Pool()
	for col, (split, error, ierr, numf) in zip(cols, pool.map(find_splits_parallel, zip(var_spaces, repeat(label), cols))):
if not min_error or error < min_error:
min_error = error
split_var = col
min_split = split
pool.close()
splitting_feature = (split_var, min_split)
children = split_children(data, label, split_var, min_split)
left_data, left_label, right_data, right_label = children
if len(left_label) == 0 or len(right_label) == 0:
return create_leaf(label)
left_least_square = least_square(left_label)
# Create a leaf node if the split is "perfect"
if left_least_square < ideal_ls:
return create_leaf(left_label)
if least_square(right_label) < ideal_ls:
return create_leaf(right_label)
# recurse on children
left_tree = create_tree(left_data, remaining_features, left_label, max_depth, ideal_ls, current_depth +1)
right_tree = create_tree(right_data, remaining_features, right_label, max_depth, ideal_ls, current_depth +1)
return {'is_leaf' : False,
'value' : None,
'splitting_feature': splitting_feature,
'left' : left_tree,
'right' : right_tree,
'index' : None}
def error_function(split_point, split_var, data, label):
data1 = []
data2 = []
for i in xrange(len(data)):
temp_dat = data[i]
if temp_dat <= split_point:
data1.append(label[i])
else:
data2.append(label[i])
return least_square(data1) + least_square(data2)
def make_prediction(tree, x, annotate = False):
if tree['is_leaf']:
if annotate:
print "At leaf, predicting %s" % tree['value']
return tree['value']
else:
# the splitting value of x.
split_feature_value = x[tree['splitting_feature'][0]]
if annotate:
print "Split on %s = %s" % (tree['splitting_feature'], split_feature_value)
if split_feature_value < tree['splitting_feature'][1]:
return make_prediction(tree['left'], x, annotate)
else:
return make_prediction(tree['right'], x, annotate)
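# Illustrative node layout used by make_prediction (values are hypothetical):
#   {'is_leaf': False, 'splitting_feature': (2, 0.5), 'left': ..., 'right': ..., 'value': None}
# so a sample x goes left when x[2] < 0.5 and right otherwise, until a leaf's 'value' is returned.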
class RegressionTree:
def __init__(self, training_data, labels, max_depth=5, ideal_ls=100):
self.training_data = training_data
self.labels = labels
self.max_depth = max_depth
self.ideal_ls = ideal_ls
self.tree = None
def fit(self):
global node_id
node_id = 0
all_pos_split = {}
pool = Pool()
splitting_data = [self.training_data.iloc[:,col].tolist() for col in xrange(self.training_data.shape[1])]
cols = [col for col in xrange(self.training_data.shape[1])]
for dat, col in pool.map(get_splitting_points, zip(splitting_data, cols)):
all_pos_split[col] = dat
pool.close()
self.tree = create_tree(self.training_data, all_pos_split, self.labels, self.max_depth, self.ideal_ls)
def predict(self, test):
prediction = np.array([make_prediction(self.tree, x) for x in test])
return prediction
if __name__ == '__main__':
#read in data, label
data = pd.read_excel("mlr06.xls")
test = [[478, 184, 40, 74, 11, 31], [1000,10000,10000,10000,10000,1000,100000]]
label = data['X7']
del data['X7']
model = RegressionTree(data, label)
model.fit()
print model.predict(test)
| 6,591 | 29.803738 | 129 | py |
LambdaMart | LambdaMart-master/lambdamart.py | import numpy as np
import math
import random
import copy
from sklearn.tree import DecisionTreeRegressor
from multiprocessing import Pool
from RegressionTree import RegressionTree
import pandas as pd
import pickle
def dcg(scores):
"""
Returns the DCG value of the list of scores.
Parameters
----------
scores : list
Contains labels in a certain ranked order
Returns
-------
DCG_val: int
This is the value of the DCG on the given scores
"""
return np.sum([
(np.power(2, scores[i]) - 1) / np.log2(i + 2)
for i in xrange(len(scores))
])
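# Worked example of the formula above: dcg([3, 1, 2]) =
#   (2**3 - 1)/log2(2) + (2**1 - 1)/log2(3) + (2**2 - 1)/log2(4) = 7 + 0.631 + 1.5 ~= 9.13,
# while the ideal ordering [3, 2, 1] gives 7 + 1.893 + 0.5 ~= 9.39.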
def dcg_k(scores, k):
"""
Returns the DCG value of the list of scores and truncates to k values.
Parameters
----------
scores : list
Contains labels in a certain ranked order
k : int
In the amount of values you want to only look at for computing DCG
Returns
-------
DCG_val: int
This is the value of the DCG on the given scores
"""
return np.sum([
(np.power(2, scores[i]) - 1) / np.log2(i + 2)
for i in xrange(len(scores[:k]))
])
def ideal_dcg(scores):
"""
Returns the Ideal DCG value of the list of scores.
Parameters
----------
scores : list
Contains labels in a certain ranked order
Returns
-------
Ideal_DCG_val: int
This is the value of the Ideal DCG on the given scores
"""
scores = [score for score in sorted(scores)[::-1]]
return dcg(scores)
def ideal_dcg_k(scores, k):
"""
Returns the Ideal DCG value of the list of scores and truncates to k values.
Parameters
----------
scores : list
Contains labels in a certain ranked order
k : int
In the amount of values you want to only look at for computing DCG
Returns
-------
Ideal_DCG_val: int
This is the value of the Ideal DCG on the given scores
"""
scores = [score for score in sorted(scores)[::-1]]
return dcg_k(scores, k)
def single_dcg(scores, i, j):
"""
Returns the DCG value at a single point.
Parameters
----------
scores : list
Contains labels in a certain ranked order
i : int
This points to the ith value in scores
j : int
This sets the ith value in scores to be the jth rank
Returns
-------
Single_DCG: int
This is the value of the DCG at a single point
"""
return (np.power(2, scores[i]) - 1) / np.log2(j + 2)
def compute_lambda(args):
"""
Returns the lambda and w values for a given query.
Parameters
----------
args : zipped value of true_scores, predicted_scores, good_ij_pairs, idcg, query_key
Contains a list of the true labels of documents, list of the predicted labels of documents,
i and j pairs where true_score[i] > true_score[j], idcg values, and query keys.
Returns
-------
lambdas : numpy array
This contains the calculated lambda values
w : numpy array
This contains the computed w values
query_key : int
This is the query id these values refer to
"""
true_scores, predicted_scores, good_ij_pairs, idcg, query_key = args
num_docs = len(true_scores)
sorted_indexes = np.argsort(predicted_scores)[::-1]
rev_indexes = np.argsort(sorted_indexes)
true_scores = true_scores[sorted_indexes]
predicted_scores = predicted_scores[sorted_indexes]
lambdas = np.zeros(num_docs)
w = np.zeros(num_docs)
single_dcgs = {}
for i,j in good_ij_pairs:
if (i,i) not in single_dcgs:
single_dcgs[(i,i)] = single_dcg(true_scores, i, i)
single_dcgs[(i,j)] = single_dcg(true_scores, i, j)
if (j,j) not in single_dcgs:
single_dcgs[(j,j)] = single_dcg(true_scores, j, j)
single_dcgs[(j,i)] = single_dcg(true_scores, j, i)
for i,j in good_ij_pairs:
z_ndcg = abs(single_dcgs[(i,j)] - single_dcgs[(i,i)] + single_dcgs[(j,i)] - single_dcgs[(j,j)]) / idcg
rho = 1 / (1 + np.exp(predicted_scores[i] - predicted_scores[j]))
rho_complement = 1.0 - rho
lambda_val = z_ndcg * rho
lambdas[i] += lambda_val
lambdas[j] -= lambda_val
w_val = rho * rho_complement * z_ndcg
w[i] += w_val
w[j] += w_val
return lambdas[rev_indexes], w[rev_indexes], query_key
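# For reference, each (i, j) pair with true_score[i] > true_score[j] contributes
#   rho_ij    = 1 / (1 + exp(s_i - s_j))        # s = current predicted scores
#   lambda_ij = |delta DCG_ij| / IDCG * rho_ij  # added to doc i, subtracted from doc j
#   w_ij      = rho_ij * (1 - rho_ij) * |delta DCG_ij| / IDCG
# which is the usual LambdaMART gradient / second-order weight pair.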
def group_queries(training_data, qid_index):
"""
Returns a dictionary that groups the documents by their query ids.
Parameters
----------
training_data : Numpy array of lists
Contains a list of document information. Each document's format is [relevance score, query index, feature vector]
qid_index : int
This is the index where the qid is located in the training data
Returns
-------
query_indexes : dictionary
The keys were the different query ids and teh values were the indexes in the training data that are associated of those keys.
"""
query_indexes = {}
index = 0
for record in training_data:
query_indexes.setdefault(record[qid_index], [])
query_indexes[record[qid_index]].append(index)
index += 1
return query_indexes
def get_pairs(scores):
"""
Returns pairs of indexes where the first value in the pair has a higher score than the second value in the pair.
Parameters
----------
scores : list of int
Contain a list of numbers
Returns
-------
query_pair : list of pairs
This contains a list of pairs of indexes in scores.
"""
query_pair = []
for query_scores in scores:
temp = sorted(query_scores, reverse=True)
pairs = []
for i in xrange(len(temp)):
for j in xrange(len(temp)):
if temp[i] > temp[j]:
pairs.append((i,j))
query_pair.append(pairs)
return query_pair
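# For instance, get_pairs([[3, 1, 2]]) returns [[(0, 1), (0, 2), (1, 2)]]: the indices
# refer to positions in the descending sort of each query's scores, with pair (i, j)
# meaning the i-th ranked score is strictly larger than the j-th.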
class LambdaMART:
def __init__(self, training_data=None, number_of_trees=5, learning_rate=0.1, tree_type='sklearn'):
"""
This is the constructor for the LambdaMART object.
Parameters
----------
training_data : list of int
Contain a list of numbers
number_of_trees : int (default: 5)
Number of trees LambdaMART goes through
learning_rate : float (default: 0.1)
Rate at which we update our prediction with each tree
tree_type : string (default: "sklearn")
Either "sklearn" for using Sklearn implementation of the tree of "original"
for using our implementation
"""
if tree_type != 'sklearn' and tree_type != 'original':
raise ValueError('The "tree_type" must be "sklearn" or "original"')
self.training_data = training_data
self.number_of_trees = number_of_trees
self.learning_rate = learning_rate
self.trees = []
self.tree_type = tree_type
def fit(self):
"""
Fits the model on the training data.
"""
predicted_scores = np.zeros(len(self.training_data))
query_indexes = group_queries(self.training_data, 1)
query_keys = query_indexes.keys()
true_scores = [self.training_data[query_indexes[query], 0] for query in query_keys]
good_ij_pairs = get_pairs(true_scores)
tree_data = pd.DataFrame(self.training_data[:, 2:7])
labels = self.training_data[:, 0]
# ideal dcg calculation
idcg = [ideal_dcg(scores) for scores in true_scores]
for k in xrange(self.number_of_trees):
print 'Tree %d' % (k)
lambdas = np.zeros(len(predicted_scores))
w = np.zeros(len(predicted_scores))
pred_scores = [predicted_scores[query_indexes[query]] for query in query_keys]
pool = Pool()
for lambda_val, w_val, query_key in pool.map(compute_lambda, zip(true_scores, pred_scores, good_ij_pairs, idcg, query_keys), chunksize=1):
indexes = query_indexes[query_key]
lambdas[indexes] = lambda_val
w[indexes] = w_val
pool.close()
if self.tree_type == 'sklearn':
# Sklearn implementation of the tree
tree = DecisionTreeRegressor(max_depth=50)
tree.fit(self.training_data[:,2:], lambdas)
self.trees.append(tree)
prediction = tree.predict(self.training_data[:,2:])
predicted_scores += prediction * self.learning_rate
elif self.tree_type == 'original':
# Our implementation of the tree
tree = RegressionTree(tree_data, lambdas, max_depth=10, ideal_ls= 0.001)
tree.fit()
prediction = tree.predict(self.training_data[:,2:])
predicted_scores += prediction * self.learning_rate
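    # Each boosting round above fits a regression tree to the per-document lambdas and
    # applies the additive update predicted_scores += learning_rate * tree.predict(X),
    # i.e. F_k(x) = F_{k-1}(x) + lr * tree_k(x) in gradient-boosting terms.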
def predict(self, data):
"""
Predicts the scores for the test dataset.
Parameters
----------
data : Numpy array of documents
Numpy array of documents with each document's format is [query index, feature vector]
Returns
-------
predicted_scores : Numpy array of scores
This contains an array or the predicted scores for the documents.
"""
data = np.array(data)
query_indexes = group_queries(data, 0)
predicted_scores = np.zeros(len(data))
for query in query_indexes:
results = np.zeros(len(query_indexes[query]))
for tree in self.trees:
results += self.learning_rate * tree.predict(data[query_indexes[query], 1:])
predicted_scores[query_indexes[query]] = results
return predicted_scores
def validate(self, data, k):
"""
Predicts the scores for the test dataset and calculates the NDCG value.
Parameters
----------
data : Numpy array of documents
Numpy array of documents with each document's format is [relevance score, query index, feature vector]
k : int
this is used to compute the NDCG@k
Returns
-------
average_ndcg : float
This is the average NDCG value of all the queries
predicted_scores : Numpy array of scores
This contains an array or the predicted scores for the documents.
"""
data = np.array(data)
query_indexes = group_queries(data, 1)
average_ndcg = []
predicted_scores = np.zeros(len(data))
for query in query_indexes:
results = np.zeros(len(query_indexes[query]))
for tree in self.trees:
results += self.learning_rate * tree.predict(data[query_indexes[query], 2:])
predicted_sorted_indexes = np.argsort(results)[::-1]
t_results = data[query_indexes[query], 0]
t_results = t_results[predicted_sorted_indexes]
predicted_scores[query_indexes[query]] = results
dcg_val = dcg_k(t_results, k)
idcg_val = ideal_dcg_k(t_results, k)
ndcg_val = (dcg_val / idcg_val)
average_ndcg.append(ndcg_val)
average_ndcg = np.nanmean(average_ndcg)
return average_ndcg, predicted_scores
def save(self, fname):
"""
Saves the model into a ".lmart" file with the name given as a parameter.
Parameters
----------
fname : string
Filename of the file you want to save
"""
pickle.dump(self, open('%s.lmart' % (fname), "wb"), protocol=2)
def load(self, fname):
"""
Loads the model from the ".lmart" file given as a parameter.
Parameters
----------
fname : string
Filename of the file you want to load
"""
model = pickle.load(open(fname , "rb"))
self.training_data = model.training_data
self.number_of_trees = model.number_of_trees
self.tree_type = model.tree_type
self.learning_rate = model.learning_rate
self.trees = model.trees | 10,588 | 28.744382 | 141 | py |
OLD3S | OLD3S-main/model/loaddatasets.py | import numpy as np
import pandas as pd
import torch
import torchvision
from sklearn import preprocessing
from torchvision.transforms import transforms
from sklearn.utils import shuffle
Newfeature = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.ColorJitter(hue=0.3),
torchvision.transforms.ToTensor()])
def loadcifar():
cifar10_original = torchvision.datasets.CIFAR10(
root='./data',
train=True,
download=True,
transform=transforms.ToTensor()
)
cifar10_color = torchvision.datasets.CIFAR10(
root='./data',
train=True,
download=True,
transform=Newfeature
)
x_S1 = torch.Tensor(cifar10_original.data)
x_S2 = torch.Tensor(cifar10_color.data)
y_S1, y_S2 = torch.Tensor(cifar10_original.targets), torch.Tensor(cifar10_color.targets)
x_S1, y_S1 = shuffle(x_S1, y_S1, random_state=30)
x_S2, y_S2 = shuffle(x_S2, y_S2, random_state=30)
x_S1 = torch.transpose(x_S1, 3, 2)
x_S1 = torch.transpose(x_S1, 2, 1)
x_S2 = torch.transpose(x_S2, 3, 2)
x_S2 = torch.transpose(x_S2, 2, 1)
x_S2 = transforms.ColorJitter(hue=0.3)(x_S2)
return x_S1, y_S1, x_S2, y_S2
def loadsvhn():
svhm_original = torchvision.datasets.SVHN('./data', split='train', download=False,
transform=transforms.Compose([ transforms.ToTensor()]))
svhm_color = torchvision.datasets.SVHN(
root='./data',
split="train",
download=True,
transform=Newfeature
)
x_S1 = torch.Tensor(svhm_original.data)
x_S2 = torch.Tensor(svhm_color.data)
x_S2 = transforms.ColorJitter(hue=0.3)(x_S2)
y_S1, y_S2 = svhm_original.labels, svhm_color.labels
for i in range(len(y_S1)):
if y_S1[i] == 10:
y_S1[i] = 0
y_S1, y_S2 = torch.Tensor(y_S1), torch.Tensor(y_S1)
x_S1, y_S1 = shuffle(x_S1, y_S1, random_state=30)
x_S2, y_S2 = shuffle(x_S2, y_S2, random_state=30)
return x_S1, y_S1, x_S2, y_S2
def loadmagic():
data = pd.read_csv(r"./data/magic04_X.csv", header=None).values
label = pd.read_csv(r"./data/magic04_y.csv", header=None).values
for i in label:
if i[0] == -1:
i[0] = 0
rd1 = np.random.RandomState(1314)
data = preprocessing.scale(data)
matrix1 = rd1.random((10, 30))
x_S2 = np.dot(data, matrix1)
x_S1 = torch.sigmoid(torch.Tensor(data))
x_S2 = torch.sigmoid(torch.Tensor(x_S2))
y_S1, y_S2 = torch.Tensor(label), torch.Tensor(label)
x_S1, y_S1 = shuffle(x_S1, y_S1, random_state=50)
x_S2, y_S2 = shuffle(x_S2, y_S2, random_state=50)
return x_S1, y_S1, x_S2, y_S2
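# In loadmagic above, the second feature space is simulated by projecting the 10 scaled
# MAGIC features through a fixed 10x30 random matrix (seeded with RandomState(1314));
# both views are then squashed with a sigmoid and share the same labels.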
def loadadult():
df1 = pd.read_csv(r"D:/pycharmproject/pytorch/OLD3/adult/data/adult.data", header=1)
df1.columns=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o']
le = preprocessing.LabelEncoder()
le.fit(df1.o)
df1['o'] = le.transform(df1.o)
le.fit(df1.b)
df1['b'] = le.transform(df1.b)
le.fit(df1.d)
df1['d'] = le.transform(df1.d)
le.fit(df1.f)
df1['f'] = le.transform(df1.f)
le.fit(df1.g)
df1['g'] = le.transform(df1.g)
le.fit(df1.h)
df1['h'] = le.transform(df1.h)
le.fit(df1.i)
df1['i'] = le.transform(df1.i)
le.fit(df1.j)
df1['j'] = le.transform(df1.j)
le.fit(df1.n)
df1['n'] = le.transform(df1.n)
data = np.array(df1.iloc[:, :-1])
label = np.array(df1.o)
rd1 = np.random.RandomState(1314)
data = preprocessing.scale(data)
matrix1 = rd1.random((14, 30))
x_S2 = np.dot(data, matrix1)
x_S1 = torch.sigmoid(torch.Tensor(data))
x_S2 = torch.sigmoid(torch.Tensor(x_S2))
y_S1, y_S2 = torch.Tensor(label), torch.Tensor(label)
x_S1, y_S1 = shuffle(x_S1, y_S1, random_state=30)
x_S2, y_S2 = shuffle(x_S2, y_S2, random_state=30)
return x_S1, y_S1, x_S2, y_S2
def loadreuter(name):
x_S1 = torch.Tensor(torch.load('./data/' + name +'/x_S1_pca'))
y_S1 = torch.Tensor(torch.load('./data/' + name +'/y_S1_multiLinear'))
x_S2 = torch.Tensor(torch.load('./data/' + name +'/x_S2_pca'))
y_S2 = torch.Tensor(torch.load('./data/' + name +'/y_S2_multiLinear'))
return x_S1, y_S1, x_S2, y_S2
def loadmnist():
mnist_original = torchvision.datasets.FashionMNIST(
root='./data',
download=True,
train=True,
# Simply put the size you want in Resize (can be tuple for height, width)
transform=torchvision.transforms.Compose(
[torchvision.transforms.ToTensor()]
),
)
mnist_color = torchvision.datasets.FashionMNIST(
root='./data',
train=True,
download=True,
transform=torchvision.transforms.Compose(
[
transforms.RandomHorizontalFlip(p=0.5),
transforms.ColorJitter(hue=0.3),
torchvision.transforms.ToTensor()]
),
)
x_S1 = mnist_original.data
x_S2 = mnist_color.data
x_S2 = transforms.ColorJitter(hue=0.3)(x_S2)
y_S1, y_S2 = mnist_original.targets, mnist_color.targets
x_S1, y_S1 = shuffle(x_S1, y_S1, random_state=1000)
x_S2, y_S2 = shuffle(x_S2, y_S2, random_state=1000)
return x_S1, y_S1, x_S2, y_S2
| 5,243 | 33.051948 | 92 | py |
OLD3S | OLD3S-main/model/model_vae.py | import torch
import torch.nn as nn
import math
import copy
from torch.nn.parameter import Parameter
from cnn import Dynamic_ResNet18
from autoencoder import *
from loaddatasets import *
from mlp import MLP
from vae import *
from test import DCVAE
def normal(t):
mean, std, var = torch.mean(t), torch.std(t), torch.var(t)
t = (t - mean) / std
return t
'''class OLD3S_Shallow_VAE:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, path, lr=0.001, b=0.9, eta=-0.001, s=0.008, m=0.9,
spike=9e-5, thre=10000, RecLossFunc='Smooth'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.spike = spike
self.thre = thre
self.beta1 = 1
self.beta2 = 0
self.correct = 0
self.accuracy = 0
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.data_S1 = data_S1
self.label_S1 = label_S1
self.data_S2 = data_S2
self.label_S2 = label_S2
self.lr = Parameter(torch.tensor(lr), requires_grad=False).to(self.device)
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.s1 = Parameter(torch.tensor(0.01), requires_grad=False).to(self.device)
self.num_block = 8
self.alpha = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.alpha2 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.CELoss = nn.CrossEntropyLoss()
self.KLDivLoss = nn.KLDivLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.Accuracy = []
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
state_1 = torch.load('best_model_svhn')
state_2 = torch.load('best_model_svhn_2')
self.autoencoder = DCVAE(32, hidden_size=1024, latent_size=100, image_channels=3).to(self.device)
self.autoencoder_2 = DCVAE(32, hidden_size=1024, latent_size=100, image_channels=3).to(self.device)
self.autoencoder.load_state_dict(state_1)
self.autoencoder_2.load_state_dict(state_2)
def FirstPeriod(self):
data1 = self.data_S1
data2 = self.data_S2[self.B:]
#self.net_model1 = Dynamic_ResNet18().to(self.device)
self.net_model1 = MLP(100,10).to(self.device)
self.autoencoder.eval()
optimizer_1 = torch.optim.Adam(self.net_model1.parameters(), lr=0.001)
optimizer_2 = torch.optim.Adam(self.autoencoder.parameters(), lr=0.001)
for (i, x) in enumerate(data1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
x1 = normal(x1)
self.i = i
y = self.label_S1[i].unsqueeze(0).long().to(self.device)
if self.i < self.B:
encoded, decoded, mu, logVar = self.autoencoder(x1)
y_hat, loss_1 = self.HB_Fit(self.net_model1, encoded, y, optimizer_1)
optimizer_2.zero_grad()
CE = F.binary_cross_entropy(decoded, x1, reduction='sum')
KLD = -0.5 * torch.sum(1 + logVar - mu.pow(2) - logVar.exp())
loss = CE +KLD
# Backpropagation based on the loss
loss.backward(retain_graph=True)
optimizer_2.step()
if self.i < self.thre:
self.beta2 = self.beta2 + self.spike
self.beta1 = 1 - self.beta2
else:
x2 = data2[self.i - self.B].unsqueeze(0).float().to(self.device)
x2 = normal(x2)
if i == self.B:
self.net_model2 = copy.deepcopy(self.net_model1)
torch.save(self.net_model1.state_dict(), './data/' + self.path + '/net_model1.pth')
optimizer_1_1 = torch.optim.Adam(self.net_model1.parameters(), lr=self.lr)
optimizer_1_2 = torch.optim.Adam(self.net_model2.parameters(), lr=self.lr)
# optimizer_2_1 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.02)
optimizer_2_2 = torch.optim.Adam(self.autoencoder_2.parameters(), lr=0.001)
self.autoencoder.eval()
encoded_1, decoded_1, mu_1, logVar_1 = self.autoencoder(x1)
encoded_2, decoded_2, mu_2, logVar_2 = self.autoencoder_2(x2)
y_hat_1, loss_1_1 = self.HB_Fit(self.net_model1, encoded_1, y, optimizer_1_1)
y_hat_2, loss_1_2 = self.HB_Fit(self.net_model2, encoded_2, y, optimizer_1_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_1_1)
self.cl_2.append(loss_1_2)
if len(self.cl_1) == 200:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_2_2.zero_grad()
CE = F.binary_cross_entropy(decoded_2, x2, reduction='sum')
KLD = -0.5 * torch.sum(1 + logVar_2 - mu_2.pow(2) - logVar_2.exp())
loss_2_1 = CE + KLD
loss_2_2 = self.RecLossFunc(encoded_2, encoded_1)
loss_2 = loss_2_1 + loss_2_2
loss_2.backward(retain_graph=True)
optimizer_2_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.net_model2.state_dict(), './data/' + self.path + '/net_model2.pth')
def SecondPeriod(self):
print("use FirstPeriod when i<T1 ")
#self.FirstPeriod()
self.correct = 0
data2 = self.data_S2[:self.B]
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_3 = torch.optim.Adam(net_model1.parameters(), lr=0.001)
optimizer_4 = torch.optim.Adam(net_model2.parameters(), lr=0.001)
optimizer_5 = torch.optim.Adam(self.autoencoder_2.parameters(), lr=self.lr)
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
for (i, x) in enumerate(data2):
x = x.unsqueeze(0).float().to(self.device)
x = normal(x)
y = self.label_S2[i].unsqueeze(0).long().to(self.device)
encoded, decoded, mu, logVar = self.autoencoder_2(x)
optimizer_5.zero_grad()
y_hat_2, loss_2 = self.HB_Fit(net_model2, encoded, y, optimizer_4)
y_hat_1, loss_1 = self.HB_Fit(net_model1, encoded, y, optimizer_3)
CE = F.binary_cross_entropy(decoded, x, reduction='sum')
KLD = -0.5 * torch.sum(1 + logVar - mu.pow(2) - logVar.exp())
loss_autoencoder = CE + KLD
loss_autoencoder.backward()
optimizer_5.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
print(y_hat_1)
print(y_hat_2)
self.cl_1.append(loss_1)
self.cl_2.append(loss_2)
if len(self.cl_1) == 200:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
#torch.save(self.Accuracy, './data/' + self.path + '/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
param.grad.zero_()
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
print('Enter correct loss function name!')
def loadmodel(self, path):
net_model = MLP(100,10).to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def HB_Fit(self, model, X, Y, optimizer): # hedge backpropagation
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
output = torch.empty_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha[i] * out
for i in range(5): # First 6 are shallow and last 2 are deep
if i == 0:
alpha_sum_1 = self.alpha[i]
else:
alpha_sum_1 += self.alpha[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
loss_ = (self.alpha[i] / alpha_sum_1) * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / 5)
self.alpha[i] = torch.min(self.alpha[i], self.m) # exploration-exploitation
z_t = torch.sum(self.alpha)
self.alpha = Parameter(self.alpha / z_t, requires_grad=False).to(self.device)
return output, Loss_sum'''
class OLD3S_Mnist_VAE:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, dimension1, dimension2, hidden_size,
latent_size, classes, path, lr=0.001, b=0.9,
eta=-0.05, s=0.008, m=0.99, RecLossFunc='BCE'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.correct = 0
self.accuracy = 0
self.lr = lr
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.x_S1 = data_S1
self.y_S1 = label_S1
self.x_S2 = data_S2
self.y_S2 = label_S2
self.dimension1 = dimension1
self.dimension2 = dimension2
self.hidden_size = hidden_size
self.latent_size = latent_size
self.classes = classes
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.CELoss = nn.CrossEntropyLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.MSELoss = nn.MSELoss()
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.Accuracy = []
self.a_1 = 0.5
self.a_2 = 0.5
self.cl_1 = []
self.cl_2 = []
self.alpha = Parameter(torch.Tensor(5).fill_(1 / 5), requires_grad=False).to(
self.device)
state_1 = torch.load('./data/' + self.path + '/vae_model_1')
state_2 = torch.load('./data/' + self.path + '/vae_model_2')
self.autoencoder_1 = VAE_Mnist(self.dimension1, self.hidden_size,self.latent_size).to(self.device)
self.autoencoder_1.load_state_dict(state_1)
self.autoencoder_2 = VAE_Mnist(self.dimension2, self.hidden_size,self.latent_size).to(self.device)
self.autoencoder_2.load_state_dict(state_2)
def FirstPeriod(self):
classifier_1 = MLP(self.latent_size,self.classes).to(self.device)
optimizer_classifier_1 = torch.optim.Adam(classifier_1.parameters(),0.001)
optimizer_autoencoder_1 = torch.optim.Adam(self.autoencoder_1.parameters(), 0.001)
# eta = -8 * math.sqrt(1 / math.log(self.t))
self.autoencoder_1.eval()
for (i, x) in enumerate(self.x_S1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
x1 = normal(x1)
y1 = self.y_S1[i].unsqueeze(0).long().to(self.device)
if self.i < self.B: # Before evolve
encoded, decoded, mu, logVar = self.autoencoder_1(x1)
y_hat, loss_1 = self.HB_Fit(classifier_1, encoded, y1, optimizer_classifier_1)
loss_2 = self.VAE_Loss(logVar, mu, decoded, x1,optimizer_autoencoder_1)
else:
x2 = self.x_S2[self.i].unsqueeze(0).float().to(self.device)
x2 = normal(x2)
if i == self.B:
classifier_2 = copy.deepcopy(classifier_1)
torch.save(classifier_1.state_dict(),
'./data/' + self.path + '/net_model1.pth')
self.autoencoder_2.eval()
optimizer_classifier_2 = torch.optim.Adam(classifier_2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.Adam(self.autoencoder_2.parameters(), 0.0001)
encoded_1, decoded_1, mu_1, logVar_1 = self.autoencoder_1(x1)
encoded_2, decoded_2, mu_2, logVar_2 = self.autoencoder_2(x2)
y_hat_2, loss_classifier_2 = self.HB_Fit(classifier_2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_autoencoder_2.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar_2 + mu_2.pow(2) + logVar_2.exp())
rec_loss = F.binary_cross_entropy(decoded_2.reshape(1,28,28), x2, size_average=False) + kl_divergence
loss_autoencoder_2 = rec_loss + self.SmoothL1Loss(encoded_2, encoded_1)
loss_autoencoder_2.backward(retain_graph=True)
optimizer_autoencoder_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(classifier_2.state_dict(), './data/' + self.path + '/net_model2.pth')
def SecondPeriod(self):
print('use FESA when i<T1')
self.FirstPeriod()
self.correct = 0
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_classifier_1 = torch.optim.Adam(net_model1.parameters(), self.lr)
optimizer_classifier_2 = torch.optim.Adam(net_model2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.Adam(self.autoencoder_2.parameters(), 0.0001)
data_2 = self.x_S2[:self.B]
label_2 = self.y_S1[:self.B]
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
# eta = -8 * math.sqrt(1 / math.log(self.B))
for (i, x) in enumerate(data_2):
x = x.unsqueeze(0).float().to(self.device)
x = normal(x)
self.i = i + self.T1
y1 = label_2[i].unsqueeze(0).long().to(self.device)
encoded_2, decoded_2, mu, logVar = self.autoencoder_2(x)
optimizer_autoencoder_2.zero_grad()
y_hat_2, loss_classifier_2 = self.HB_Fit(net_model2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(net_model1,
encoded_2, y1, optimizer_classifier_1)
loss_autoencoder_2 = self.VAE_Loss(logVar, mu, decoded_2, x, optimizer_autoencoder_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/' + self.path + '/Accuracy_Vae')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
# param.grad.detach_()
param.grad.zero_() # data.fill_(0)
def loadmodel(self, path):
net_model = MLP(self.latent_size,self.classes).to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
print('Enter correct loss function name!')
def VAE_Loss(self, logVar, mu, decoded,x,optimizer):
optimizer.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar + mu.pow(2) + logVar.exp())
ce = F.binary_cross_entropy(decoded.reshape(1,28,28),x, size_average=False)
loss = ce + kl_divergence
# Backpropagation based on the loss
loss.backward(retain_graph=True)
optimizer.step()
return loss
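    # The loss returned above is the (negative) ELBO: binary cross-entropy between the
    # 28x28 reconstruction and the input plus the KL term
    #   0.5 * sum(exp(logVar) + mu^2 - 1 - logVar).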
def HB_Fit(self, model, X, Y, optimizer): # hedge backpropagation
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
output = torch.empty_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha[i] * out
        for i in range(5):  # accumulate the weights of the 5 hedge blocks
if i == 0:
alpha_sum_1 = self.alpha[i]
else:
alpha_sum_1 += self.alpha[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
loss_ = (self.alpha[i] / alpha_sum_1) * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / 5)
self.alpha[i] = torch.min(self.alpha[i], self.m) # exploration-exploitation
z_t = torch.sum(self.alpha)
self.alpha = Parameter(self.alpha / z_t, requires_grad=False).to(self.device)
return output, Loss_sum
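    # Hedge update used above: each block weight is decayed as alpha_i *= b ** loss_i,
    # clamped to [s / 5, m] for exploration, and renormalised to sum to 1; the returned
    # prediction is the alpha-weighted sum of the per-block outputs.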
class OLD3S_Deep_VAE:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, path, lr=0.001, b=0.9, eta=-0.01, s=0.008, m=0.9,
spike=9e-5, thre=10000, RecLossFunc='Smooth'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.autoencoder = ConvVAE().to(self.device)
self.autoencoder_2 = ConvVAE().to(self.device)
self.spike = spike
self.thre = thre
self.beta1 = 1
self.beta2 = 0
self.correct = 0
self.accuracy = 0
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.data_S1 = data_S1
self.label_S1 = label_S1
self.data_S2 = data_S2
self.label_S2 = label_S2
self.lr = Parameter(torch.tensor(lr), requires_grad=False).to(self.device)
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.s1 = Parameter(torch.tensor(0.01), requires_grad=False).to(self.device)
self.num_block = 8
self.alpha1 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.alpha2 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.CELoss = nn.CrossEntropyLoss()
self.KLDivLoss = nn.KLDivLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.Accuracy = []
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
def FirstPeriod(self):
data1 = self.data_S1
data2 = self.data_S2[self.B:]
self.net_model1 = Dynamic_ResNet18().to(self.device)
#self.net_model1 = MLP(32,10).to(self.device)
optimizer_1 = torch.optim.Adam(self.net_model1.parameters(), lr=0.001)
optimizer_2 = torch.optim.Adam(self.autoencoder.parameters(), lr=0.001)
self.autoencoder.eval()
for (i, x) in enumerate(data1):
            self.i = i
            x1 = x.unsqueeze(0).float().to(self.device)
y = self.label_S1[i].unsqueeze(0).long().to(self.device)
if self.i < self.B:
encoded, decoded, mu, logVar = self.autoencoder(x1)
y_hat, loss_1 = self.HB_Fit(self.net_model1, encoded, y, optimizer_1)
optimizer_2.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar + mu.pow(2) + logVar.exp())
loss = F.mse_loss(decoded, x1, size_average=False) + kl_divergence
# Backpropagation based on the loss
loss.backward(retain_graph=True)
optimizer_2.step()
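                # schedule: beta2 grows by `spike` each step (up to `thre` steps), gradually shifting
                # the hedge loss weight from the shallow exits toward the deeper exits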
if self.i < self.thre:
self.beta2 = self.beta2 + self.spike
self.beta1 = 1 - self.beta2
else:
x2 = data2[self.i - self.B].unsqueeze(0).float().to(self.device)
if i == self.B:
self.net_model2 = copy.deepcopy(self.net_model1)
torch.save(self.net_model1.state_dict(), './data/' + self.path + '/net_model1.pth')
optimizer_1_1 = torch.optim.Adam(self.net_model1.parameters(), lr=self.lr)
optimizer_1_2 = torch.optim.Adam(self.net_model2.parameters(), lr=self.lr)
# optimizer_2_1 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.02)
optimizer_2_2 = torch.optim.Adam(self.autoencoder_2.parameters(), lr=0.001)
encoded_1, decoded_1, mu_1, logVar_1 = self.autoencoder(x1)
encoded_2, decoded_2, mu_2, logVar_2 = self.autoencoder_2(x2)
y_hat_1, loss_1_1 = self.HB_Fit(self.net_model1, encoded_1, y, optimizer_1_1)
y_hat_2, loss_1_2 = self.HB_Fit(self.net_model2, encoded_2, y, optimizer_1_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_1_1)
self.cl_2.append(loss_1_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
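                    # exponentially weighted combination (eta < 0): the classifier with the smaller
                    # recent cumulative loss receives the larger ensemble weight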
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
# Backpropagation based on the loss
optimizer_2_2.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar_2 + mu_2.pow(2) + logVar_2.exp())
loss_2_1 = F.mse_loss(decoded_2, x2, size_average=False) + kl_divergence
loss_2_2 = self.RecLossFunc(encoded_2, encoded_1)
loss_2 = loss_2_1 + loss_2_2
loss_2.backward(retain_graph=True)
optimizer_2_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.net_model2.state_dict(), './data/' + self.path + '/net_model2.pth')
def SecondPeriod(self):
print("use FirstPeriod when i<T1 ")
self.FirstPeriod()
self.correct = 0
data2 = self.data_S2[:self.B]
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_3 = torch.optim.Adam(net_model1.parameters(), lr=self.lr)
optimizer_4 = torch.optim.Adam(net_model2.parameters(), lr=self.lr)
optimizer_5 = torch.optim.Adam(self.autoencoder_2.parameters(), lr=self.lr)
self.a_1 = 0.2
self.a_2 = 0.8
self.cl_1 = []
self.cl_2 = []
for (i, x) in enumerate(data2):
x = x.unsqueeze(0).float().to(self.device)
y = self.label_S2[i].unsqueeze(0).long().to(self.device)
encoded, decoded, mu, logVar = self.autoencoder_2(x)
optimizer_5.zero_grad()
y_hat_2, loss_4 = self.HB_Fit(net_model2, encoded, y, optimizer_4)
y_hat_1, loss_3 = self.HB_Fit(net_model1, encoded, y, optimizer_3)
kl_divergence = 0.5 * torch.sum(-1 - logVar + mu.pow(2) + logVar.exp())
loss = F.mse_loss(decoded, x, size_average=False) + kl_divergence
loss_5 = self.RecLossFunc(torch.sigmoid(x), decoded) + loss
loss_5.backward()
optimizer_5.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_3)
self.cl_2.append(loss_4)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/' + self.path + '/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
param.grad.zero_()
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def loadmodel(self, path):
net_model = Dynamic_ResNet18().to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def VaeReconstruction(self, x, out, mu, logVar, optimizer):
logVar = torch.sigmoid(logVar)
#mu = torch.sigmoid(mu)
#optimizer.zero_grad()
# The loss is the BCE loss combined with the KL divergence to ensure the distribution is learnt
kl_divergence = 0.5 * torch.sum(-1 - logVar + mu.pow(2) + logVar.exp())
loss = F.binary_cross_entropy(out, x, size_average=False) + kl_divergence
# Backpropagation based on the loss
loss.backward(retain_graph=True)
optimizer.step()
def HB_Fit(self, model, X, Y, optimizer, block_split=6):
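        # exits before `block_split` form the shallow group (scaled by beta1); the remaining
        # exits form the deep group (scaled by beta2), whose weight grows during training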
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha1[i] * out
for i in range(self.num_block):
if i < block_split:
if i == 0:
alpha_sum_1 = self.alpha1[i]
else:
alpha_sum_1 += self.alpha1[i]
else:
if i == block_split:
alpha_sum_2 = self.alpha1[i]
else:
alpha_sum_2 += self.alpha1[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
if i < block_split:
loss_ = (self.alpha1[i] / alpha_sum_1) * self.beta1 * loss
else:
loss_ = (self.alpha1[i] / alpha_sum_2) * self.beta2 * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha1[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha1[i] = torch.max(self.alpha1[i], self.s / self.num_block)
self.alpha1[i] = torch.min(self.alpha1[i], self.m)
z_t = torch.sum(self.alpha1)
self.alpha1 = Parameter(self.alpha1 / z_t, requires_grad=False).to(self.device)
return output, Loss_sum
class OLD3S_Shallow_VAE:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, dimension1, dimension2, hidden_size,
latent_size, classes, path, lr=0.001, b=0.9,
eta=-0.05, s=0.008, m=0.99, RecLossFunc='BCE'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.correct = 0
self.accuracy = 0
self.lr = lr
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.x_S1 = data_S1
self.y_S1 = label_S1
self.x_S2 = data_S2
self.y_S2 = label_S2
self.dimension1 = dimension1
self.dimension2 = dimension2
self.hidden_size = hidden_size
self.latent_size = latent_size
self.classes = classes
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.CELoss = nn.CrossEntropyLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.MSELoss = nn.MSELoss()
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.Accuracy = []
self.a_1 = 0.5
self.a_2 = 0.5
self.cl_1 = []
self.cl_2 = []
self.alpha = Parameter(torch.Tensor(5).fill_(1 / 5), requires_grad=False).to(
self.device)
state_1 = torch.load('./data/' + self.path + '/vae_model_1')
state_2 = torch.load('./data/' + self.path + '/vae_model_2')
self.autoencoder_1 = VAE_Shallow(self.dimension1, self.hidden_size, self.latent_size).to(self.device)
self.autoencoder_1.load_state_dict(state_1)
self.autoencoder_2 = VAE_Shallow(self.dimension2, self.hidden_size, self.latent_size).to(self.device)
self.autoencoder_2.load_state_dict(state_2)
def FirstPeriod(self):
classifier_1 = MLP(self.latent_size, self.classes).to(self.device)
optimizer_classifier_1 = torch.optim.Adam(classifier_1.parameters(), 0.001)
optimizer_autoencoder_1 = torch.optim.Adam(self.autoencoder_1.parameters(), 0.001)
# eta = -8 * math.sqrt(1 / math.log(self.t))
self.autoencoder_1.eval()
for (i, x) in enumerate(self.x_S1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
x1 = normal(x1)
y1 = self.y_S1[i].long().to(self.device)
if self.i < self.B: # Before evolve
encoded, decoded, mu, logVar = self.autoencoder_1(x1)
y_hat, loss_1 = self.HB_Fit(classifier_1, encoded, y1, optimizer_classifier_1)
loss_2 = self.VAE_Loss(logVar, mu, decoded, x1, optimizer_autoencoder_1)
else:
x2 = self.x_S2[self.i].unsqueeze(0).float().to(self.device)
x2 = normal(x2)
if i == self.B:
classifier_2 = copy.deepcopy(classifier_1)
torch.save(classifier_1.state_dict(),
'./data/' + self.path + '/net_model1.pth')
self.autoencoder_2.eval()
optimizer_classifier_2 = torch.optim.Adam(classifier_2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.Adam(self.autoencoder_2.parameters(), 0.0001)
encoded_1, decoded_1, mu_1, logVar_1 = self.autoencoder_1(x1)
encoded_2, decoded_2, mu_2, logVar_2 = self.autoencoder_2(x2)
y_hat_2, loss_classifier_2 = self.HB_Fit(classifier_2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_autoencoder_2.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar_2 + mu_2.pow(2) + logVar_2.exp())
rec_loss = F.binary_cross_entropy(decoded_2, x2, size_average=False) + kl_divergence
loss_autoencoder_2 = rec_loss + self.SmoothL1Loss(encoded_2, encoded_1)
loss_autoencoder_2.backward(retain_graph=True)
optimizer_autoencoder_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(classifier_2.state_dict(), './data/' + self.path + '/net_model2.pth')
def SecondPeriod(self):
print('use FESA when i<T1')
self.FirstPeriod()
self.correct = 0
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_classifier_1 = torch.optim.Adam(net_model1.parameters(), self.lr)
optimizer_classifier_2 = torch.optim.Adam(net_model2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.Adam(self.autoencoder_2.parameters(), 0.0001)
data_2 = self.x_S2[:self.B]
label_2 = self.y_S1[:self.B]
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
# eta = -8 * math.sqrt(1 / math.log(self.B))
for (i, x) in enumerate(data_2):
x = x.unsqueeze(0).float().to(self.device)
x = normal(x)
self.i = i + self.T1
y1 = label_2[i].long().to(self.device)
encoded_2, decoded_2, mu, logVar = self.autoencoder_2(x)
optimizer_autoencoder_2.zero_grad()
y_hat_2, loss_classifier_2 = self.HB_Fit(net_model2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(net_model1,
encoded_2, y1, optimizer_classifier_1)
loss_autoencoder_2 = self.VAE_Loss(logVar, mu, decoded_2, x, optimizer_autoencoder_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/' + self.path + '/Accuracy_Vae')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
# param.grad.detach_()
param.grad.zero_() # data.fill_(0)
def loadmodel(self, path):
net_model = MLP(self.latent_size, self.classes).to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def VAE_Loss(self, logVar, mu, decoded, x, optimizer):
optimizer.zero_grad()
kl_divergence = 0.5 * torch.sum(-1 - logVar + mu.pow(2) + logVar.exp())
ce = F.binary_cross_entropy(decoded, x, size_average=False)
loss = ce + kl_divergence
# Backpropagation based on the loss
loss.backward(retain_graph=True)
optimizer.step()
return loss
def HB_Fit(self, model, X, Y, optimizer): # hedge backpropagation
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha[i] * out
        for i in range(5):  # accumulate the weights of the five classifier heads
if i == 0:
alpha_sum_1 = self.alpha[i]
else:
alpha_sum_1 += self.alpha[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
loss_ = (self.alpha[i] / alpha_sum_1) * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / 5)
self.alpha[i] = torch.min(self.alpha[i], self.m) # exploration-exploitation
z_t = torch.sum(self.alpha)
self.alpha = Parameter(self.alpha / z_t, requires_grad=False).to(self.device)
return output, Loss_sum | 45,436 | 40.761949 | 118 | py |
OLD3S | OLD3S-main/model/cnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class BasicBlock(nn.Module):
EXPANSION = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.EXPANSION * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.EXPANSION * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.EXPANSION * planes)
)
def forward(self, x):
out = self.conv1(x)
out = F.relu(self.bn1(out))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
EXPANSION = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.EXPANSION * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.EXPANSION * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.EXPANSION * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.EXPANSION * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.EXPANSION * planes)
)
def forward(self, x):
out = self.conv1(x)
out = F.relu(self.bn1(out))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
        identity = self.shortcut(x)
        out += identity
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
super(ResNet, self).__init__()
self.num_blocks = num_blocks
self.in_planes = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.num_classes = num_classes
self.hidden_layers = []
self.output_layers = []
self._make_layer(block, 64, num_blocks[0], stride=1)
self._make_layer(block, 128, num_blocks[1], stride=2)
self._make_layer(block, 256, num_blocks[2], stride=2)
self._make_layer(block, 512, num_blocks[3], stride=2)
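        # eight prediction exits in total: one on the stem convolution and one after each residual
        # block, so hedge backpropagation can weight predictions from every depth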
self.output_layers.append(self._make_mlp1(64, 2)) # 32
self.output_layers.append(self._make_mlp1(64, 2)) # 32
self.output_layers.append(self._make_mlp2(128, 2)) # 16
self.output_layers.append(self._make_mlp2(128, 2)) # 16
self.output_layers.append(self._make_mlp3(256, 2)) # 8
self.output_layers.append(self._make_mlp3(256, 2)) # 8
self.output_layers.append(self._make_mlp4(512, 2)) # 4
self.output_layers.append(self._make_mlp4(512, 2)) # 4
self.hidden_layers = nn.ModuleList(self.hidden_layers) #
self.output_layers = nn.ModuleList(self.output_layers) #
def _make_mlp1(self, in_planes, kernel_size_pool, padding_pool=0):
classifier = nn.Sequential(
nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
#nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
nn.Flatten(),
nn.Linear(in_planes*8*8, in_planes*8*2),
nn.Linear(in_planes*8*2, 256),
nn.Linear(256, self.num_classes),
)
return classifier
def _make_mlp2(self, in_planes, kernel_size_pool, padding_pool=0):
classifier = nn.Sequential(
nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool,ceil_mode=True),
nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool,ceil_mode=True),
# nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
nn.Flatten(),
nn.Linear(in_planes*4*4, self.num_classes),
)
return classifier
def _make_mlp3(self, in_planes, kernel_size_pool, padding_pool=0):
classifier = nn.Sequential(
nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool,ceil_mode=True),
nn.AvgPool2d(kernel_size=kernel_size_pool, padding=padding_pool,ceil_mode=True),
# nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
nn.Flatten(),
nn.Linear(in_planes*2*2, self.num_classes),
)
return classifier
def _make_mlp4(self, in_planes, kernel_size_pool, padding_pool=0):
classifier = nn.Sequential(
nn.AvgPool2d(kernel_size=kernel_size_pool, padding=padding_pool,ceil_mode=True),
# nn.MaxPool2d(kernel_size=kernel_size_pool, padding=padding_pool),
nn.Flatten(),
nn.Linear(in_planes*2*2, self.num_classes),
)
return classifier
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
self.hidden_layers.append(block(self.in_planes, planes, stride))
self.in_planes = block.EXPANSION * planes
def forward(self, x):
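        # returns a list with one class-score tensor per exit; the caller (HB_Fit) combines them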
hidden_connections = []
hidden_connections.append(F.relu(self.bn1(self.conv1(x))))
for i in range(len(self.hidden_layers)):
hidden_connections.append(self.hidden_layers[i](hidden_connections[i]))
output_class = []
for i in range(len(self.output_layers)):
#print(hidden_connections[i].shape)
output_class.append(self.output_layers[i](hidden_connections[i]))
return output_class
def Dynamic_ResNet18():
return ResNet(BasicBlock, [1, 2, 2, 2])
| 6,471 | 37.070588 | 104 | py |
OLD3S | OLD3S-main/model/mlp.py | import torch
from torch import nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes):
super(BasicBlock, self).__init__()
self.Linear1 = nn.Linear(
in_planes, planes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.Linear1(x)
out = self.relu(out)
return out
class MLP(nn.Module):
def __init__(self, in_planes, num_classes = 6):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
super(MLP, self).__init__()
self.in_planes = in_planes
self.num_classes = num_classes
self.Linear = nn.Linear(self.in_planes, self.in_planes)
self.hidden_layers = []
self.output_layers = []
self.hidden_layers.append(BasicBlock(self.in_planes,self.in_planes))
self.hidden_layers.append(BasicBlock(self.in_planes, self.in_planes))
self.hidden_layers.append(BasicBlock(self.in_planes, self.in_planes))
self.hidden_layers.append(BasicBlock(self.in_planes, self.in_planes))
self.output_layers.append(self._make_mlp1(self.in_planes))
self.output_layers.append(self._make_mlp2(self.in_planes))
self.output_layers.append(self._make_mlp3(self.in_planes))
self.output_layers.append(self._make_mlp4(self.in_planes))
self.output_layers.append(self._make_mlp4(self.in_planes))
self.hidden_layers = nn.ModuleList(self.hidden_layers) #
self.output_layers = nn.ModuleList(self.output_layers) #
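        # five classifier heads in total: one on the input projection and one after each of the
        # four hidden blocks, matching the five-element alpha vector used by HB_Fit in model.py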
def _make_mlp1(self, in_planes):
classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(in_planes, self.num_classes),
)
return classifier
def _make_mlp2(self, in_planes):
classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(in_planes, self.num_classes),
)
return classifier
def _make_mlp3(self, in_planes):
classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(in_planes, self.num_classes),
)
return classifier
def _make_mlp4(self, in_planes):
classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(in_planes, self.num_classes),
)
return classifier
def _make_mlp5(self, in_planes):
classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(in_planes, self.num_classes),
)
return classifier
def forward(self, x):
hidden_connections = []
hidden_connections.append(F.relu(self.Linear(x)))
for i in range(len(self.hidden_layers)):
hidden_connections.append(self.hidden_layers[i](hidden_connections[i]))
output_class = []
for i in range(len(self.output_layers)):
output = self.output_layers[i](hidden_connections[i])
output_class.append(output)
return output_class | 2,956 | 29.484536 | 84 | py |
OLD3S | OLD3S-main/model/model.py | import torch
import torch.nn as nn
import math
import copy
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from cnn import Dynamic_ResNet18
from autoencoder import *
from mlp import MLP
def normal(t):
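    # standardize a tensor to zero mean and unit standard deviation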
mean, std, var = torch.mean(t), torch.std(t), torch.var(t)
t = (t - mean) / std
return t
class OLD3S_Deep:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, path, lr=0.01, b=0.9, eta = -0.01, s=0.008, m=0.9,
spike=9e-5, thre=10000, RecLossFunc = 'Smooth'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.autoencoder = AutoEncoder_Deep().to(self.device)
self.autoencoder_2 = AutoEncoder_Deep().to(self.device)
self.spike = spike
self.thre = thre
self.beta1 = 1
self.beta2 = 0
self.correct = 0
self.accuracy = 0
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.data_S1 = data_S1
self.label_S1 = label_S1
self.data_S2 = data_S2
self.label_S2 = label_S2
self.lr = Parameter(torch.tensor(lr), requires_grad=False).to(self.device)
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.s1 = Parameter(torch.tensor(0.01), requires_grad=False).to(self.device)
self.num_block = 8
self.alpha1 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.alpha2 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.CELoss = nn.CrossEntropyLoss()
self.KLDivLoss = nn.KLDivLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.Accuracy = []
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
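    # FirstPeriod handles the stream while feature space S1 is still available (including the
    # overlap where S2 has appeared); SecondPeriod then continues on S2 alone, ensembling the
    # classifier trained on S1 with the one trained on S2 via exponentially weighted losses.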
def FirstPeriod(self):
data1 = self.data_S1
data2 = self.data_S2[self.B:]
self.net_model1 = Dynamic_ResNet18().to(self.device)
optimizer_1 = torch.optim.SGD(self.net_model1.parameters(), lr=self.lr)
optimizer_2 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.03)
for (i, x) in enumerate(data1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
x1 = normal(x1)
y = self.label_S1[i].unsqueeze(0).long().to(self.device)
if self.i < self.B:
optimizer_2.zero_grad()
encoded, decoded = self.autoencoder(x1)
loss_1, y_hat = self.HB_Fit(self.net_model1, encoded, y, optimizer_1)
loss_2 = self.BCELoss(torch.sigmoid(decoded), x1)
loss_2.backward()
optimizer_2.step()
if self.i < self.thre:
self.beta2 = self.beta2 + self.spike
self.beta1 = 1 - self.beta2
else:
x2 = data2[self.i - self.B].unsqueeze(0).float().to(self.device)
if i == self.B:
self.net_model2 = copy.deepcopy(self.net_model1)
torch.save(self.net_model1.state_dict(), './data/'+self.path +'/net_model1.pth')
optimizer_1_1 = torch.optim.SGD(self.net_model1.parameters(), lr=self.lr)
optimizer_1_2 = torch.optim.SGD(self.net_model2.parameters(), lr=self.lr)
#optimizer_2_1 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.02)
optimizer_2_2 = torch.optim.SGD(self.autoencoder_2.parameters(), lr=0.08)
encoded_1, decoded_1 = self.autoencoder(x1)
encoded_2, decoded_2 = self.autoencoder_2(x2)
loss_1_1, y_hat_1 = self.HB_Fit(self.net_model1, encoded_1, y, optimizer_1_1)
loss_1_2, y_hat_2 = self.HB_Fit(self.net_model2, encoded_2, y, optimizer_1_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_1_1)
self.cl_2.append(loss_1_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_2_2.zero_grad()
loss_2_1 = self.BCELoss(torch.sigmoid(x2), decoded_2)
loss_2_2 = self.RecLossFunc(encoded_2, encoded_1)
loss_2 = loss_2_1 + loss_2_2
loss_2.backward(retain_graph=True)
optimizer_2_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.net_model2.state_dict(), './data/'+self.path +'/net_model2.pth')
def SecondPeriod(self):
print("use FirstPeriod when i<T1 ")
self.FirstPeriod()
self.correct = 0
data2 = self.data_S2[:self.B]
net_model1 = self.loadmodel('./data/'+self.path +'/net_model1.pth')
net_model2 = self.loadmodel('./data/'+self.path +'/net_model2.pth')
optimizer_3 = torch.optim.SGD(net_model1.parameters(), lr=self.lr)
optimizer_4 = torch.optim.SGD(net_model2.parameters(), lr=self.lr)
optimizer_5 = torch.optim.SGD(self.autoencoder_2.parameters(), lr=self.lr)
self.a_1 = 0.2
self.a_2 = 0.8
self.cl_1 = []
self.cl_2 = []
for (i, x) in enumerate(data2):
x = x.unsqueeze(0).float().to(self.device)
y = self.label_S2[i].unsqueeze(0).long().to(self.device)
encoded, decoded = self.autoencoder_2(x)
optimizer_5.zero_grad()
loss_4, y_hat_2 = self.HB_Fit(net_model2, encoded, y, optimizer_4)
loss_3, y_hat_1 = self.HB_Fit(net_model1, encoded, y, optimizer_3)
loss_5 = self.BCELoss(torch.sigmoid(x), decoded)
loss_5.backward()
optimizer_5.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_3)
self.cl_2.append(loss_4)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/'+self.path +'/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
param.grad.zero_()
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def SmoothReconstruction(self, X, decoded, optimizer):
optimizer.zero_grad()
loss_2 = self.SmoothL1Loss(torch.sigmoid(X), decoded)
loss_2.backward()
optimizer.step()
def loadmodel(self, path):
net_model = Dynamic_ResNet18().to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def HB_Fit(self, model, X, Y, optimizer, block_split=6):
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha1[i] * out
for i in range(self.num_block):
if i < block_split:
if i == 0:
alpha_sum_1 = self.alpha1[i]
else:
alpha_sum_1 += self.alpha1[i]
else:
if i == block_split:
alpha_sum_2 = self.alpha1[i]
else:
alpha_sum_2 += self.alpha1[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
if i < block_split:
loss_ = (self.alpha1[i] / alpha_sum_1) * self.beta1 * loss
else:
loss_ = (self.alpha1[i] / alpha_sum_2) * self.beta2 * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha1[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha1[i] = torch.max(self.alpha1[i], self.s / self.num_block)
self.alpha1[i] = torch.min(self.alpha1[i], self.m)
z_t = torch.sum(self.alpha1)
self.alpha1 = Parameter(self.alpha1 / z_t, requires_grad=False).to(self.device)
return Loss_sum, output
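# Usage sketch (illustrative only; the hyper-parameter values and the 'cifar' path below are
# assumptions, not taken from this repository):
#   learner = OLD3S_Deep(data_S1, label_S1, data_S2, label_S2, T1=50000, t=5000, path='cifar')
#   learner.SecondPeriod()  # internally runs FirstPeriod first, then continues on the S2-only stretch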
class OLD3S_Shallow:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, dimension1, dimension2, path, lr = 0.001, b=0.9,
eta = -0.001, s=0.008, m=0.99, RecLossFunc = 'BCE'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.correct = 0
self.accuracy = 0
self.lr = lr
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.x_S1 = data_S1
self.y_S1 = label_S1
self.x_S2 = data_S2
self.y_S2 = label_S2
self.dimension1 = dimension1
self.dimension2 = dimension2
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.CELoss = nn.CrossEntropyLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.MSELoss = nn.MSELoss()
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.Accuracy = []
self.a_1 = 0.5
self.a_2 = 0.5
self.cl_1 = []
self.cl_2 = []
self.alpha = Parameter(torch.Tensor(5).fill_(1 / 5), requires_grad=False).to(
self.device)
self.autoencoder_1 = AutoEncoder_Shallow(self.dimension1, 1024).to(self.device)
self.autoencoder_2 = AutoEncoder_Shallow(self.dimension2, 1024).to(self.device)
def FirstPeriod(self):
classifier_1 = MLP(1024,2).to(self.device)
optimizer_classifier_1 = torch.optim.Adam(classifier_1.parameters(), self.lr)
optimizer_autoencoder_1 = torch.optim.Adam(self.autoencoder_1.parameters(), self.lr)
# eta = -8 * math.sqrt(1 / math.log(self.t))
for (i, x) in enumerate(self.x_S1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
y = self.y_S1[i].unsqueeze(0).long().to(self.device)
if self.y_S1[i] == 0:
y1 = torch.Tensor([1, 0]).reshape(1, 2).float().to(self.device)
else:
y1 = torch.Tensor([0, 1]).reshape(1, 2).float().to(self.device)
if self.i < self.B: # Before evolve
encoded_1, decoded_1 = self.autoencoder_1(x1)
optimizer_autoencoder_1.zero_grad()
y_hat, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
loss_autoencoder_1 = self.BCELoss(torch.sigmoid(decoded_1), x1)
loss_autoencoder_1.backward()
optimizer_autoencoder_1.step()
else:
x2 = self.x_S2[self.i].unsqueeze(0).float().to(self.device)
if i == self.B:
classifier_2 = copy.deepcopy(classifier_1)
torch.save(classifier_1.state_dict(),
'./data/'+self.path +'/net_model1.pth')
optimizer_classifier_2 = torch.optim.Adam(classifier_2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.Adam(self.autoencoder_2.parameters(), self.lr)
encoded_2, decoded_2 = self.autoencoder_2(x2)
encoded_1, decoded_1 = self.autoencoder_1(x1)
y_hat_2, loss_classifier_2 = self.HB_Fit(classifier_2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 50:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_autoencoder_2.zero_grad()
loss_2_0 = self.BCELoss(torch.sigmoid(decoded_2), x2)
loss_2_1 = self.RecLossFunc(encoded_2, encoded_1)
loss_autoencoder_2 = loss_2_0 + loss_2_1
loss_autoencoder_2.backward()
optimizer_autoencoder_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(classifier_2.state_dict(), './data/'+self.path +'/net_model2.pth')
def SecondPeriod(self):
print('use FESA when i<T1')
self.FirstPeriod()
self.correct = 0
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_classifier_1_FES = torch.optim.Adam(net_model1.parameters(), self.lr)
optimizer_classifier_2_FES = torch.optim.Adam(net_model2.parameters(), self.lr)
optimizer_autoencoder_2_FES = torch.optim.Adam(self.autoencoder_2.parameters(), self.lr)
data_2 = self.x_S2[:self.B]
label_2 = self.y_S1[:self.B]
self.a_1 = 0.2
self.a_2 = 0.8
self.cl_1 = []
self.cl_2 = []
# eta = -8 * math.sqrt(1 / math.log(self.B))
for (i, x) in enumerate(data_2):
x = x.unsqueeze(0).float().to(self.device)
self.i = i + self.T1
y = label_2[i].long().to(self.device)
if label_2[i] == 0:
y1 = torch.Tensor([1, 0]).unsqueeze(0).float().to(self.device)
else:
y1 = torch.Tensor([0, 1]).unsqueeze(0).float().to(self.device)
encoded_2, decoded_2 = self.autoencoder_2(x)
optimizer_autoencoder_2_FES.zero_grad()
y_hat_2, loss_classifier_2 = self.HB_Fit(net_model2,
encoded_2, y1, optimizer_classifier_2_FES)
y_hat_1, loss_classifier_1 = self.HB_Fit(net_model1,
encoded_2, y1, optimizer_classifier_1_FES)
loss_autoencoder_2 = self.BCELoss(torch.sigmoid(decoded_2), x)
loss_autoencoder_2.backward()
optimizer_autoencoder_2_FES.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 50:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/'+self.path +'/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
# param.grad.detach_()
param.grad.zero_() # data.fill_(0)
def loadmodel(self, path):
net_model = MLP(1024,2).to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def HB_Fit(self, model, X, Y, optimizer): # hedge backpropagation
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
            loss = self.BCELoss(torch.sigmoid(out), Y)  # heads emit raw logits, so map them to [0, 1] before BCE
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha[i] * out
        for i in range(5):  # accumulate the weights of the five classifier heads
if i == 0:
alpha_sum_1 = self.alpha[i]
else:
alpha_sum_1 += self.alpha[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
loss_ = (self.alpha[i] / alpha_sum_1) * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / 5)
self.alpha[i] = torch.min(self.alpha[i], self.m) # exploration-exploitation
z_t = torch.sum(self.alpha)
self.alpha = Parameter(self.alpha / z_t, requires_grad=False).to(self.device)
return output, Loss_sum
class OLD3S_Reuter:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, dimension1, dimension2, path, lr=0.01, b=0.9,
eta=-0.001, s=0.008, m=0.99, RecLossFunc='BCE'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.correct = 0
self.accuracy = 0
self.lr = lr
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.x_S1 = data_S1
self.y_S1 = label_S1
self.x_S2 = data_S2
self.y_S2 = label_S2
self.dimension1 = dimension1
self.dimension2 = dimension2
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.CELoss = nn.CrossEntropyLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.MSELoss = nn.MSELoss()
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.Accuracy = []
self.a_1 = 0.5
self.a_2 = 0.5
self.cl_1 = []
self.cl_2 = []
self.alpha = Parameter(torch.Tensor(5).fill_(1 / 5), requires_grad=False).to(
self.device)
self.autoencoder_1 = AutoEncoder_Shallow(self.dimension1, 1024).to(self.device)
self.autoencoder_2 = AutoEncoder_Shallow(self.dimension2, 1024).to(self.device)
def FirstPeriod(self):
classifier_1 = MLP(1024,6).to(self.device)
optimizer_classifier_1 = torch.optim.SGD(classifier_1.parameters(), self.lr)
optimizer_autoencoder_1 = torch.optim.SGD(self.autoencoder_1.parameters(), self.lr)
# eta = -8 * math.sqrt(1 / math.log(self.t))
for (i, x) in enumerate(self.x_S1):
self.i = i
x1 = x.unsqueeze(0).float().to(self.device)
y1 = self.y_S1[i].long().to(self.device)
if self.i < self.B: # Before evolve
encoded_1, decoded_1 = self.autoencoder_1(x1)
optimizer_autoencoder_1.zero_grad()
y_hat, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
loss_autoencoder_1 = self.BCELoss(torch.sigmoid(decoded_1), x1)
loss_autoencoder_1.backward()
optimizer_autoencoder_1.step()
else:
x2 = self.x_S2[self.i].unsqueeze(0).float().to(self.device)
if i == self.B:
classifier_2 = copy.deepcopy(classifier_1)
torch.save(classifier_1.state_dict(),
'./data/' + self.path + '/net_model1.pth')
optimizer_classifier_2 = torch.optim.SGD(classifier_2.parameters(), self.lr)
optimizer_autoencoder_2 = torch.optim.SGD(self.autoencoder_2.parameters(), 0.9)
encoded_2, decoded_2 = self.autoencoder_2(x2)
encoded_1, decoded_1 = self.autoencoder_1(x1)
y_hat_2, loss_classifier_2 = self.HB_Fit(classifier_2,
encoded_2, y1, optimizer_classifier_2)
y_hat_1, loss_classifier_1 = self.HB_Fit(classifier_1,
encoded_1, y1, optimizer_classifier_1)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 50:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_autoencoder_2.zero_grad()
loss_2_0 = self.BCELoss(torch.sigmoid(decoded_2), x2)
loss_2_1 = self.RecLossFunc(encoded_2, encoded_1)
loss_autoencoder_2 = loss_2_0 + loss_2_1
loss_autoencoder_2.backward()
optimizer_autoencoder_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(classifier_2.state_dict(), './data/' + self.path + '/net_model2.pth')
def SecondPeriod(self):
print('use FESA when i<T1')
self.FirstPeriod()
self.correct = 0
net_model1 = self.loadmodel('./data/' + self.path + '/net_model1.pth')
net_model2 = self.loadmodel('./data/' + self.path + '/net_model2.pth')
optimizer_classifier_1_FES = torch.optim.SGD(net_model1.parameters(), self.lr)
optimizer_classifier_2_FES = torch.optim.SGD(net_model2.parameters(), self.lr)
optimizer_autoencoder_2_FES = torch.optim.SGD(self.autoencoder_2.parameters(), self.lr)
data_2 = self.x_S2[:self.B]
label_2 = self.y_S1[:self.B]
self.a_1 = 0.2
self.a_2 = 0.8
self.cl_1 = []
self.cl_2 = []
# eta = -8 * math.sqrt(1 / math.log(self.B))
for (i, x) in enumerate(data_2):
x = x.unsqueeze(0).float().to(self.device)
self.i = i + self.T1
y1 = label_2[i].long().to(self.device)
encoded_2, decoded_2 = self.autoencoder_2(x)
optimizer_autoencoder_2_FES.zero_grad()
y_hat_2, loss_classifier_2 = self.HB_Fit(net_model2,
encoded_2, y1, optimizer_classifier_2_FES)
y_hat_1, loss_classifier_1 = self.HB_Fit(net_model1,
encoded_2, y1, optimizer_classifier_1_FES)
loss_autoencoder_2 = self.BCELoss(torch.sigmoid(decoded_2), x)
loss_autoencoder_2.backward()
optimizer_autoencoder_2_FES.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_classifier_1)
self.cl_2.append(loss_classifier_2)
if len(self.cl_1) == 50:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y1).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 500 == 0:
self.accuracy = self.correct / 500
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/' + self.path + '/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
# param.grad.detach_()
param.grad.zero_() # data.fill_(0)
def loadmodel(self, path):
net_model = MLP(1024,6).to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def HB_Fit(self, model, X, Y, optimizer): # hedge backpropagation
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha[i] * out
        for i in range(5):  # accumulate the weights of the five classifier heads
if i == 0:
alpha_sum_1 = self.alpha[i]
else:
alpha_sum_1 += self.alpha[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
loss_ = (self.alpha[i] / alpha_sum_1) * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / 5)
self.alpha[i] = torch.min(self.alpha[i], self.m) # exploration-exploitation
z_t = torch.sum(self.alpha)
self.alpha = Parameter(self.alpha / z_t, requires_grad=False).to(self.device)
return output, Loss_sum
class OLD3S_Mnist:
def __init__(self, data_S1, label_S1, data_S2, label_S2, T1, t, path, lr=0.01, b=0.9, eta = -0.01, s=0.008, m=0.9,
spike=9e-5, thre=10000, RecLossFunc = 'Smooth'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.autoencoder = AutoEncoder_Mnist().to(self.device)
self.autoencoder_2 = AutoEncoder_Mnist().to(self.device)
self.spike = spike
self.thre = thre
self.beta1 = 1
self.beta2 = 0
self.correct = 0
self.accuracy = 0
self.T1 = T1
self.t = t
self.B = self.T1 - self.t
self.path = path
self.data_S1 = data_S1
self.label_S1 = label_S1
self.data_S2 = data_S2
self.label_S2 = label_S2
self.lr = Parameter(torch.tensor(lr), requires_grad=False).to(self.device)
self.b = Parameter(torch.tensor(b), requires_grad=False).to(self.device)
self.eta = Parameter(torch.tensor(eta), requires_grad=False).to(self.device)
self.s = Parameter(torch.tensor(s), requires_grad=False).to(self.device)
self.m = Parameter(torch.tensor(m), requires_grad=False).to(self.device)
self.s1 = Parameter(torch.tensor(0.01), requires_grad=False).to(self.device)
self.num_block = 8
self.alpha1 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.alpha2 = Parameter(torch.Tensor(self.num_block).fill_(1 / self.num_block), requires_grad=False).to(
self.device)
self.RecLossFunc = self.ChoiceOfRecLossFnc(RecLossFunc)
self.CELoss = nn.CrossEntropyLoss()
self.KLDivLoss = nn.KLDivLoss()
self.BCELoss = nn.BCELoss()
self.SmoothL1Loss = nn.SmoothL1Loss()
self.Accuracy = []
self.a_1 = 0.8
self.a_2 = 0.2
self.cl_1 = []
self.cl_2 = []
def FirstPeriod(self):
data1 = self.data_S1
data2 = self.data_S2[self.B:]
self.net_model1 = Dynamic_ResNet18().to(self.device)
optimizer_1 = torch.optim.SGD(self.net_model1.parameters(), lr=self.lr)
optimizer_2 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.03)
for (i, x) in enumerate(data1):
self.i = i
x1 = x.reshape(1, 28, 28).unsqueeze(0).float().to(self.device)
x1 = normal(x1)
y = self.label_S1[i].unsqueeze(0).long().to(self.device)
if self.i < self.B:
optimizer_2.zero_grad()
encoded, decoded = self.autoencoder(x1)
loss_1, y_hat = self.HB_Fit(self.net_model1, encoded, y, optimizer_1)
loss_2 = self.BCELoss(torch.sigmoid(decoded), x1)
loss_2.backward()
optimizer_2.step()
if self.i < self.thre:
self.beta2 = self.beta2 + self.spike
self.beta1 = 1 - self.beta2
else:
x2 = data2[self.i - self.B].reshape(1, 28, 28).unsqueeze(0).float().to(self.device)
x2 = normal(x2)
if i == self.B:
self.net_model2 = copy.deepcopy(self.net_model1)
torch.save(self.net_model1.state_dict(), './data/'+self.path +'/net_model1.pth')
optimizer_1_1 = torch.optim.SGD(self.net_model1.parameters(), lr=self.lr)
optimizer_1_2 = torch.optim.SGD(self.net_model2.parameters(), lr=self.lr)
#optimizer_2_1 = torch.optim.SGD(self.autoencoder.parameters(), lr=0.02)
optimizer_2_2 = torch.optim.SGD(self.autoencoder_2.parameters(), lr=0.08)
encoded_1, decoded_1 = self.autoencoder(x1)
encoded_2, decoded_2 = self.autoencoder_2(x2)
loss_1_1, y_hat_1 = self.HB_Fit(self.net_model1, encoded_1, y, optimizer_1_1)
loss_1_2, y_hat_2 = self.HB_Fit(self.net_model2, encoded_2, y, optimizer_1_2)
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_1_1)
self.cl_2.append(loss_1_2)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
optimizer_2_2.zero_grad()
loss_2_1 = self.BCELoss(torch.sigmoid(x2), decoded_2)
loss_2_2 = self.RecLossFunc(encoded_2, encoded_1)
loss_2 = loss_2_1 + loss_2_2
loss_2.backward(retain_graph=True)
optimizer_2_2.step()
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 0")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.net_model2.state_dict(), './data/'+self.path +'/net_model2.pth')
def SecondPeriod(self):
print("use FirstPeriod when i<T1 ")
self.FirstPeriod()
self.correct = 0
data2 = self.data_S2[:self.B]
net_model1 = self.loadmodel('./data/'+self.path +'/net_model1.pth')
net_model2 = self.loadmodel('./data/'+self.path +'/net_model2.pth')
optimizer_3 = torch.optim.SGD(net_model1.parameters(), lr=self.lr)
optimizer_4 = torch.optim.SGD(net_model2.parameters(), lr=self.lr)
optimizer_5 = torch.optim.SGD(self.autoencoder_2.parameters(), lr=self.lr)
self.a_1 = 0.2
self.a_2 = 0.8
self.cl_1 = []
self.cl_2 = []
for (i, x) in enumerate(data2):
x = x.reshape(1, 28, 28).unsqueeze(0).float().to(self.device)
x = normal(x)
y = self.label_S2[i].unsqueeze(0).long().to(self.device)
encoded, decoded = self.autoencoder_2(x)
optimizer_5.zero_grad()
loss_4, y_hat_2 = self.HB_Fit(net_model2, encoded, y, optimizer_4)
loss_3, y_hat_1 = self.HB_Fit(net_model1, encoded, y, optimizer_3)
loss_5 = self.BCELoss(torch.sigmoid(x), decoded)
loss_5.backward()
optimizer_5.step()
y_hat = self.a_1 * y_hat_1 + self.a_2 * y_hat_2
self.cl_1.append(loss_3)
self.cl_2.append(loss_4)
if len(self.cl_1) == 100:
self.cl_1.pop(0)
self.cl_2.pop(0)
try:
a_cl_1 = math.exp(self.eta * sum(self.cl_1))
a_cl_2 = math.exp(self.eta * sum(self.cl_2))
self.a_1 = (a_cl_1) / (a_cl_2 + a_cl_1)
except OverflowError:
self.a_1 = float('inf')
self.a_2 = 1 - self.a_1
_, predicted = torch.max(y_hat.data, 1)
self.correct += (predicted == y).item()
if i == 0:
print("finish 1")
if (i + 1) % 100 == 0:
print("step : %d" % (i + 1), end=", ")
print("correct: %d" % (self.correct))
if (i + 1) % 1000 == 0:
self.accuracy = self.correct / 1000
self.Accuracy.append(self.accuracy)
self.correct = 0
print("Accuracy: ", self.accuracy)
torch.save(self.Accuracy, './data/'+self.path +'/Accuracy')
def zero_grad(self, model):
for child in model.children():
for param in child.parameters():
if param.grad is not None:
param.grad.zero_()
def ChoiceOfRecLossFnc(self, name):
if name == 'Smooth':
return nn.SmoothL1Loss()
elif name == 'KL':
return nn.KLDivLoss()
elif name == 'BCE':
return nn.BCELoss()
else:
            raise ValueError("Unknown reconstruction loss name: '%s' (expected 'Smooth', 'KL' or 'BCE')" % name)
def SmoothReconstruction(self, X, decoded, optimizer):
optimizer.zero_grad()
loss_2 = self.SmoothL1Loss(torch.sigmoid(X), decoded)
loss_2.backward()
optimizer.step()
def loadmodel(self, path):
net_model = Dynamic_ResNet18().to(self.device)
pretrain_dict = torch.load(path)
model_dict = net_model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
model_dict.update(pretrain_dict)
net_model.load_state_dict(model_dict)
net_model.to(self.device)
return net_model
def HB_Fit(self, model, X, Y, optimizer, block_split=6):
predictions_per_layer = model.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
loss = self.CELoss(out, Y)
losses_per_layer.append(loss)
        output = torch.zeros_like(predictions_per_layer[0])
for i, out in enumerate(predictions_per_layer):
output += self.alpha1[i] * out
for i in range(self.num_block):
if i < block_split:
if i == 0:
alpha_sum_1 = self.alpha1[i]
else:
alpha_sum_1 += self.alpha1[i]
else:
if i == block_split:
alpha_sum_2 = self.alpha1[i]
else:
alpha_sum_2 += self.alpha1[i]
Loss_sum = torch.zeros_like(losses_per_layer[0])
for i, loss in enumerate(losses_per_layer):
if i < block_split:
loss_ = (self.alpha1[i] / alpha_sum_1) * self.beta1 * loss
else:
loss_ = (self.alpha1[i] / alpha_sum_2) * self.beta2 * loss
Loss_sum += loss_
optimizer.zero_grad()
Loss_sum.backward(retain_graph=True)
optimizer.step()
for i in range(len(losses_per_layer)):
self.alpha1[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha1[i] = torch.max(self.alpha1[i], self.s / self.num_block)
self.alpha1[i] = torch.min(self.alpha1[i], self.m)
z_t = torch.sum(self.alpha1)
self.alpha1 = Parameter(self.alpha1 / z_t, requires_grad=False).to(self.device)
return Loss_sum, output
| 42,413 | 40.258755 | 118 | py |
OLD3S | OLD3S-main/model/vae.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from loaddatasets import *
from torch import optim
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
import os
import random
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from scipy.stats import norm
latent_dim = 100
inter_dim = 256
mid_dim = (256, 2, 2)
mid_num = 1
for i in mid_dim:
mid_num *= i
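# mid_dim is the ConvVAE encoder output shape (256 channels at 2x2 for 32x32 inputs); mid_num is its flattened size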
class ConvVAE(nn.Module):
def __init__(self, latent=latent_dim):
super(ConvVAE, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 64, 6, 2, 1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 128, 6, 2, 1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 256, 6, 2, 1),
nn.BatchNorm2d(256),
nn.ReLU(),
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 2),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 3, 2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.ConvTranspose2d(64, 32, 3, 1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, 1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, 3, 1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.ConvTranspose2d(16, 3, 2, 2, 3),
nn.Sigmoid()
)
self.fc1 = nn.Linear(mid_num, inter_dim)
self.fc2 = nn.Linear(inter_dim, latent * 2)
self.fcr2 = nn.Linear(latent, inter_dim)
self.fcr1 = nn.Linear(inter_dim, mid_num)
def reparameterise(self, mu, logvar):
epsilon = torch.randn_like(mu)
return mu + epsilon * torch.exp(logvar / 2)
def forward(self, x):
batch = x.size(0)
x = self.encoder(x)
x = self.fc1(x.view(batch, -1))
h = self.fc2(x)
mu, logvar = h.chunk(2, dim=-1)
z = self.reparameterise(mu, logvar)
decode = self.fcr2(z)
decode = self.fcr1(decode)
recon_x = self.decoder(decode.view(batch, *mid_dim))
#recon_x = self.decoder(decode.view(-1,1,32,32))
return z, recon_x, mu, logvar
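# Minimal usage sketch for ConvVAE (assumes 32x32 RGB inputs, e.g. CIFAR-style batches;
# the dummy tensor and the KL term below are illustrative, not the project's training loop):
#   vae = ConvVAE()
#   z, recon_x, mu, logvar = vae(torch.randn(8, 3, 32, 32))   # z: (8, 100), recon_x: (8, 3, 32, 32)
#   kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())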
class VAE_Deep(nn.Module):
def __init__(self):
super(VAE_Deep, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 8, 3, 1,1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.Conv2d(8, 8, 3, 1, 1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.Conv2d(8, 1, 3, 1, 1),
nn.BatchNorm2d(1),
nn.ReLU(),
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(1, 8, 3, 1, 1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.ConvTranspose2d(8, 8, 3, 1, 1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.ConvTranspose2d(8, 3, 3, 1,1),
nn.BatchNorm2d(3),
nn.Sigmoid()
)
self.fc1 = nn.Linear(32, 32)
self.fc2 = nn.Linear(32, 32)
def reparameterise(self, mu, logvar):
epsilon = torch.randn_like(mu)
return mu + epsilon * torch.exp(logvar / 2)
def forward(self, x):
batch = x.size(0)
x = self.encoder(x)
mu = self.fc1(x)
logvar = self.fc2(x)
z = self.reparameterise(mu, logvar)
recon_x = self.decoder(z)
#recon_x = self.decoder(decode.view(-1,1,32,32))
return z, recon_x, mu, logvar
class VAE_Mnist(nn.Module):
def __init__(self, input_dim, h_dim, z_dim):
        # Initialise the parent nn.Module state
super(VAE_Mnist, self).__init__()
self.input_dim = input_dim
self.h_dim = h_dim
self.z_dim = z_dim
        # Encoder: [b, input_dim] => [b, z_dim]
        self.fc1 = nn.Linear(input_dim, h_dim)  # first fully connected layer
        self.fc2 = nn.Linear(h_dim, z_dim)  # mu
        self.fc3 = nn.Linear(h_dim, z_dim)  # log_var
        # Decoder: [b, z_dim] => [b, input_dim]
self.fc4 = nn.Linear(z_dim, h_dim)
self.fc5 = nn.Linear(h_dim, input_dim)
def forward(self, x):
batch_size = x.shape[0]
x = x.view(batch_size, self.input_dim)
mu, log_var = self.encode(x)
sampled_z = self.reparameterization(mu, log_var)
x_hat = self.decode(sampled_z)
return sampled_z, x_hat, mu, log_var
def encode(self, x):
h = F.relu(self.fc1(x))
mu = self.fc2(h)
log_var = self.fc3(h)
return mu, log_var
def reparameterization(self, mu, log_var):
sigma = torch.exp(log_var * 0.5)
eps = torch.randn_like(sigma)
return mu + sigma * eps
def decode(self, z):
h = F.relu(self.fc4(z))
x_hat = torch.sigmoid(self.fc5(h))
return x_hat
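# Loss sketch for VAE_Mnist (assumed usage; the actual training loop lives elsewhere in the repo).
# The sizes match the MNIST branch of train.py (input_dim=784, h_dim=128, z_dim=20):
#   vae = VAE_Mnist(input_dim=784, h_dim=128, z_dim=20)
#   x = torch.rand(16, 1, 28, 28)
#   z, x_hat, mu, log_var = vae(x)
#   recon = F.binary_cross_entropy(x_hat, x.view(16, 784), reduction='sum')
#   kl = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
#   loss = recon + kl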
class VAE_Shallow(nn.Module):
def __init__(self, input_dim, h_dim, z_dim):
        # Initialise the parent nn.Module state
super(VAE_Shallow, self).__init__()
self.input_dim = input_dim
self.h_dim = h_dim
self.z_dim = z_dim
        # Encoder: [b, input_dim] => [b, z_dim]
        self.fc1 = nn.Linear(input_dim, h_dim)  # first fully connected layer
        self.fc2 = nn.Linear(h_dim, z_dim)  # mu
        self.fc3 = nn.Linear(h_dim, z_dim)  # log_var
        # Decoder: [b, z_dim] => [b, input_dim]
self.fc4 = nn.Linear(z_dim, h_dim)
self.fc5 = nn.Linear(h_dim, input_dim)
def forward(self, x):
mu, log_var = self.encode(x)
sampled_z = self.reparameterization(mu, log_var)
x_hat = self.decode(sampled_z)
return sampled_z, x_hat, mu, log_var
def encode(self, x):
h = F.relu(self.fc1(x))
mu = self.fc2(h)
log_var = self.fc3(h)
return mu, log_var
def reparameterization(self, mu, log_var):
sigma = torch.exp(log_var * 0.5)
eps = torch.randn_like(sigma)
return mu + sigma * eps
def decode(self, z):
h = F.relu(self.fc4(z))
x_hat = torch.sigmoid(self.fc5(h))
return x_hat
| 6,408 | 25.593361 | 60 | py |
OLD3S | OLD3S-main/model/autoencoder.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module): # BasicBlock from ResNet [He et al.2016]
EXPANSION = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.EXPANSION*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.EXPANSION*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.EXPANSION*planes)
)
def forward(self, x):
out = self.conv1(x)
out = F.relu(self.bn1(out))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class AutoEncoder_Deep(nn.Module):
def __init__(self):
super(AutoEncoder_Deep,self).__init__()
self.conv1 = nn.Conv2d(3, 12, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(12)
self.conv2 = nn.ConvTranspose2d(12, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(1)
self.encoder = BasicBlock(12, 1)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(1, 12, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(12),
nn.ReLU(),
nn.ConvTranspose2d(12, 12, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(12),
nn.ReLU(),
nn.ConvTranspose2d(12, 3, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(3)
)
def forward(self, x):
encoded = self.encoder(F.relu(self.bn1(self.conv1(x)))) # maps the feature size from 3*32*32 to 32*32
decoded = self.decoder(encoded)
return encoded, decoded
class AutoEncoder_Shallow(nn.Module):
def __init__(self, inplanes, outplanes):
super(AutoEncoder_Shallow, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(inplanes, outplanes),
nn.ReLU()
)
self.decoder = nn.Sequential(
nn.Linear(outplanes, inplanes),
nn.ReLU()
)
def forward(self, x):
encoder = self.encoder(x)
decoder = self.decoder(encoder)
return encoder, decoder
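# Usage sketch for AutoEncoder_Shallow (feature sizes borrowed from the Reuters EN->FR branch
# of train.py; requires `import torch` for the dummy tensor, which this module does not import):
#   ae = AutoEncoder_Shallow(2000, 2500)
#   z, x_rec = ae(torch.randn(4, 2000))   # z: (4, 2500), x_rec: (4, 2000)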
class BasicBlock_Mnist(nn.Module): # BasicBlock from ResNet [He et al.2016]
EXPANSION = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock_Mnist, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.EXPANSION*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.EXPANSION*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.EXPANSION*planes)
)
def forward(self, x):
out = self.conv1(x)
out = F.relu(self.bn1(out))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class AutoEncoder_Mnist(nn.Module):
def __init__(self):
super(AutoEncoder_Mnist,self).__init__()
self.conv1 = nn.Conv2d(1, 12, kernel_size=3, stride=1, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(12)
self.conv2 = nn.ConvTranspose2d(12, 3, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(3)
self.encoder = BasicBlock(12, 1)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(1, 12, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(12),
nn.ReLU(),
nn.ConvTranspose2d(12, 12, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(12),
nn.ReLU(),
nn.ConvTranspose2d(12, 1, kernel_size=3, stride=1, padding=3, bias=False),
nn.BatchNorm2d(1)
)
def forward(self, x):
        encoded = self.encoder(F.relu(self.bn1(self.conv1(x))))  # maps the 1-channel MNIST input to a single-channel feature map
decoded = self.decoder(encoded)
return encoded, decoded
| 4,793 | 35.318182 | 113 | py |
OLD3S | OLD3S-main/model/train.py | import random
import numpy as np
import argparse
from model import *
from loaddatasets import *
from model_vae import *
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser(description="Options")
parser.add_argument('-DataName', action='store', dest='DataName', default='enfr')
parser.add_argument('-AutoEncoder', action='store', dest='AutoEncoder', default='VAE')
    parser.add_argument('-beta', action='store', dest='beta', type=float, default=0.9)
    parser.add_argument('-eta', action='store', dest='eta', type=float, default=-0.01)
    parser.add_argument('-learningrate', action='store', dest='learningrate', type=float, default=0.01)
parser.add_argument('-RecLossFunc', action='store', dest='RecLossFunc', default='Smooth')
args = parser.parse_args()
learner = OLD3S(args)
learner.train()
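# Example invocation (assumed to be run from the model/ directory; the dataset loaders come
# from loaddatasets.py, and the flags are the ones defined above):
#   python train.py -DataName cifar -AutoEncoder VAE -RecLossFunc Smooth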
class OLD3S:
def __init__(self, args):
'''
Data is stored as list of dictionaries.
Label is stored as list of scalars.
'''
self.datasetname = args.DataName
self.autoencoder = args.AutoEncoder
self.beta = args.beta
self.eta = args.eta
self.learningrate = args.learningrate
self.RecLossFunc = args.RecLossFunc
def train(self):
if self.datasetname == 'cifar':
            print('cifar training starts')
x_S1, y_S1, x_S2, y_S2 = loadcifar()
train = OLD3S_Deep(x_S1, y_S1, x_S2, y_S2, 50000, 5000,'parameter_cifar')
train.SecondPeriod()
elif self.datasetname == 'svhn':
            print('svhn training starts')
x_S1, y_S1, x_S2, y_S2 = loadsvhn()
train = OLD3S_Deep(x_S1, y_S1, x_S2, y_S2, 73257, 7257,'parameter_svhn')
train.SecondPeriod()
elif self.datasetname == 'mnist':
            print('mnist training starts')
x_S1, y_S1, x_S2, y_S2 = loadmnist()
if self.autoencoder == 'VAE':
train = OLD3S_Mnist_VAE(x_S1, y_S1, x_S2, y_S2, 60000, 6000,dimension1 = 784, dimension2 = 784,
hidden_size = 128, latent_size = 20, classes = 10, path = 'parameter_mnist')
else:
train = OLD3S_Mnist(x_S1, y_S1, x_S2, y_S2, 60000, 6000, 'parameter_mnist')
train.SecondPeriod()
elif self.datasetname == 'magic':
            print('magic training starts')
x_S1, y_S1, x_S2, y_S2 = loadmagic()
train = OLD3S_Shallow(x_S1, y_S1, x_S2, y_S2, 19019, 1919, 10, 30, 'parameter_magic')
train.SecondPeriod()
elif self.datasetname == 'adult':
            print('adult training starts')
x_S1, y_S1, x_S2, y_S2 = loadadult()
train = OLD3S_Shallow(x_S1, y_S1, x_S2, y_S2, 32559, 3559, 14, 30, 'parameter_adult')
train.SecondPeriod()
elif self.datasetname == 'enfr':
            print('reuter-en-fr training starts')
x_S1, y_S1, x_S2, y_S2 = loadreuter('EN_FR')
if self.autoencoder == 'VAE':
train = OLD3S_Shallow_VAE(x_S1, y_S1, x_S2, y_S2, 18758, 2758,2000, 2500,
hidden_size = 1024, latent_size = 128, classes = 6, path = 'parameter_enfr')
else:
train = OLD3S_Reuter(x_S1, y_S1, x_S2, y_S2, 18758, 2758, 2000, 2500, 'parameter_enfr')
train.SecondPeriod()
elif self.datasetname == 'enit':
            print('reuter-en-it training starts')
x_S1, y_S1, x_S2, y_S2 = loadreuter('EN_IT')
train = OLD3S_Reuter(x_S1, y_S1, x_S2, y_S2, 18758, 2758, 2000, 1500, 'parameter_enit')
train.SecondPeriod()
elif self.datasetname == 'ensp':
            print('reuter-en-sp training starts')
x_S1, y_S1, x_S2, y_S2 = loadreuter('EN_SP')
train = OLD3S_Reuter(x_S1, y_S1, x_S2, y_S2, 18758, 2758, 2000, 1000, 'parameter_ensp')
train.SecondPeriod()
elif self.datasetname == 'frit':
            print('reuter-fr-it training starts')
x_S1, y_S1, x_S2, y_S2 = loadreuter('FR_IT')
train = OLD3S_Reuter(x_S1, y_S1, x_S2, y_S2, 26648, 3648, 2500, 1500, 'parameter_frit')
train.SecondPeriod()
elif self.datasetname == 'frsp':
            print('reuter-fr-sp training starts')
x_S1, y_S1, x_S2, y_S2 = loadreuter('FR_SP')
train = OLD3S_Reuter(x_S1, y_S1, x_S2, y_S2, 26648, 3648, 2500, 1000, 'parameter_frsp')
train.SecondPeriod()
else:
            print('Unknown dataset name; choose one of: cifar, svhn, mnist, magic, adult, enfr, enit, ensp, frit, frsp')
if __name__ == '__main__':
setup_seed(30)
main()
| 4,771 | 41.990991 | 111 | py |
OLD3S | OLD3S-main/model/metric.py | import numpy as np
import torch
from matplotlib import pyplot as plt
from numpy import *
from scipy.interpolate import make_interp_spline
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def plot_reuter(y_axi_1, x, path, a, b):
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.plot(range(20))
ax.axvspan(a, b, alpha=0.5, color='#86C6F4')
plt.grid()
x_smooth = np.linspace(x.min(), x.max(), 25)
y_smooth_1 = make_interp_spline(x, y_axi_1)(x_smooth)
ACR(y_smooth_1)
STD(25, y_axi_1, y_smooth_1)
plt.plot(x_smooth, y_smooth_1, color='#7E2F8E', marker='d')
ax.set_xlim(250, a + b)
plt.tick_params(labelsize=20)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel('# of instances', fontsize=30)
plt.ylabel('OCA', fontsize=30)
plt.tight_layout()
plt.savefig(path)
def ACR(accuracy):
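    # Average gap between the peak accuracy on the curve and the accuracy at each step;
    # smaller values mean the learner stays closer to its best performance.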
f_star = max(accuracy)
acr = mean([f_star - i for i in accuracy])
print(acr)
def STD(filternumber, elements,smoothlist):
gap = len(elements)//filternumber
std = 0
for i in range(filternumber):
for j in range(int(gap)):
std += np.abs(smoothlist[i] - elements[i*gap: (i+1)*gap][j])
print(std/len(elements))
x = np.array([i for i in range(1000, 50000 + 45000 + 1, 1000)])
path = r'D:\pycharmproject\OLD3S\model\data\CIFAR-Hedge.png'
y_axi_1 = np.array(torch.load('./data/parameter_cifar/parameter_cifarAccuracy')).tolist()
plot_reuter(y_axi_1, x, path, 45000, 50000) | 1,544 | 29.9 | 89 | py |
CTC2021 | CTC2021-main/ctc_gector/convert_from_sentpair_to_edits.py | # coding:utf-8
import sys
import Levenshtein
import json
src_path = sys.argv[1]
tgt_path = sys.argv[2]
sid_path = sys.argv[3]
with open(src_path) as f_src, open(tgt_path) as f_tgt, open(sid_path) as f_sid:
lines_src = f_src.readlines()
lines_tgt = f_tgt.readlines()
lines_sid = f_sid.readlines()
assert len(lines_src) == len(lines_tgt) == len(lines_sid)
for i in range(len(lines_src)):
src_line = lines_src[i].strip().replace(' ', '')
tgt_line = lines_tgt[i].strip().replace(' ', '')
sid = lines_sid[i].strip().split('\t')[0]
edits = Levenshtein.opcodes(src_line, tgt_line)
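        # Levenshtein.opcodes returns (tag, i1, i2, j1, j2) tuples in the style of
        # difflib.SequenceMatcher.get_opcodes(), e.g. ('replace', 3, 4, 3, 4) meaning
        # src_line[3:4] should become tgt_line[3:4].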
result = []
for edit in edits:
if "。" in tgt_line[edit[3]:edit[4]]: # rm 。
continue
if edit[0] == "insert":
result.append((str(edit[1]), "缺失", "", tgt_line[edit[3]:edit[4]]))
elif edit[0] == "replace":
result.append((str(edit[1]), "别字", src_line[edit[1]:edit[2]], tgt_line[edit[3]:edit[4]]))
elif edit[0] == "delete":
result.append((str(edit[1]), "冗余", src_line[edit[1]:edit[2]], ""))
out_line = ""
for res in result:
out_line += ', '.join(res) + ', '
if out_line:
print(sid + ', ' + out_line.strip())
else:
print(sid + ', -1')
| 1,360 | 29.931818 | 105 | py |
CTC2021 | CTC2021-main/ctc_gector/evaluate.py | import traceback
import argparse
def read_input_file(input_file):
pid_to_text = {}
with open(input_file, 'r') as f:
for line in f:
pid = line[:9]
text = line[9:].strip()
pid_to_text[pid] = text
return pid_to_text
def read_label_file(pid_to_text, label_file):
    '''
    Read the correction labels keyed by pid.
    :param pid_to_text: mapping from pid to the source text
    :param label_file: path to the label file
    :return: (error_set, det_set, cor_set)
    '''
error_set, det_set, cor_set = set(), set(), set()
with open(label_file, 'r', encoding='utf-8') as f:
for line in f:
terms = line.strip().split(',')
terms = [t.strip() for t in terms]
pid = terms[0]
if pid not in pid_to_text:
continue
if len(terms) == 2 and terms[-1] == '-1':
continue
text = pid_to_text[pid]
if (len(terms)-2) % 4 == 0:
error_num = int((len(terms)-2) / 4)
for i in range(error_num):
loc, typ, wrong, correct = terms[i*4+1: (i+1)*4+1]
loc = int(loc)
cor_text = text[:loc] + correct + text[loc+len(wrong):]
error_set.add((pid, loc, wrong, cor_text))
det_set.add((pid, loc, wrong))
cor_set.add((pid, cor_text))
else:
raise Exception('check your data format: {}'.format(line))
assert len(error_set) == len(det_set) == len(cor_set)
return error_set, det_set, cor_set
def cal_f1(ref_num, pred_num, right_num):
precision = float(right_num) / pred_num
recall = float(right_num) / ref_num
if precision + recall < 1e-6:
return 0.0
f1 = 2 * precision * recall / (precision + recall)
return f1 * 100
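# Worked example: ref_num=10, pred_num=8, right_num=6
#   precision = 6/8 = 0.75, recall = 6/10 = 0.6
#   F1 = 2*0.75*0.6/(0.75+0.6) ≈ 0.667, returned as 66.67 (the function scales by 100).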
def evaluate(input_file, ref_file, pred_file):
pid_to_text = read_input_file(input_file)
ref_error_set, ref_det_set, ref_cor_set = read_label_file(pid_to_text, ref_file)
pred_error_set, pred_det_set, pred_cor_set = read_label_file(pid_to_text, pred_file)
ref_num = len(ref_cor_set)
pred_num = len(pred_cor_set)
det_right_num = 0
for error in ref_error_set:
pid, loc, wrong, cor_text = error
if (pid, loc, wrong) in pred_det_set or (pid, cor_text) in pred_cor_set:
det_right_num += 1
detect_f1 = cal_f1(ref_num, pred_num, det_right_num)
cor_right_num = len(ref_cor_set & pred_cor_set)
correct_f1 = cal_f1(ref_num, pred_num, cor_right_num)
final_score = 0.8 * detect_f1 + 0.2 * correct_f1
print("detect_f1: {}".format(detect_f1))
print("correct_f1: {}".format(correct_f1))
print("final_score: {}".format(final_score))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file',
help='Path to the input file',
required=True)
parser.add_argument('-r', '--ref_file',
help='Path to the reference label file',
required=True)
parser.add_argument('-p', '--pred_file',
help='Path to the predict label file',
required=True)
args = parser.parse_args()
try:
evaluate(args.input_file, args.ref_file, args.pred_file)
except:
traceback.print_exc()
| 3,294 | 33.684211 | 88 | py |
CTC2021 | CTC2021-main/ctc_gector/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
if item not in vocab:
print("warning: %s not in vocab" % item)
item = "[UNK]"
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
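# Usage sketch (assumes a BERT-style vocab.txt, as segment.py does; the exact word pieces
# depend entirely on that vocabulary):
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize("Unaffable 纠错")   # e.g. ['un', '##aff', '##able', '纠', '错']
#   ids = tokenizer.convert_tokens_to_ids(tokens)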
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 10,619 | 29.25641 | 80 | py |
CTC2021 | CTC2021-main/ctc_gector/segment.py | # coding:utf-8
import sys
import tokenization
from tqdm import tqdm
tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
for line in tqdm(sys.stdin):
line = line.strip()
items = line.split('\t')
line = tokenization.convert_to_unicode(items[1])
if not line:
print()
continue
tokens = tokenizer.tokenize(line)
print(' '.join(tokens))
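# Example invocation (hypothetical file names; input lines are expected as "<sid>\t<text>"):
#   cat input.txt | python segment.py > input.seg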
| 410 | 16.869565 | 82 | py |
CTC2021 | CTC2021-main/ctc_gector/predict.py | import argparse
from utils.helpers import read_lines
from gector.gec_model import GecBERTModel
def predict_for_file(input_file, output_file, model, batch_size=32):
test_data = read_lines(input_file)
predictions = []
cnt_corrections = 0
batch = []
for sent in test_data:
batch.append(sent.split())
if len(batch) == batch_size:
preds, cnt = model.handle_batch(batch)
predictions.extend(preds)
cnt_corrections += cnt
batch = []
if batch:
preds, cnt = model.handle_batch(batch)
predictions.extend(preds)
cnt_corrections += cnt
with open(output_file, 'w') as f:
f.write("\n".join([" ".join(x) for x in predictions]) + '\n')
return cnt_corrections
def main(args):
# get all paths
model = GecBERTModel(vocab_path=args.vocab_path,
model_paths=args.model_path,
max_len=args.max_len, min_len=args.min_len,
iterations=args.iteration_count,
min_error_probability=args.min_error_probability,
                         min_probability=args.min_probability,
lowercase_tokens=args.lowercase_tokens,
model_name=args.transformer_model,
special_tokens_fix=args.special_tokens_fix,
log=False,
confidence=args.additional_confidence,
is_ensemble=args.is_ensemble,
weigths=args.weights)
cnt_corrections = predict_for_file(args.input_file, args.output_file, model,
batch_size=args.batch_size)
# evaluate with m2 or ERRANT
print(f"Produced overall corrections: {cnt_corrections}")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_path',
help='Path to the model file.', nargs='+',
required=True)
parser.add_argument('--vocab_path',
                        help='Path to the vocabulary directory.',
default='data/output_vocabulary' # to use pretrained models
)
parser.add_argument('--input_file',
help='Path to the evalset file',
required=True)
parser.add_argument('--output_file',
help='Path to the output file',
required=True)
parser.add_argument('--max_len',
type=int,
help='The max sentence length'
'(all longer will be truncated)',
default=50)
parser.add_argument('--min_len',
type=int,
help='The minimum sentence length'
                             '(all shorter will be returned w/o changes)',
default=3)
parser.add_argument('--batch_size',
type=int,
                        help='The number of sentences per batch.',
default=128)
parser.add_argument('--lowercase_tokens',
type=int,
help='Whether to lowercase tokens.',
default=0)
parser.add_argument('--transformer_model',
#choices=['bert', 'gpt2', 'transformerxl', 'xlnet', 'distilbert', 'roberta', 'albert'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--iteration_count',
type=int,
help='The number of iterations of the model.',
default=5)
parser.add_argument('--additional_confidence',
type=float,
                        help='How much probability to add to the $KEEP token.',
default=0)
parser.add_argument('--min_probability',
type=float,
default=0.0)
parser.add_argument('--min_error_probability',
type=float,
default=0.0)
parser.add_argument('--special_tokens_fix',
type=int,
help='Whether to fix problem with [CLS], [SEP] tokens tokenization. '
'For reproducing reported results it should be 0 for BERT/XLNet and 1 for RoBERTa.',
default=1)
parser.add_argument('--is_ensemble',
type=int,
help='Whether to do ensembling.',
default=0)
parser.add_argument('--weights',
help='Used to calculate weighted average', nargs='+',
default=None)
args = parser.parse_args()
main(args)
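# Example invocation (hypothetical paths; --transformer_model should point to the same
# pretrained model that the checkpoint was trained with):
#   python predict.py --model_path ./model_out/model.th --vocab_path ./model_out/vocabulary \
#       --input_file input.seg --output_file output.seg --transformer_model ./chinese-bert-wwm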
| 4,940 | 41.230769 | 113 | py |
CTC2021 | CTC2021-main/ctc_gector/train.py | import argparse
import os
from random import seed
import torch
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from gector.bert_token_embedder import PretrainedBertEmbedder
from gector.datareader import Seq2LabelsDatasetReader
from gector.seq2labels_model import Seq2Labels
from gector.trainer import Trainer
from gector.wordpiece_indexer import PretrainedBertIndexer
from utils.helpers import get_weights_name
def fix_seed():
torch.manual_seed(1)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
seed(43)
def get_token_indexers(model_name, max_pieces_per_token=5, lowercase_tokens=True, special_tokens_fix=0, is_test=False):
bert_token_indexer = PretrainedBertIndexer(
pretrained_model=model_name,
max_pieces_per_token=max_pieces_per_token,
do_lowercase=lowercase_tokens,
use_starting_offsets=True,
special_tokens_fix=special_tokens_fix,
is_test=is_test
)
return {'bert': bert_token_indexer}
def get_token_embedders(model_name, tune_bert=False, special_tokens_fix=0):
take_grads = True if tune_bert > 0 else False
bert_token_emb = PretrainedBertEmbedder(
pretrained_model=model_name,
top_layer_only=True, requires_grad=take_grads,
special_tokens_fix=special_tokens_fix)
token_embedders = {'bert': bert_token_emb}
embedder_to_indexer_map = {"bert": ["bert", "bert-offsets"]}
text_filed_emd = BasicTextFieldEmbedder(token_embedders=token_embedders,
embedder_to_indexer_map=embedder_to_indexer_map,
allow_unmatched_keys=True)
return text_filed_emd
def get_data_reader(model_name, max_len, skip_correct=False, skip_complex=0,
test_mode=False, tag_strategy="keep_one",
broken_dot_strategy="keep", lowercase_tokens=True,
max_pieces_per_token=3, tn_prob=0, tp_prob=1, special_tokens_fix=0,):
token_indexers = get_token_indexers(model_name,
max_pieces_per_token=max_pieces_per_token,
lowercase_tokens=lowercase_tokens,
special_tokens_fix=special_tokens_fix,
is_test=test_mode)
reader = Seq2LabelsDatasetReader(token_indexers=token_indexers,
max_len=max_len,
skip_correct=skip_correct,
skip_complex=skip_complex,
test_mode=test_mode,
tag_strategy=tag_strategy,
broken_dot_strategy=broken_dot_strategy,
lazy=True,
tn_prob=tn_prob,
tp_prob=tp_prob)
return reader
def get_model(model_name, vocab, tune_bert=False,
predictor_dropout=0,
label_smoothing=0.0,
confidence=0,
special_tokens_fix=0):
token_embs = get_token_embedders(model_name, tune_bert=tune_bert, special_tokens_fix=special_tokens_fix)
model = Seq2Labels(vocab=vocab,
text_field_embedder=token_embs,
predictor_dropout=predictor_dropout,
label_smoothing=label_smoothing,
confidence=confidence)
return model
def main(args):
fix_seed()
print(args)
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
#if '/' in args.transformer_model:
weights_name = args.transformer_model
#else:
# weights_name = get_weights_name(args.transformer_model, args.lowercase_tokens)
# read datasets
reader = get_data_reader(weights_name, args.max_len, skip_correct=bool(args.skip_correct),
skip_complex=args.skip_complex,
test_mode=False,
tag_strategy=args.tag_strategy,
lowercase_tokens=args.lowercase_tokens,
max_pieces_per_token=args.pieces_per_token,
tn_prob=args.tn_prob,
tp_prob=args.tp_prob,
special_tokens_fix=args.special_tokens_fix)
train_data = reader.read(args.train_set)
dev_data = list(reader.read(args.dev_set))
print('train_data', train_data)
print('dev_data len', len(dev_data))
default_tokens = [DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN]
namespaces = ['labels', 'd_tags']
tokens_to_add = {x: default_tokens for x in namespaces}
# build vocab
if args.vocab_path:
vocab = Vocabulary.from_files(args.vocab_path)
else:
vocab = Vocabulary.from_instances(train_data,
max_vocab_size={'tokens': 30000,
'labels': args.target_vocab_size,
'd_tags': 2},
tokens_to_add=tokens_to_add)
vocab.save_to_files(os.path.join(args.model_dir, 'vocabulary'))
print("Data is loaded")
model = get_model(weights_name, vocab,
tune_bert=args.tune_bert,
predictor_dropout=args.predictor_dropout,
label_smoothing=args.label_smoothing,
special_tokens_fix=args.special_tokens_fix)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
cuda_device = list(range(torch.cuda.device_count()))
else:
cuda_device = 0
else:
cuda_device = -1
if args.pretrain:
model.load_state_dict(torch.load(os.path.join(args.pretrain_folder, args.pretrain + '.th')))
model = model.to(device)
print("Model is set")
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.1, patience=10)
instances_per_epoch = None if not args.updates_per_epoch else \
int(args.updates_per_epoch * args.batch_size * args.accumulation_size)
iterator = BucketIterator(batch_size=args.batch_size,
sorting_keys=[("tokens", "num_tokens")],
biggest_batch_first=True,
max_instances_in_memory=args.batch_size * 5000,
#max_instances_in_memory=instances_per_epoch,
instances_per_epoch=instances_per_epoch,
)
val_iterator = BucketIterator(batch_size=args.batch_size,
sorting_keys=[("tokens", "num_tokens")],
biggest_batch_first=True,
max_instances_in_memory=args.batch_size * 1000,
#max_instances_in_memory=instances_per_epoch,
instances_per_epoch=len(dev_data)/2,
)
iterator.index_with(vocab)
val_iterator.index_with(vocab) #FIXME
print('dev_data_len 2', len(dev_data))
trainer = Trainer(model=model,
optimizer=optimizer,
scheduler=scheduler,
iterator=iterator,
train_dataset=train_data,
validation_dataset=dev_data,
validation_iterator=val_iterator,
serialization_dir=args.model_dir,
patience=args.patience,
num_epochs=args.n_epoch,
cuda_device=cuda_device,
shuffle=False,
accumulated_batch_count=args.accumulation_size,
cold_step_count=args.cold_steps_count,
cold_lr=args.cold_lr,
cuda_verbose_step=int(args.cuda_verbose_steps)
if args.cuda_verbose_steps else None
)
print("Start training")
trainer.train()
# Here's how to save the model.
out_model = os.path.join(args.model_dir, 'model.th')
with open(out_model, 'wb') as f:
torch.save(model.state_dict(), f)
print("Model is dumped")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--train_set',
help='Path to the train data', required=True)
parser.add_argument('--dev_set',
help='Path to the dev data', required=True)
parser.add_argument('--model_dir',
help='Path to the model dir', required=True)
parser.add_argument('--vocab_path',
help='Path to the model vocabulary directory.'
'If not set then build vocab from data',
default='')
parser.add_argument('--batch_size',
type=int,
help='The size of the batch.',
default=32)
parser.add_argument('--max_len',
type=int,
help='The max sentence length'
'(all longer will be truncated)',
default=50)
parser.add_argument('--target_vocab_size',
type=int,
help='The size of target vocabularies.',
default=1000)
parser.add_argument('--n_epoch',
type=int,
                        help='The number of epochs for training the model.',
default=20)
parser.add_argument('--patience',
type=int,
                        help='The number of epochs without any improvement'
                             ' on the validation set before early stopping.',
default=None)
parser.add_argument('--skip_correct',
type=int,
                        help='If set then correct sentences will be skipped '
'by data reader.',
default=1)
parser.add_argument('--skip_complex',
type=int,
                        help='If set then complex corrections will be skipped '
'by data reader.',
choices=[0, 1, 2, 3, 4, 5],
default=0)
parser.add_argument('--tune_bert',
type=int,
                        help='If greater than 0 then fine-tune BERT.',
default=1)
parser.add_argument('--tag_strategy',
choices=['keep_one', 'merge_all'],
help='The type of the data reader behaviour.',
default='keep_one')
parser.add_argument('--accumulation_size',
type=int,
                        help='How many batches to accumulate before an optimizer step.',
default=4)
parser.add_argument('--lr',
type=float,
help='Set initial learning rate.',
default=1e-5)
parser.add_argument('--cold_steps_count',
type=int,
help='Whether to train only classifier layers first.',
default=4)
parser.add_argument('--cold_lr',
type=float,
help='Learning rate during cold_steps.',
default=1e-3)
parser.add_argument('--predictor_dropout',
type=float,
help='The value of dropout for predictor.',
default=0.0)
parser.add_argument('--lowercase_tokens',
type=int,
help='Whether to lowercase tokens.',
default=0)
parser.add_argument('--pieces_per_token',
type=int,
help='The max number for pieces per token.',
default=5)
parser.add_argument('--cuda_verbose_steps',
help='Number of steps after which CUDA memory information is printed. '
'Makes sense for local testing. Usually about 1000.',
default=None)
parser.add_argument('--label_smoothing',
type=float,
help='The value of parameter alpha for label smoothing.',
default=0.0)
parser.add_argument('--tn_prob',
type=float,
help='The probability to take TN from data.',
default=0)
parser.add_argument('--tp_prob',
type=float,
help='The probability to take TP from data.',
default=1)
parser.add_argument('--updates_per_epoch',
type=int,
                        help='If set then each epoch will contain exactly this number of updates.',
default=0)
parser.add_argument('--pretrain_folder',
help='The name of the pretrain folder.')
parser.add_argument('--pretrain',
help='The name of the pretrain weights in pretrain_folder param.',
default='')
parser.add_argument('--transformer_model',
#choices=['bert', 'distilbert', 'gpt2', 'roberta', 'transformerxl', 'xlnet', 'albert'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--special_tokens_fix',
type=int,
help='Whether to fix problem with [CLS], [SEP] tokens tokenization.',
default=1)
args = parser.parse_args()
main(args)
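# Example invocation (hypothetical paths; train/dev files are assumed to already be in the
# token-level label format produced by the GECToR preprocessing step):
#   python train.py --train_set train.label --dev_set dev.label \
#       --model_dir ./model_out --transformer_model ./chinese-bert-wwm --special_tokens_fix 1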
| 14,433 | 43.687307 | 119 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/datareader.py | """Tweaked AllenNLP dataset reader."""
import logging
import re
from random import random
from typing import Dict, List
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, SequenceLabelField, MetadataField, Field
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from overrides import overrides
from utils.helpers import SEQ_DELIMETERS, START_TOKEN
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("seq2labels_datareader")
class Seq2LabelsDatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
WORD###TAG [TAB] WORD###TAG [TAB] ..... \n
and converts it into a ``Dataset`` suitable for sequence tagging. You can also specify
alternative delimiters in the constructor.
Parameters
----------
delimiters: ``dict``
        The dictionary with all delimiters.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
    max_len: if set then long sentences will be truncated
"""
# fix broken sentences mostly in Lang8
BROKEN_SENTENCES_REGEXP = re.compile(r'\.[a-zA-RT-Z]')
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
delimeters: dict = SEQ_DELIMETERS,
skip_correct: bool = False,
skip_complex: int = 0,
lazy: bool = False,
max_len: int = None,
test_mode: bool = False,
tag_strategy: str = "keep_one",
tn_prob: float = 0,
tp_prob: float = 0,
broken_dot_strategy: str = "keep") -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._delimeters = delimeters
self._max_len = max_len
self._skip_correct = skip_correct
self._skip_complex = skip_complex
self._tag_strategy = tag_strategy
self._broken_dot_strategy = broken_dot_strategy
self._test_mode = test_mode
self._tn_prob = tn_prob
self._tp_prob = tp_prob
@overrides
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip("\n")
# skip blank and broken lines
if not line or (not self._test_mode and self._broken_dot_strategy == 'skip'
and self.BROKEN_SENTENCES_REGEXP.search(line) is not None):
continue
tokens_and_tags = [pair.rsplit(self._delimeters['labels'], 1)
for pair in line.split(self._delimeters['tokens'])]
try:
tokens = [Token(token) for token, tag in tokens_and_tags]
tags = [tag for token, tag in tokens_and_tags]
except ValueError:
tokens = [Token(token[0]) for token in tokens_and_tags]
tags = None
if tokens and tokens[0] != Token(START_TOKEN):
tokens = [Token(START_TOKEN)] + tokens
words = [x.text for x in tokens]
if self._max_len is not None:
tokens = tokens[:self._max_len]
tags = None if tags is None else tags[:self._max_len]
instance = self.text_to_instance(tokens, tags, words)
if instance:
yield instance
def extract_tags(self, tags: List[str]):
op_del = self._delimeters['operations']
labels = [x.split(op_del) for x in tags]
comlex_flag_dict = {}
# get flags
for i in range(5):
idx = i + 1
comlex_flag_dict[idx] = sum([len(x) > idx for x in labels])
if self._tag_strategy == "keep_one":
            # keep only the first candidate operation for each token
labels = [x[0] for x in labels]
elif self._tag_strategy == "merge_all":
            # consider phrases as words
pass
else:
raise Exception("Incorrect tag strategy")
detect_tags = ["CORRECT" if label == "$KEEP" else "INCORRECT" for label in labels]
return labels, detect_tags, comlex_flag_dict
def text_to_instance(self, tokens: List[Token], tags: List[str] = None,
words: List[str] = None) -> Instance: # type: ignore
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
sequence = TextField(tokens, self._token_indexers)
fields["tokens"] = sequence
fields["metadata"] = MetadataField({"words": words})
if tags is not None:
labels, detect_tags, complex_flag_dict = self.extract_tags(tags)
if self._skip_complex and complex_flag_dict[self._skip_complex] > 0:
return None
rnd = random()
# skip TN
if self._skip_correct and all(x == "CORRECT" for x in detect_tags):
if rnd > self._tn_prob:
return None
# skip TP
else:
if rnd > self._tp_prob:
return None
fields["labels"] = SequenceLabelField(labels, sequence,
label_namespace="labels")
fields["d_tags"] = SequenceLabelField(detect_tags, sequence,
label_namespace="d_tags")
return Instance(fields)
| 6,373 | 40.934211 | 107 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/seq2labels_model.py | """Basic model. Predicts tags for every token"""
from typing import Dict, Optional, List, Any
import numpy
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TimeDistributed, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
from overrides import overrides
from torch.nn.modules.linear import Linear
@Model.register("seq2labels")
class Seq2Labels(Model):
"""
This ``Seq2Labels`` simply encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then
predicts a tag (or couple tags) for each token in the sequence.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
calculate_span_f1 : ``bool``, optional (default=``None``)
Calculate span-level F1 metrics during training. If this is ``True``, then
``label_encoding`` is required. If ``None`` and
label_encoding is specified, this is set to ``True``.
If ``None`` and label_encoding is not specified, it defaults
to ``False``.
label_encoding : ``str``, optional (default=``None``)
Label encoding to use when calculating span f1.
Valid options are "BIO", "BIOUL", "IOB1", "BMES".
Required if ``calculate_span_f1`` is true.
label_namespace : ``str``, optional (default=``labels``)
This is needed to compute the SpanBasedF1Measure metric, if desired.
Unless you did something unusual, the default value should be what you want.
verbose_metrics : ``bool``, optional (default = False)
If true, metrics will be returned per label class in addition
to the overall statistics.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
predictor_dropout=0.0,
labels_namespace: str = "labels",
detect_namespace: str = "d_tags",
verbose_metrics: bool = False,
label_smoothing: float = 0.0,
confidence: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(Seq2Labels, self).__init__(vocab, regularizer)
self.label_namespaces = [labels_namespace,
detect_namespace]
self.text_field_embedder = text_field_embedder
self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
self.label_smoothing = label_smoothing
self.confidence = confidence
self.incorr_index = self.vocab.get_token_index("INCORRECT",
namespace=detect_namespace)
self._verbose_metrics = verbose_metrics
self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))
self.tag_labels_projection_layer = TimeDistributed(
Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))
self.tag_detect_projection_layer = TimeDistributed(
Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))
self.metrics = {"accuracy": CategoricalAccuracy()}
initializer(self)
@overrides
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
labels: torch.LongTensor = None,
d_tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
        labels : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
d_tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
metadata containing the original words in the sentence to be tagged under a 'words' key.
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
encoded_text = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = encoded_text.size()
mask = get_text_field_mask(tokens)
logits_labels = self.tag_labels_projection_layer(self.predictor_dropout(encoded_text))
logits_d = self.tag_detect_projection_layer(encoded_text)
class_probabilities_labels = F.softmax(logits_labels, dim=-1).view(
[batch_size, sequence_length, self.num_labels_classes])
class_probabilities_d = F.softmax(logits_d, dim=-1).view(
[batch_size, sequence_length, self.num_detect_classes])
error_probs = class_probabilities_d[:, :, self.incorr_index] * mask
incorr_prob = torch.max(error_probs, dim=-1)[0]
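        # incorr_prob is the per-sentence maximum of the INCORRECT-tag probabilities; at
        # inference time GecBERTModel compares it against min_error_probability to skip
        # sentences that look error-free (assumed from how predict.py wires the model).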
        if self.confidence > 0:
            probability_change = [self.confidence] + [0] * (self.num_labels_classes - 1)
            class_probabilities_labels += torch.FloatTensor(probability_change).repeat(
                (batch_size, sequence_length, 1)).to(class_probabilities_labels.device)
output_dict = {"logits_labels": logits_labels,
"logits_d_tags": logits_d,
"class_probabilities_labels": class_probabilities_labels,
"class_probabilities_d_tags": class_probabilities_d,
"max_error_probability": incorr_prob}
if labels is not None and d_tags is not None:
loss_labels = sequence_cross_entropy_with_logits(logits_labels, labels, mask,
label_smoothing=self.label_smoothing)
loss_d = sequence_cross_entropy_with_logits(logits_d, d_tags, mask)
for metric in self.metrics.values():
metric(logits_labels, labels, mask.float())
metric(logits_d, d_tags, mask.float())
output_dict["loss"] = loss_labels + loss_d
if metadata is not None:
output_dict["words"] = [x["words"] for x in metadata]
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a ``"tags"`` key to the dictionary with the result.
"""
for label_namespace in self.label_namespaces:
all_predictions = output_dict[f'class_probabilities_{label_namespace}']
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [self.vocab.get_token_from_index(x, namespace=label_namespace)
for x in argmax_indices]
all_tags.append(tags)
output_dict[f'{label_namespace}'] = all_tags
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics_to_return = {metric_name: metric.get_metric(reset) for
metric_name, metric in self.metrics.items()}
return metrics_to_return
| 9,880 | 49.671795 | 107 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/wordpiece_indexer.py | """Tweaked version of corresponding AllenNLP file"""
import logging
from collections import defaultdict
from typing import Dict, List, Callable
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
from overrides import overrides
from transformers import AutoTokenizer, BertTokenizer
from utils.helpers import START_TOKEN
logger = logging.getLogger(__name__)
# TODO(joelgrus): Figure out how to generate token_type_ids out of this token indexer.
# This is the default list of tokens that should not be lowercased.
_NEVER_LOWERCASE = ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']
class WordpieceIndexer(TokenIndexer[int]):
"""
A token indexer that does the wordpiece-tokenization (e.g. for BERT embeddings).
If you are using one of the pretrained BERT models, you'll want to use the ``PretrainedBertIndexer``
subclass rather than this base class.
Parameters
----------
vocab : ``Dict[str, int]``
The mapping {wordpiece -> id}. Note this is not an AllenNLP ``Vocabulary``.
wordpiece_tokenizer : ``Callable[[str], List[str]]``
A function that does the actual tokenization.
namespace : str, optional (default: "wordpiece")
The namespace in the AllenNLP ``Vocabulary`` into which the wordpieces
will be loaded.
use_starting_offsets : bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
do_lowercase : ``bool``, optional (default=``False``)
Should we lowercase the provided tokens before getting the indices?
You would need to do this if you are using an -uncased BERT model
but your DatasetReader is not lowercasing tokens (which might be the
case if you're also using other embeddings based on cased tokens).
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]'].
start_tokens : ``List[str]``, optional (default=``None``)
These are prepended to the tokens provided to ``tokens_to_indices``.
end_tokens : ``List[str]``, optional (default=``None``)
These are appended to the tokens provided to ``tokens_to_indices``.
separator_token : ``str``, optional (default=``[SEP]``)
This token indicates the segments in the sequence.
truncate_long_sequences : ``bool``, optional (default=``True``)
By default, long sequences will be truncated to the maximum sequence
length. Otherwise, they will be split apart and batched using a
sliding window.
token_min_padding_length : ``int``, optional (default=``0``)
See :class:`TokenIndexer`.
"""
def __init__(self,
vocab: Dict[str, int],
bpe_ranks: Dict,
byte_encoder: Dict,
wordpiece_tokenizer: Callable[[str], List[str]],
namespace: str = "wordpiece",
use_starting_offsets: bool = False,
max_pieces: int = 512,
max_pieces_per_token: int = 3,
is_test=False,
do_lowercase: bool = False,
never_lowercase: List[str] = None,
start_tokens: List[str] = None,
end_tokens: List[str] = None,
truncate_long_sequences: bool = True,
token_min_padding_length: int = 0) -> None:
super().__init__(token_min_padding_length)
self.vocab = vocab
# The BERT code itself does a two-step tokenization:
# sentence -> [words], and then word -> [wordpieces]
# In AllenNLP, the first step is implemented as the ``BertBasicWordSplitter``,
# and this token indexer handles the second.
self.wordpiece_tokenizer = wordpiece_tokenizer
self.max_pieces_per_token = max_pieces_per_token
self._namespace = namespace
self._added_to_vocabulary = False
self.max_pieces = max_pieces
self.use_starting_offsets = use_starting_offsets
self._do_lowercase = do_lowercase
self._truncate_long_sequences = truncate_long_sequences
self.max_pieces_per_sentence = 80
self.is_test = is_test
self.cache = {}
self.bpe_ranks = bpe_ranks
self.byte_encoder = byte_encoder
if self.is_test:
self.max_pieces_per_token = None
if never_lowercase is None:
# Use the defaults
self._never_lowercase = set(_NEVER_LOWERCASE)
else:
self._never_lowercase = set(never_lowercase)
# Convert the start_tokens and end_tokens to wordpiece_ids
self._start_piece_ids = [vocab[wordpiece]
for token in (start_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
self._end_piece_ids = [vocab[wordpiece]
for token in (end_tokens or [])
for wordpiece in wordpiece_tokenizer(token)]
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# If we only use pretrained models, we don't need to do anything here.
pass
def _add_encoding_to_vocabulary(self, vocabulary: Vocabulary) -> None:
# pylint: disable=protected-access
for word, idx in self.vocab.items():
vocabulary._token_to_index[self._namespace][word] = idx
vocabulary._index_to_token[self._namespace][idx] = word
def get_pairs(self, word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = self.get_pairs(word)
if not pairs:
return token
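        # Greedy BPE: repeatedly merge the adjacent symbol pair with the lowest
        # merge rank until no mergeable pair remains.
        # Illustrative trace with hypothetical ranks {('l', 'o'): 0, ('lo', 'w'): 1}:
        #   ('l', 'o', 'w') -> ('lo', 'w') -> ('low',)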
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair,
float(
'inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
                    # ``first`` does not occur again; keep the rest of the word as-is.
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def bpe_tokenize(self, text):
""" Tokenize a string."""
bpe_tokens = []
for token in text.split():
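            # GPT-2 style byte-level BPE: map every UTF-8 byte to a printable
            # unicode character via ``byte_encoder`` before applying the merges,
            # so any input string can be represented without an unknown token.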
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
@overrides
def tokens_to_indices(self,
tokens: List[Token],
vocabulary: Vocabulary,
index_name: str) -> Dict[str, List[int]]:
if not self._added_to_vocabulary:
self._add_encoding_to_vocabulary(vocabulary)
self._added_to_vocabulary = True
# This lowercases tokens if necessary
text = (token.text.lower()
if self._do_lowercase and token.text not in self._never_lowercase
else token.text
for token in tokens)
# Obtain a nested sequence of wordpieces, each represented by a list of wordpiece ids
token_wordpiece_ids = []
for token in text:
if self.bpe_ranks != {}:
wps = self.bpe_tokenize(token)
else:
wps = self.wordpiece_tokenizer(token)
limited_wps = [self.vocab[wordpiece] for wordpiece in wps][:self.max_pieces_per_token]
token_wordpiece_ids.append(limited_wps)
# Flattened list of wordpieces. In the end, the output of the model (e.g., BERT) should
# have a sequence length equal to the length of this list. However, it will first be split into
# chunks of length `self.max_pieces` so that they can be fit through the model. After packing
# and passing through the model, it should be unpacked to represent the wordpieces in this list.
flat_wordpiece_ids = [wordpiece for token in token_wordpiece_ids for wordpiece in token]
        # Reduce max_pieces_per_token when the wordpiece length of the sentence exceeds max_pieces_per_sentence;
        # this helps avoid CUDA out-of-memory errors while still allowing a larger batch size.
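        # Each pass trims the longest token(s) by one wordpiece and re-flattens,
        # until the sentence plus start/end pieces fits the per-sentence budget.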
while not self.is_test and len(flat_wordpiece_ids) > \
self.max_pieces_per_sentence - len(self._start_piece_ids) - len(self._end_piece_ids):
max_pieces = max([len(row) for row in token_wordpiece_ids])
token_wordpiece_ids = [row[:max_pieces - 1] for row in token_wordpiece_ids]
flat_wordpiece_ids = [wordpiece for token in token_wordpiece_ids for wordpiece in token]
# The code below will (possibly) pack the wordpiece sequence into multiple sub-sequences by using a sliding
# window `window_length` that overlaps with previous windows according to the `stride`. Suppose we have
# the following sentence: "I went to the store to buy some milk". Then a sliding window of length 4 and
# stride of length 2 will split them up into:
# "[I went to the] [to the store to] [store to buy some] [buy some milk [PAD]]".
# This is to ensure that the model has context of as much of the sentence as possible to get accurate
# embeddings. Finally, the sequences will be padded with any start/end piece ids, e.g.,
# "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ...".
# The embedder should then be able to split this token sequence by the window length,
# pass them through the model, and recombine them.
# Specify the stride to be half of `self.max_pieces`, minus any additional start/end wordpieces
window_length = self.max_pieces - len(self._start_piece_ids) - len(self._end_piece_ids)
stride = window_length // 2
# offsets[i] will give us the index into wordpiece_ids
# for the wordpiece "corresponding to" the i-th input token.
offsets = []
# If we're using initial offsets, we want to start at offset = len(text_tokens)
# so that the first offset is the index of the first wordpiece of tokens[0].
# Otherwise, we want to start at len(text_tokens) - 1, so that the "previous"
# offset is the last wordpiece of "tokens[-1]".
offset = len(self._start_piece_ids) if self.use_starting_offsets else len(self._start_piece_ids) - 1
for token in token_wordpiece_ids:
# Truncate the sequence if specified, which depends on where the offsets are
next_offset = 1 if self.use_starting_offsets else 0
if self._truncate_long_sequences and offset >= window_length + next_offset:
break
# For initial offsets, the current value of ``offset`` is the start of
# the current wordpiece, so add it to ``offsets`` and then increment it.
if self.use_starting_offsets:
offsets.append(offset)
offset += len(token)
# For final offsets, the current value of ``offset`` is the end of
# the previous wordpiece, so increment it and then add it to ``offsets``.
else:
offset += len(token)
offsets.append(offset)
if len(flat_wordpiece_ids) <= window_length:
# If all the wordpieces fit, then we don't need to do anything special
wordpiece_windows = [self._add_start_and_end(flat_wordpiece_ids)]
elif self._truncate_long_sequences:
logger.warning("Too many wordpieces, truncating sequence. If you would like a sliding window, set"
"`truncate_long_sequences` to False %s", str([token.text for token in tokens]))
wordpiece_windows = [self._add_start_and_end(flat_wordpiece_ids[:window_length])]
else:
# Create a sliding window of wordpieces of length `max_pieces` that advances by `stride` steps and
# add start/end wordpieces to each window
# TODO: this currently does not respect word boundaries, so words may be cut in half between windows
# However, this would increase complexity, as sequences would need to be padded/unpadded in the middle
wordpiece_windows = [self._add_start_and_end(flat_wordpiece_ids[i:i + window_length])
for i in range(0, len(flat_wordpiece_ids), stride)]
# Check for overlap in the last window. Throw it away if it is redundant.
last_window = wordpiece_windows[-1][1:]
penultimate_window = wordpiece_windows[-2]
if last_window == penultimate_window[-len(last_window):]:
wordpiece_windows = wordpiece_windows[:-1]
# Flatten the wordpiece windows
wordpiece_ids = [wordpiece for sequence in wordpiece_windows for wordpiece in sequence]
# Our mask should correspond to the original tokens,
# because calling util.get_text_field_mask on the
# "wordpiece_id" tokens will produce the wrong shape.
# However, because of the max_pieces constraint, we may
# have truncated the wordpieces; accordingly, we want the mask
# to correspond to the remaining tokens after truncation, which
# is captured by the offsets.
mask = [1 for _ in offsets]
return {index_name: wordpiece_ids,
f"{index_name}-offsets": offsets,
"mask": mask}
def _add_start_and_end(self, wordpiece_ids: List[int]) -> List[int]:
return self._start_piece_ids + wordpiece_ids + self._end_piece_ids
def _extend(self, token_type_ids: List[int]) -> List[int]:
"""
Extend the token type ids by len(start_piece_ids) on the left
and len(end_piece_ids) on the right.
"""
first = token_type_ids[0]
last = token_type_ids[-1]
return ([first for _ in self._start_piece_ids] +
token_type_ids +
[last for _ in self._end_piece_ids])
@overrides
def get_padding_token(self) -> int:
return 0
@overrides
def get_padding_lengths(self, token: int) -> Dict[str, int]: # pylint: disable=unused-argument
return {}
@overrides
def pad_token_sequence(self,
tokens: Dict[str, List[int]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int]) -> Dict[str, List[int]]: # pylint: disable=unused-argument
return {key: pad_sequence_to_length(val, desired_num_tokens[key])
for key, val in tokens.items()}
@overrides
def get_keys(self, index_name: str) -> List[str]:
"""
We need to override this because the indexer generates multiple keys.
"""
# pylint: disable=no-self-use
return [index_name, f"{index_name}-offsets", f"{index_name}-type-ids", "mask"]
class PretrainedBertIndexer(WordpieceIndexer):
# pylint: disable=line-too-long
"""
A ``TokenIndexer`` corresponding to a pretrained BERT model.
Parameters
----------
pretrained_model: ``str``
Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),
or the path to the .txt file with its vocabulary.
If the name is a key in the list of pretrained models at
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33
the corresponding path will be used; otherwise it will be interpreted as a path or URL.
use_starting_offsets: bool, optional (default: False)
By default, the "offsets" created by the token indexer correspond to the
last wordpiece in each word. If ``use_starting_offsets`` is specified,
they will instead correspond to the first wordpiece in each word.
do_lowercase: ``bool``, optional (default = True)
Whether to lowercase the tokens before converting to wordpiece ids.
never_lowercase: ``List[str]``, optional
Tokens that should never be lowercased. Default is
['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]'].
max_pieces: int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Any inputs longer than this will
either be truncated (default), or be split apart and batched using a
sliding window.
truncate_long_sequences : ``bool``, optional (default=``True``)
By default, long sequences will be truncated to the maximum sequence
length. Otherwise, they will be split apart and batched using a
sliding window.
"""
def __init__(self,
pretrained_model: str,
use_starting_offsets: bool = False,
do_lowercase: bool = True,
never_lowercase: List[str] = None,
max_pieces: int = 512,
max_pieces_per_token=5,
is_test=False,
truncate_long_sequences: bool = True,
special_tokens_fix: int = 0) -> None:
if pretrained_model.endswith("-cased") and do_lowercase:
logger.warning("Your BERT model appears to be cased, "
"but your indexer is lowercasing tokens.")
elif pretrained_model.endswith("-uncased") and not do_lowercase:
logger.warning("Your BERT model appears to be uncased, "
"but your indexer is not lowercasing tokens.")
bert_tokenizer = BertTokenizer.from_pretrained(
pretrained_model, do_lower_case=do_lowercase, do_basic_tokenize=True)
# to adjust all tokenizers
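        # Tokenizers expose their vocabularies differently: BPE-based tokenizers
        # carry an ``encoder`` dict, sentencepiece-based ones carry ``sp_model``;
        # mirror both into a uniform ``.vocab`` mapping for id lookups.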
if hasattr(bert_tokenizer, 'encoder'):
bert_tokenizer.vocab = bert_tokenizer.encoder
if hasattr(bert_tokenizer, 'sp_model'):
bert_tokenizer.vocab = defaultdict(lambda: 1)
for i in range(bert_tokenizer.sp_model.get_piece_size()):
bert_tokenizer.vocab[bert_tokenizer.sp_model.id_to_piece(i)] = i
if special_tokens_fix:
bert_tokenizer.add_tokens([START_TOKEN])
bert_tokenizer.vocab[START_TOKEN] = len(bert_tokenizer) - 1
#if "roberta" in pretrained_model:
# bpe_ranks = bert_tokenizer.bpe_ranks
# byte_encoder = bert_tokenizer.byte_encoder
#else:
bpe_ranks = {}
byte_encoder = None
super().__init__(vocab=bert_tokenizer.vocab,
bpe_ranks=bpe_ranks,
byte_encoder=byte_encoder,
wordpiece_tokenizer=bert_tokenizer.tokenize,
namespace="bert",
use_starting_offsets=use_starting_offsets,
max_pieces=max_pieces,
max_pieces_per_token=max_pieces_per_token,
is_test=is_test,
do_lowercase=do_lowercase,
never_lowercase=never_lowercase,
start_tokens=["[CLS]"] if not special_tokens_fix else [],
end_tokens=["[SEP]"] if not special_tokens_fix else [],
truncate_long_sequences=truncate_long_sequences)
| 21,046 | 46.296629 | 119 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/bert_token_embedder.py | """Tweaked version of corresponding AllenNLP file"""
import logging
from copy import deepcopy
from typing import Dict
import torch
import torch.nn.functional as F
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn import util
#from transformers import AutoModel, PreTrainedModel
from transformers import BertModel, PreTrainedModel
logger = logging.getLogger(__name__)
class PretrainedBertModel:
"""
In some instances you may want to load the same BERT model twice
(e.g. to use as a token embedder and also as a pooling layer).
This factory provides a cache so that you don't actually have to load the model twice.
"""
_cache: Dict[str, PreTrainedModel] = {}
@classmethod
def load(cls, model_name: str, cache_model: bool = True) -> PreTrainedModel:
if model_name in cls._cache:
return PretrainedBertModel._cache[model_name]
#model = AutoModel.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)
if cache_model:
cls._cache[model_name] = model
return model
class BertEmbedder(TokenEmbedder):
"""
A ``TokenEmbedder`` that produces BERT embeddings for your tokens.
Should be paired with a ``BertIndexer``, which produces wordpiece ids.
Most likely you probably want to use ``PretrainedBertEmbedder``
for one of the named pretrained models, not this base class.
Parameters
----------
bert_model: ``BertModel``
The BERT model being wrapped.
top_layer_only: ``bool``, optional (default = ``False``)
        If ``True``, then only return the top layer instead of applying the scalar mix.
max_pieces : int, optional (default: 512)
The BERT embedder uses positional embeddings and so has a corresponding
maximum length for its input ids. Assuming the inputs are windowed
and padded appropriately by this length, the embedder will split them into a
large batch, feed them into BERT, and recombine the output as if it was a
longer sequence.
num_start_tokens : int, optional (default: 1)
The number of starting special tokens input to BERT (usually 1, i.e., [CLS])
num_end_tokens : int, optional (default: 1)
The number of ending tokens input to BERT (usually 1, i.e., [SEP])
scalar_mix_parameters: ``List[float]``, optional, (default = None)
If not ``None``, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training.
"""
def __init__(
self,
bert_model: PreTrainedModel,
top_layer_only: bool = False,
max_pieces: int = 512,
num_start_tokens: int = 1,
num_end_tokens: int = 1
) -> None:
super().__init__()
# self.bert_model = bert_model
self.bert_model = deepcopy(bert_model)
self.output_dim = bert_model.config.hidden_size
self.max_pieces = max_pieces
self.num_start_tokens = num_start_tokens
self.num_end_tokens = num_end_tokens
self._scalar_mix = None
def set_weights(self, freeze):
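        # Freeze or unfreeze every parameter of the wrapped BERT model; the trainer
        # uses this to keep the encoder fixed during the initial "cold" epochs.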
for param in self.bert_model.parameters():
param.requires_grad = not freeze
return
def get_output_dim(self) -> int:
return self.output_dim
def forward(
self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None
) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
            The BERT embeddings are one per wordpiece. However, you will often
            want one embedding per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
"""
batch_size, full_seq_len = input_ids.size(0), input_ids.size(-1)
initial_dims = list(input_ids.shape[:-1])
# The embedder may receive an input tensor that has a sequence length longer than can
# be fit. In that case, we should expect the wordpiece indexer to create padded windows
# of length `self.max_pieces` for us, and have them concatenated into one long sequence.
# E.g., "[CLS] I went to the [SEP] [CLS] to the store to [SEP] ..."
# We can then split the sequence into sub-sequences of that length, and concatenate them
# along the batch dimension so we effectively have one huge batch of partial sentences.
# This can then be fed into BERT without any sentence length issues. Keep in mind
# that the memory consumption can dramatically increase for large batches with extremely
# long sentences.
needs_split = full_seq_len > self.max_pieces
last_window_size = 0
if needs_split:
# Split the flattened list by the window size, `max_pieces`
split_input_ids = list(input_ids.split(self.max_pieces, dim=-1))
# We want all sequences to be the same length, so pad the last sequence
last_window_size = split_input_ids[-1].size(-1)
padding_amount = self.max_pieces - last_window_size
split_input_ids[-1] = F.pad(split_input_ids[-1], pad=[0, padding_amount], value=0)
# Now combine the sequences along the batch dimension
input_ids = torch.cat(split_input_ids, dim=0)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
all_encoder_layers = self.bert_model(
input_ids=util.combine_initial_dims(input_ids),
attention_mask=util.combine_initial_dims(input_mask),
)[0]
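        # The first element of the model output may be a single (batch, seq, dim)
        # tensor or a sequence of per-layer tensors, depending on the model and
        # library version; normalize it to a (layers, batch, seq, dim) stack.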
if len(all_encoder_layers[0].shape) == 3:
all_encoder_layers = torch.stack(all_encoder_layers)
elif len(all_encoder_layers[0].shape) == 2:
all_encoder_layers = torch.unsqueeze(all_encoder_layers, dim=0)
if needs_split:
# First, unpack the output embeddings into one long sequence again
unpacked_embeddings = torch.split(all_encoder_layers, batch_size, dim=1)
unpacked_embeddings = torch.cat(unpacked_embeddings, dim=2)
# Next, select indices of the sequence such that it will result in embeddings representing the original
# sentence. To capture maximal context, the indices will be the middle part of each embedded window
# sub-sequence (plus any leftover start and final edge windows), e.g.,
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# "[CLS] I went to the very fine [SEP] [CLS] the very fine store to eat [SEP]"
# with max_pieces = 8 should produce max context indices [2, 3, 4, 10, 11, 12] with additional start
# and final windows with indices [0, 1] and [14, 15] respectively.
# Find the stride as half the max pieces, ignoring the special start and end tokens
# Calculate an offset to extract the centermost embeddings of each window
stride = (self.max_pieces - self.num_start_tokens - self.num_end_tokens) // 2
stride_offset = stride // 2 + self.num_start_tokens
first_window = list(range(stride_offset))
max_context_windows = [
i
for i in range(full_seq_len)
if stride_offset - 1 < i % self.max_pieces < stride_offset + stride
]
            # Look back at what's left, unless it's the whole self.max_pieces window
if full_seq_len % self.max_pieces == 0:
lookback = self.max_pieces
else:
lookback = full_seq_len % self.max_pieces
final_window_start = full_seq_len - lookback + stride_offset + stride
final_window = list(range(final_window_start, full_seq_len))
select_indices = first_window + max_context_windows + final_window
initial_dims.append(len(select_indices))
recombined_embeddings = unpacked_embeddings[:, :, select_indices]
else:
recombined_embeddings = all_encoder_layers
# Recombine the outputs of all layers
# (layers, batch_size * d1 * ... * dn, sequence_length, embedding_dim)
# recombined = torch.cat(combined, dim=2)
input_mask = (recombined_embeddings != 0).long()
if self._scalar_mix is not None:
mix = self._scalar_mix(recombined_embeddings, input_mask)
else:
mix = recombined_embeddings[-1]
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
dims = initial_dims if needs_split else input_ids.size()
return util.uncombine_initial_dims(mix, dims)
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(
offsets2d.size(0), device=util.get_device_of(mix)
).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = mix[range_vector, offsets2d]
return util.uncombine_initial_dims(selected_embeddings, offsets.size())
# @TokenEmbedder.register("bert-pretrained")
class PretrainedBertEmbedder(BertEmbedder):
"""
Parameters
----------
pretrained_model: ``str``
Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),
or the path to the .tar.gz file with the model weights.
If the name is a key in the list of pretrained models at
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py#L41
the corresponding path will be used; otherwise it will be interpreted as a path or URL.
requires_grad : ``bool``, optional (default = False)
If True, compute gradient of BERT parameters for fine tuning.
top_layer_only: ``bool``, optional (default = ``False``)
        If ``True``, then only return the top layer instead of applying the scalar mix.
scalar_mix_parameters: ``List[float]``, optional, (default = None)
If not ``None``, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training.
"""
def __init__(
self,
pretrained_model: str,
requires_grad: bool = False,
top_layer_only: bool = False,
special_tokens_fix: int = 0,
) -> None:
model = PretrainedBertModel.load(pretrained_model)
for param in model.parameters():
param.requires_grad = requires_grad
super().__init__(
bert_model=model,
top_layer_only=top_layer_only
)
if special_tokens_fix:
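            # The matching indexer registers an extra START token when
            # special_tokens_fix is set, so grow the embedding matrix by one row
            # to keep the new token id in range.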
try:
vocab_size = self.bert_model.embeddings.word_embeddings.num_embeddings
except AttributeError:
# reserve more space
vocab_size = self.bert_model.word_embedding.num_embeddings + 5
self.bert_model.resize_token_embeddings(vocab_size + 1)
| 12,469 | 44.677656 | 115 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/trainer.py | """Tweaked version of corresponding AllenNLP file"""
import datetime
import logging
import math
import os
import time
import traceback
from typing import Dict, Optional, List, Tuple, Union, Iterable, Any
import torch
import torch.optim.lr_scheduler
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError, parse_cuda_device
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import dump_metrics, gpu_memory_mb, peak_memory_mb, lazy_groups_of
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator, TensorDict
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
from allennlp.training import util as training_util
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.optimizers import Optimizer
from allennlp.training.tensorboard_writer import TensorboardWriter
from allennlp.training.trainer_base import TrainerBase
logger = logging.getLogger(__name__)
class Trainer(TrainerBase):
def __init__(
self,
model: Model,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
accumulated_batch_count: int = 1,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
checkpointer: Checkpointer = None,
model_save_interval: float = None,
cuda_device: Union[int, List] = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
log_batch_size_period: Optional[int] = None,
moving_average: Optional[MovingAverage] = None,
cold_step_count: int = 0,
cold_lr: float = 1e-3,
cuda_verbose_step=None,
) -> None:
"""
A trainer for doing supervised learning. It just takes a labeled dataset
and a ``DataIterator``, and uses the supplied ``Optimizer`` to learn the weights
for your model over some fixed number of epochs. You can also pass in a validation
dataset and enable early stopping. There are many other bells and whistles as well.
Parameters
----------
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you use `Trainer.from_params` this will be
handled for you.)
optimizer : ``torch.nn.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
        validation_metric : str, optional (default="-loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
shuffle: ``bool``, optional (default=True)
Whether to shuffle the instances in the iterator or not.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
checkpointer : ``Checkpointer``, optional (default=None)
An instance of class Checkpointer to use instead of the default. If a checkpointer is specified,
the arguments num_serialized_models_to_keep and keep_serialized_model_every_num_seconds should
not be specified. The caller is responsible for initializing the checkpointer so that it is
consistent with serialization_dir.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
cuda_device : ``Union[int, List[int]]``, optional (default = -1)
An integer or list of integers specifying the CUDA device(s) to use. If -1, the CPU is used.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``LearningRateScheduler``, optional (default = None)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the ``step_batch`` method). If you use :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the ``validation_metric`` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
``step_batch(batch_num_total)`` which updates the learning rate given the batch number.
momentum_scheduler : ``MomentumScheduler``, optional (default = None)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
summary_interval: ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
should_log_parameter_statistics : ``bool``, optional, (default = True)
Whether to send parameter statistics (mean and standard deviation
of parameters and gradients) to tensorboard.
should_log_learning_rate : ``bool``, optional, (default = False)
Whether to send parameter specific learning rate to tensorboard.
log_batch_size_period : ``int``, optional, (default = ``None``)
If defined, how often to log the average batch size.
moving_average: ``MovingAverage``, optional, (default = None)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
evaluation, we backup the original parameters and assign the moving averages to corresponding
parameters. Be careful that when saving the checkpoint, we will save the moving averages of
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart the training from checkpoint.
"""
super().__init__(serialization_dir, cuda_device)
# I am not calling move_to_gpu here, because if the model is
# not already on the GPU then the optimizer is going to be wrong.
self.model = model
self.iterator = iterator
self._validation_iterator = validation_iterator
self.shuffle = shuffle
self.optimizer = optimizer
self.scheduler = scheduler
self.train_data = train_dataset
self._validation_data = validation_dataset
self.accumulated_batch_count = accumulated_batch_count
self.cold_step_count = cold_step_count
self.cold_lr = cold_lr
self.cuda_verbose_step = cuda_verbose_step
if patience is None: # no early stopping
if validation_dataset:
logger.warning(
"You provided a validation dataset but patience was set to None, "
"meaning that early stopping is disabled"
)
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError(
'{} is an invalid value for "patience": it must be a positive integer '
"or None (if you want to disable early stopping)".format(patience)
)
# For tracking is_best_so_far and should_stop_early
self._metric_tracker = MetricTracker(patience, validation_metric)
# Get rid of + or -
self._validation_metric = validation_metric[1:]
self._num_epochs = num_epochs
if checkpointer is not None:
# We can't easily check if these parameters were passed in, so check against their default values.
# We don't check against serialization_dir since it is also used by the parent class.
if num_serialized_models_to_keep != 20 \
or keep_serialized_model_every_num_seconds is not None:
raise ConfigurationError(
"When passing a custom Checkpointer, you may not also pass in separate checkpointer "
"args 'num_serialized_models_to_keep' or 'keep_serialized_model_every_num_seconds'."
)
self._checkpointer = checkpointer
else:
self._checkpointer = Checkpointer(
serialization_dir,
keep_serialized_model_every_num_seconds,
num_serialized_models_to_keep,
)
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
self._momentum_scheduler = momentum_scheduler
self._moving_average = moving_average
# We keep the total batch number as an instance variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._tensorboard = TensorboardWriter(
get_batch_num_total=lambda: self._batch_num_total,
serialization_dir=serialization_dir,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
)
self._log_batch_size_period = log_batch_size_period
self._last_log = 0.0 # time of last logging
# Enable activation logging.
if histogram_interval is not None:
self._tensorboard.enable_activation_logging(self.model)
def rescale_gradients(self) -> Optional[float]:
return training_util.rescale_gradients(self.model, self._grad_norm)
def batch_loss(self, batch_group: List[TensorDict], for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batches and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
if self._multiple_gpu:
output_dict = training_util.data_parallel(batch_group, self.model, self._cuda_devices)
else:
assert len(batch_group) == 1
batch = batch_group[0]
batch = nn_util.move_to_device(batch, self._cuda_devices[0])
output_dict = self.model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self.model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError(
"The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs)."
)
loss = None
return loss
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
peak_cpu_usage = peak_memory_mb()
logger.info(f"Peak CPU memory usage MB: {peak_cpu_usage}")
gpu_usage = []
for gpu, memory in gpu_memory_mb().items():
gpu_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self.model.train()
num_gpus = len(self._cuda_devices)
# Get tqdm for the training batches
raw_train_generator = self.iterator(self.train_data, num_epochs=1, shuffle=self.shuffle)
train_generator = lazy_groups_of(raw_train_generator, num_gpus)
num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data) / num_gpus)
residue = num_training_batches % self.accumulated_batch_count
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
train_generator_tqdm = Tqdm.tqdm(train_generator, total=num_training_batches)
cumulative_batch_size = 0
self.optimizer.zero_grad()
for batch_group in train_generator_tqdm:
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
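            # Normalize the loss by the size of the current gradient-accumulation
            # group; the last group of the epoch may be smaller (``residue`` batches).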
iter_len = self.accumulated_batch_count \
if batches_this_epoch <= (num_training_batches - residue) else residue
if self.cuda_verbose_step is not None and batch_num_total % self.cuda_verbose_step == 0:
print(f'Before forward pass - Cuda memory allocated: {torch.cuda.memory_allocated() / 1e9}')
print(f'Before forward pass - Cuda memory cached: {torch.cuda.memory_cached() / 1e9}')
try:
loss = self.batch_loss(batch_group, for_training=True) / iter_len
except RuntimeError as e:
print(e)
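                # Dump sentence lengths and tensor shapes of the offending batch
                # (usually an out-of-memory failure) before re-raising.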
for x in batch_group:
all_words = [len(y['words']) for y in x['metadata']]
print(f"Total sents: {len(all_words)}. "
f"Min {min(all_words)}. Max {max(all_words)}")
for elem in ['labels', 'd_tags']:
tt = x[elem]
print(
f"{elem} shape {list(tt.shape)} and min {tt.min().item()} and {tt.max().item()}")
for elem in ["bert", "mask", "bert-offsets"]:
tt = x['tokens'][elem]
print(
f"{elem} shape {list(tt.shape)} and min {tt.min().item()} and {tt.max().item()}")
raise e
if self.cuda_verbose_step is not None and batch_num_total % self.cuda_verbose_step == 0:
print(f'After forward pass - Cuda memory allocated: {torch.cuda.memory_allocated() / 1e9}')
print(f'After forward pass - Cuda memory cached: {torch.cuda.memory_cached() / 1e9}')
if torch.isnan(loss):
raise ValueError("nan loss encountered")
loss.backward()
if self.cuda_verbose_step is not None and batch_num_total % self.cuda_verbose_step == 0:
print(f'After backprop - Cuda memory allocated: {torch.cuda.memory_allocated() / 1e9}')
print(f'After backprop - Cuda memory cached: {torch.cuda.memory_cached() / 1e9}')
train_loss += loss.item() * iter_len
del batch_group, loss
torch.cuda.empty_cache()
if self.cuda_verbose_step is not None and batch_num_total % self.cuda_verbose_step == 0:
print(f'After collecting garbage - Cuda memory allocated: {torch.cuda.memory_allocated() / 1e9}')
print(f'After collecting garbage - Cuda memory cached: {torch.cuda.memory_cached() / 1e9}')
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
if self._tensorboard.should_log_histograms_this_batch():
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {
name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()
}
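                # Step the optimizer only at accumulation boundaries (or on the final
                # batch of the epoch), then log how far each parameter moved relative
                # to its norm.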
if batches_this_epoch % self.accumulated_batch_count == 0 or \
batches_this_epoch == num_training_batches:
self.optimizer.step()
self.optimizer.zero_grad()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1))
param_norm = torch.norm(param.view(-1)).cpu()
self._tensorboard.add_train_scalar(
"gradient_update/" + name, update_norm / (param_norm + 1e-7)
)
else:
if batches_this_epoch % self.accumulated_batch_count == 0 or \
batches_this_epoch == num_training_batches:
self.optimizer.step()
self.optimizer.zero_grad()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)
description = training_util.description_from_metrics(metrics)
train_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard
if self._tensorboard.should_log_this_batch():
self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)
self._tensorboard.log_learning_rates(self.model, self.optimizer)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"])
self._tensorboard.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self._tensorboard.should_log_histograms_this_batch():
self._tensorboard.log_histograms(self.model, histogram_parameters)
if self._log_batch_size_period:
cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])
cumulative_batch_size += cur_batch
if (batches_this_epoch - 1) % self._log_batch_size_period == 0:
average = cumulative_batch_size / batches_this_epoch
logger.info(f"current batch size: {cur_batch} mean batch size: {average}")
self._tensorboard.add_train_scalar("current_batch_size", cur_batch)
self._tensorboard.add_train_scalar("mean_batch_size", average)
# Save model if needed.
if self._model_save_interval is not None and (
time.time() - last_save_time > self._model_save_interval
):
last_save_time = time.time()
self._save_checkpoint(
"{0}.{1}".format(epoch, training_util.time_to_str(int(last_save_time)))
)
metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)
metrics["cpu_memory_MB"] = peak_cpu_usage
for (gpu_num, memory) in gpu_usage:
metrics["gpu_" + str(gpu_num) + "_memory_MB"] = memory
return metrics
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self.model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
            print('Using the dedicated validation iterator')
else:
val_iterator = self.iterator
            print('No validation iterator given; falling back to the training iterator')
num_gpus = len(self._cuda_devices)
raw_val_generator = val_iterator(self._validation_data, num_epochs=1, shuffle=False)
val_generator = lazy_groups_of(raw_val_generator, num_gpus)
num_validation_batches = math.ceil(
val_iterator.get_num_batches(self._validation_data) / num_gpus
)
print('_validation_data_len', len(self._validation_data))
print('num_validation_batches', num_validation_batches)
val_generator_tqdm = Tqdm.tqdm(val_generator, total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
print('valid', 1)
for batch_group in val_generator_tqdm:
loss = self.batch_loss(batch_group, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)
description = training_util.description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
print('valid', 2)
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
epoch_counter = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError(
"Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?"
)
training_util.enable_gradient_clipping(self.model, self._grad_clipping)
logger.info("Beginning training.")
train_metrics: Dict[str, float] = {}
val_metrics: Dict[str, float] = {}
        this_epoch_val_metric: Optional[float] = None
metrics: Dict[str, Any] = {}
epochs_trained = 0
training_start_time = time.time()
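        # "Cold" start: for the first ``cold_step_count`` epochs, train with
        # ``cold_lr`` while the BERT encoder weights stay frozen; the original
        # learning rate and a trainable encoder are restored afterwards.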
if self.cold_step_count > 0:
base_lr = self.optimizer.param_groups[0]['lr']
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cold_lr
self.model.text_field_embedder._token_embedders['bert'].set_weights(freeze=True)
metrics["best_epoch"] = self._metric_tracker.best_epoch
for key, value in self._metric_tracker.best_epoch_metrics.items():
metrics["best_validation_" + key] = value
for epoch in range(epoch_counter, self._num_epochs):
if epoch == self.cold_step_count and epoch != 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = base_lr
self.model.text_field_embedder._token_embedders['bert'].set_weights(freeze=False)
print('epoch', epoch, 1)
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
print('epoch', epoch, 2)
# get peak of memory usage
if "cpu_memory_MB" in train_metrics:
metrics["peak_cpu_memory_MB"] = max(
metrics.get("peak_cpu_memory_MB", 0), train_metrics["cpu_memory_MB"]
)
for key, value in train_metrics.items():
if key.startswith("gpu_"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
print('epoch', epoch, 3)
# clear cache before validation
torch.cuda.empty_cache()
if self._validation_data is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, num_batches = self._validation_loss()
val_metrics = training_util.get_metrics(
self.model, val_loss, num_batches, reset=True
)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
print("************TEST********")
break
print('epoch', epoch, 4)
self._tensorboard.log_metrics(
train_metrics, val_metrics=val_metrics, log_to_console=True, epoch=epoch + 1
) # +1 because tensorboard doesn't like 0
# Create overall metrics dict
training_elapsed_time = time.time() - training_start_time
metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
metrics["training_start_epoch"] = epoch_counter
metrics["training_epochs"] = epochs_trained
metrics["epoch"] = epoch
print('epoch', epoch)
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
# if self.cold_step_count <= epoch:
self.scheduler.step(metrics['validation_loss'])
if self._metric_tracker.is_best_so_far():
# Update all the best_ metrics.
# (Otherwise they just stay the same as they were.)
metrics["best_epoch"] = epoch
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
if self._serialization_dir:
dump_metrics(
os.path.join(self._serialization_dir, f"metrics_epoch_{epoch}.json"), metrics
)
# The Scheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
if self._momentum_scheduler:
self._momentum_scheduler.step(this_epoch_val_metric, epoch)
self._save_checkpoint(epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * (
(self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1
)
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
# make sure pending events are flushed to disk and files are closed properly
# self._tensorboard.close()
# Load the best model state before returning
best_model_state = self._checkpointer.best_model_state()
if best_model_state:
self.model.load_state_dict(best_model_state)
return metrics
def _save_checkpoint(self, epoch: Union[int, str]) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
"""
# If moving averages are used for parameters, we save
# the moving average values into checkpoint, instead of the current values.
if self._moving_average is not None:
self._moving_average.assign_average_value()
# These are the training states we need to persist.
training_states = {
"metric_tracker": self._metric_tracker.state_dict(),
"optimizer": self.optimizer.state_dict(),
"batch_num_total": self._batch_num_total,
}
# If we have a learning rate or momentum scheduler, we should persist them too.
if self._learning_rate_scheduler is not None:
training_states["learning_rate_scheduler"] = self._learning_rate_scheduler.state_dict()
if self._momentum_scheduler is not None:
training_states["momentum_scheduler"] = self._momentum_scheduler.state_dict()
self._checkpointer.save_checkpoint(
model_state=self.model.state_dict(),
epoch=epoch,
training_states=training_states,
is_best_so_far=self._metric_tracker.is_best_so_far(),
)
# Restore the original values for parameters so that training will not be affected.
if self._moving_average is not None:
self._moving_average.restore()
def _restore_checkpoint(self) -> int:
"""
Restores the model and training state from the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
model_state, training_state = self._checkpointer.restore_checkpoint()
if not training_state:
# No checkpoint to restore, start at 0
return 0
self.model.load_state_dict(model_state)
self.optimizer.load_state_dict(training_state["optimizer"])
if self._learning_rate_scheduler is not None \
and "learning_rate_scheduler" in training_state:
self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
if self._momentum_scheduler is not None and "momentum_scheduler" in training_state:
self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
training_util.move_optimizer_to_cuda(self.optimizer)
# Currently the ``training_state`` contains a serialized ``MetricTracker``.
if "metric_tracker" in training_state:
self._metric_tracker.load_state_dict(training_state["metric_tracker"])
# It used to be the case that we tracked ``val_metric_per_epoch``.
elif "val_metric_per_epoch" in training_state:
self._metric_tracker.clear()
self._metric_tracker.add_metrics(training_state["val_metric_per_epoch"])
# And before that we didn't track anything.
else:
self._metric_tracker.clear()
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split(".")[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get("batch_num_total")
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return
# Requires custom from_params.
@classmethod
def from_params( # type: ignore
cls,
model: Model,
serialization_dir: str,
iterator: DataIterator,
train_data: Iterable[Instance],
validation_data: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None,
) -> "Trainer":
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
shuffle = params.pop_bool("shuffle", True)
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = parse_cuda_device(params.pop("cuda_device", -1))
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
momentum_scheduler_params = params.pop("momentum_scheduler", None)
if isinstance(cuda_device, list):
model_device = cuda_device[0]
else:
model_device = cuda_device
if model_device >= 0:
# Moving model to GPU here so that the optimizer state gets constructed on
# the right device.
model = model.cuda(model_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if "moving_average" in params:
moving_average = MovingAverage.from_params(
params.pop("moving_average"), parameters=parameters
)
else:
moving_average = None
if lr_scheduler_params:
lr_scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
lr_scheduler = None
if momentum_scheduler_params:
momentum_scheduler = MomentumScheduler.from_params(optimizer, momentum_scheduler_params)
else:
momentum_scheduler = None
if "checkpointer" in params:
if "keep_serialized_model_every_num_seconds" in params \
or "num_serialized_models_to_keep" in params:
raise ConfigurationError(
"Checkpointer may be initialized either from the 'checkpointer' key or from the "
"keys 'num_serialized_models_to_keep' and 'keep_serialized_model_every_num_seconds'"
" but the passed config uses both methods."
)
checkpointer = Checkpointer.from_params(params.pop("checkpointer"))
else:
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None
)
checkpointer = Checkpointer(
serialization_dir=serialization_dir,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
should_log_parameter_statistics = params.pop_bool("should_log_parameter_statistics", True)
should_log_learning_rate = params.pop_bool("should_log_learning_rate", False)
log_batch_size_period = params.pop_int("log_batch_size_period", None)
params.assert_empty(cls.__name__)
return cls(
model,
optimizer,
iterator,
train_data,
validation_data,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
shuffle=shuffle,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=lr_scheduler,
momentum_scheduler=momentum_scheduler,
checkpointer=checkpointer,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
log_batch_size_period=log_batch_size_period,
moving_average=moving_average,
)
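    # A minimal sketch of a config block that this from_params could consume
    # (values are hypothetical; only "optimizer" has no default, everything
    # else falls back to the defaults popped above):
    #   "trainer": {
    #     "optimizer": {"type": "adam", "lr": 0.001},
    #     "num_epochs": 20,
    #     "patience": 5,
    #     "validation_metric": "+accuracy",
    #     "cuda_device": 0,
    #     "grad_norm": 5.0
    #   }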
| 42,210 | 48.139697 | 113 | py |
CTC2021 | CTC2021-main/ctc_gector/gector/gec_model.py | """Wrapper of AllenNLP model. Fixes errors based on model predictions"""
import logging
import os
import sys
from time import time
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import util
from gector.bert_token_embedder import PretrainedBertEmbedder
from gector.seq2labels_model import Seq2Labels
from gector.wordpiece_indexer import PretrainedBertIndexer
from utils.helpers import PAD, UNK, get_target_sent_by_edits, START_TOKEN
logging.getLogger("werkzeug").setLevel(logging.ERROR)
logger = logging.getLogger(__file__)
def get_weights_name(transformer_name, lowercase):
if transformer_name == 'bert' and lowercase:
return 'bert-base-uncased'
if transformer_name == 'bert' and not lowercase:
return 'bert-base-cased'
if transformer_name == 'distilbert':
if not lowercase:
print('Warning! This model was trained only on uncased sentences.')
return 'distilbert-base-uncased'
if transformer_name == 'albert':
if not lowercase:
print('Warning! This model was trained only on uncased sentences.')
return 'albert-base-v1'
if lowercase:
print('Warning! This model was trained only on cased sentences.')
if transformer_name == 'roberta':
return 'roberta-base'
if transformer_name == 'gpt2':
return 'gpt2'
if transformer_name == 'transformerxl':
return 'transfo-xl-wt103'
if transformer_name == 'xlnet':
return 'xlnet-base-cased'
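# For reference, the mapping above gives e.g.:
#   get_weights_name('bert', lowercase=True)     -> 'bert-base-uncased'
#   get_weights_name('roberta', lowercase=False) -> 'roberta-base'
# A transformer name outside the handled set falls through and the function
# implicitly returns None, so callers should stick to the names listed above.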
class GecBERTModel(object):
def __init__(self, vocab_path=None, model_paths=None,
weigths=None,
max_len=50,
min_len=3,
lowercase_tokens=False,
log=False,
iterations=3,
min_probability=0.0,
model_name='roberta',
special_tokens_fix=1,
is_ensemble=True,
min_error_probability=0.0,
confidence=0,
resolve_cycles=False,
):
self.model_weights = list(map(float, weigths)) if weigths else [1] * len(model_paths)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.max_len = max_len
self.min_len = min_len
self.lowercase_tokens = lowercase_tokens
self.min_probability = min_probability
self.min_error_probability = min_error_probability
self.vocab = Vocabulary.from_files(vocab_path)
self.log = log
self.iterations = iterations
self.confidence = confidence
self.resolve_cycles = resolve_cycles
# set training parameters and operations
self.indexers = []
self.models = []
for model_path in model_paths:
#if is_ensemble:
#model_name, special_tokens_fix = self._get_model_data(model_path)
#weights_name = get_weights_name(model_name, lowercase_tokens)
#else:
weights_name = model_name
print(weights_name, 'weights_name')
self.indexers.append(self._get_indexer(weights_name, special_tokens_fix))
model = Seq2Labels(vocab=self.vocab,
text_field_embedder=self._get_embbeder(weights_name, special_tokens_fix),
confidence=self.confidence
).to(self.device)
print('model_path', model_path)
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
else:
model.load_state_dict(torch.load(model_path,
map_location=torch.device('cpu')))
model.eval()
self.models.append(model)
@staticmethod
def _get_model_data(model_path):
model_name = model_path.split('/')[-1]
tr_model, stf = model_name.split('_')[:2]
return tr_model, int(stf)
def _restore_model(self, input_path):
if os.path.isdir(input_path):
print("Model could not be restored from directory", file=sys.stderr)
filenames = []
else:
filenames = [input_path]
for model_path in filenames:
try:
if torch.cuda.is_available():
loaded_model = torch.load(model_path)
else:
loaded_model = torch.load(model_path,
map_location=lambda storage,
loc: storage)
except:
print(f"{model_path} is not valid model", file=sys.stderr)
own_state = self.model.state_dict()
for name, weights in loaded_model.items():
if name not in own_state:
continue
try:
if len(filenames) == 1:
own_state[name].copy_(weights)
else:
own_state[name] += weights
except RuntimeError:
continue
print("Model is restored", file=sys.stderr)
def predict(self, batches):
t11 = time()
predictions = []
for batch, model in zip(batches, self.models):
batch = util.move_to_device(batch.as_tensor_dict(), 0 if torch.cuda.is_available() else -1)
with torch.no_grad():
prediction = model.forward(**batch)
predictions.append(prediction)
preds, idx, error_probs = self._convert(predictions)
t55 = time()
if self.log:
print(f"Inference time {t55 - t11}")
return preds, idx, error_probs
def get_token_action(self, token, index, prob, sugg_token):
"""Get lost of suggested actions for token."""
# cases when we don't need to do anything
if prob < self.min_probability or sugg_token in [UNK, PAD, '$KEEP']:
return None
if sugg_token.startswith('$REPLACE_') or sugg_token.startswith('$TRANSFORM_') or sugg_token == '$DELETE':
start_pos = index
end_pos = index + 1
elif sugg_token.startswith("$APPEND_") or sugg_token.startswith("$MERGE_"):
start_pos = index + 1
end_pos = index + 1
if sugg_token == "$DELETE":
sugg_token_clear = ""
elif sugg_token.startswith('$TRANSFORM_') or sugg_token.startswith("$MERGE_"):
sugg_token_clear = sugg_token[:]
else:
sugg_token_clear = sugg_token[sugg_token.index('_') + 1:]
return start_pos - 1, end_pos - 1, sugg_token_clear, prob
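    # A rough sketch of the tuples returned above (the incoming index refers to
    # the $START-prefixed sequence, hence the -1 shift on the way out), assuming
    # min_probability is low enough that nothing is filtered:
    #   get_token_action('go', 2, 0.9, '$REPLACE_goes') -> (1, 2, 'goes', 0.9)
    #   get_token_action('he', 1, 0.9, '$APPEND_to')    -> (1, 1, 'to', 0.9)
    #   get_token_action('to', 3, 0.9, '$DELETE')       -> (2, 3, '', 0.9)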
def _get_embbeder(self, weigths_name, special_tokens_fix):
embedders = {'bert': PretrainedBertEmbedder(
pretrained_model=weigths_name,
requires_grad=False,
top_layer_only=True,
special_tokens_fix=special_tokens_fix)
}
text_field_embedder = BasicTextFieldEmbedder(
token_embedders=embedders,
embedder_to_indexer_map={"bert": ["bert", "bert-offsets"]},
allow_unmatched_keys=True)
return text_field_embedder
def _get_indexer(self, weights_name, special_tokens_fix):
bert_token_indexer = PretrainedBertIndexer(
pretrained_model=weights_name,
do_lowercase=self.lowercase_tokens,
max_pieces_per_token=5,
use_starting_offsets=True,
truncate_long_sequences=True,
special_tokens_fix=special_tokens_fix,
is_test=True
)
return {'bert': bert_token_indexer}
def preprocess(self, token_batch):
seq_lens = [len(sequence) for sequence in token_batch if sequence]
if not seq_lens:
return []
max_len = min(max(seq_lens), self.max_len)
batches = []
for indexer in self.indexers:
batch = []
for sequence in token_batch:
tokens = sequence[:max_len]
tokens = [Token(token) for token in ['$START'] + tokens]
batch.append(Instance({'tokens': TextField(tokens, indexer)}))
batch = Batch(batch)
batch.index_instances(self.vocab)
batches.append(batch)
return batches
def _convert(self, data):
all_class_probs = torch.zeros_like(data[0]['class_probabilities_labels'])
error_probs = torch.zeros_like(data[0]['max_error_probability'])
for output, weight in zip(data, self.model_weights):
all_class_probs += weight * output['class_probabilities_labels'] / sum(self.model_weights)
error_probs += weight * output['max_error_probability'] / sum(self.model_weights)
max_vals = torch.max(all_class_probs, dim=-1)
probs = max_vals[0].tolist()
idx = max_vals[1].tolist()
return probs, idx, error_probs.tolist()
def update_final_batch(self, final_batch, pred_ids, pred_batch,
prev_preds_dict):
new_pred_ids = []
total_updated = 0
for i, orig_id in enumerate(pred_ids):
orig = final_batch[orig_id]
pred = pred_batch[i]
prev_preds = prev_preds_dict[orig_id]
if orig != pred and pred not in prev_preds:
final_batch[orig_id] = pred
new_pred_ids.append(orig_id)
prev_preds_dict[orig_id].append(pred)
total_updated += 1
elif orig != pred and pred in prev_preds:
# update final batch, but stop iterations
final_batch[orig_id] = pred
total_updated += 1
else:
continue
return final_batch, new_pred_ids, total_updated
def postprocess_batch(self, batch, all_probabilities, all_idxs,
error_probs,
max_len=50):
all_results = []
noop_index = self.vocab.get_token_index("$KEEP", "labels")
for tokens, probabilities, idxs, error_prob in zip(batch,
all_probabilities,
all_idxs,
error_probs):
length = min(len(tokens), max_len)
edits = []
            # skip the whole sentence if there are no errors
if max(idxs) == 0:
all_results.append(tokens)
continue
            # skip the whole sentence if the predicted error probability is below the threshold
if error_prob < self.min_error_probability:
all_results.append(tokens)
continue
for i in range(length + 1):
# because of START token
if i == 0:
token = START_TOKEN
else:
token = tokens[i - 1]
# skip if there is no error
if idxs[i] == noop_index:
continue
sugg_token = self.vocab.get_token_from_index(idxs[i],
namespace='labels')
action = self.get_token_action(token, i, probabilities[i],
sugg_token)
if not action:
continue
edits.append(action)
all_results.append(get_target_sent_by_edits(tokens, edits))
return all_results
def handle_batch(self, full_batch):
"""
Handle batch of requests.
"""
final_batch = full_batch[:]
batch_size = len(full_batch)
prev_preds_dict = {i: [final_batch[i]] for i in range(len(final_batch))}
short_ids = [i for i in range(len(full_batch))
if len(full_batch[i]) < self.min_len]
pred_ids = [i for i in range(len(full_batch)) if i not in short_ids]
total_updates = 0
for n_iter in range(self.iterations):
orig_batch = [final_batch[i] for i in pred_ids]
sequences = self.preprocess(orig_batch)
if not sequences:
break
probabilities, idxs, error_probs = self.predict(sequences)
pred_batch = self.postprocess_batch(orig_batch, probabilities,
idxs, error_probs)
if self.log:
print(f"Iteration {n_iter + 1}. Predicted {round(100*len(pred_ids)/batch_size, 1)}% of sentences.")
final_batch, pred_ids, cnt = \
self.update_final_batch(final_batch, pred_ids, pred_batch,
prev_preds_dict)
total_updates += cnt
if not pred_ids:
break
return final_batch, total_updates
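    # A rough end-to-end usage sketch (paths and settings are hypothetical):
    #   model = GecBERTModel(vocab_path='data/output_vocabulary',
    #                        model_paths=['model.th'], model_name='bert')
    #   corrected, n_updates = model.handle_batch([['he', 'go', 'home']])
    # handle_batch expects a list of token lists and returns the corrected token
    # lists together with the total number of sentence updates over iterations.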
| 13,208 | 39.148936 | 115 | py |
CTC2021 | CTC2021-main/ctc_gector/utils/prepare_clc_fce_data.py | #!/usr/bin/env python
"""
Convert CLC-FCE dataset (The Cambridge Learner Corpus) to the parallel sentences format.
"""
import argparse
import glob
import os
import re
from xml.etree import cElementTree
from nltk.tokenize import sent_tokenize, word_tokenize
from tqdm import tqdm
def annotate_fce_doc(xml):
"""Takes a FCE xml document and yields sentences with annotated errors."""
result = []
doc = cElementTree.fromstring(xml)
paragraphs = doc.findall('head/text/*/coded_answer/p')
for p in paragraphs:
text = _get_formatted_text(p)
result.append(text)
return '\n'.join(result)
def _get_formatted_text(elem, ignore_tags=None):
text = elem.text or ''
ignore_tags = [tag.upper() for tag in (ignore_tags or [])]
correct = None
mistake = None
for child in elem.getchildren():
tag = child.tag.upper()
if tag == 'NS':
text += _get_formatted_text(child)
elif tag == 'UNKNOWN':
text += ' UNKNOWN '
elif tag == 'C':
assert correct is None
correct = _get_formatted_text(child)
elif tag == 'I':
assert mistake is None
mistake = _get_formatted_text(child)
elif tag in ignore_tags:
pass
else:
raise ValueError(f"Unknown tag `{child.tag}`", text)
if correct or mistake:
correct = correct or ''
mistake = mistake or ''
if '=>' not in mistake:
text += f'{{{mistake}=>{correct}}}'
else:
text += mistake
text += elem.tail or ''
return text
def convert_fce(fce_dir):
"""Processes the whole FCE directory. Yields annotated documents (strings)."""
    # Ensure we got a valid dataset path
if not os.path.isdir(fce_dir):
raise UserWarning(
f"{fce_dir} is not a valid path")
dataset_dir = os.path.join(fce_dir, 'dataset')
if not os.path.exists(dataset_dir):
raise UserWarning(
f"{fce_dir} doesn't point to a dataset's root dir")
# Convert XML docs to the corpora format
filenames = sorted(glob.glob(os.path.join(dataset_dir, '*/*.xml')))
docs = []
for filename in filenames:
with open(filename, encoding='utf-8') as f:
doc = annotate_fce_doc(f.read())
docs.append(doc)
return docs
def main():
fce = convert_fce(args.fce_dataset_path)
with open(args.output + "/fce-original.txt", 'w', encoding='utf-8') as out_original, \
open(args.output + "/fce-applied.txt", 'w', encoding='utf-8') as out_applied:
for doc in tqdm(fce, unit='doc'):
sents = re.split(r"\n +\n", doc)
for sent in sents:
tokenized_sents = sent_tokenize(sent)
for i in range(len(tokenized_sents)):
if re.search(r"[{>][.?!]$", tokenized_sents[i]):
tokenized_sents[i + 1] = tokenized_sents[i] + " " + tokenized_sents[i + 1]
tokenized_sents[i] = ""
regexp = r'{([^{}]*?)=>([^{}]*?)}'
original = re.sub(regexp, r"\1", tokenized_sents[i])
applied = re.sub(regexp, r"\2", tokenized_sents[i])
# filter out nested alerts
if original != "" and applied != "" and not re.search(r"[{}=]", original) \
and not re.search(r"[{}=]", applied):
out_original.write(" ".join(word_tokenize(original)) + "\n")
out_applied.write(" ".join(word_tokenize(applied)) + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=(
"Convert CLC-FCE dataset to the parallel sentences format."))
parser.add_argument('fce_dataset_path',
help='Path to the folder with the FCE dataset')
parser.add_argument('--output',
help='Path to the output folder')
args = parser.parse_args()
main()
| 4,032 | 31.524194 | 98 | py |
CTC2021 | CTC2021-main/ctc_gector/utils/preprocess_data.py | import argparse
import os
from difflib import SequenceMatcher
import Levenshtein
import numpy as np
from tqdm import tqdm
from helpers import write_lines, read_parallel_lines, encode_verb_form, \
apply_reverse_transformation, SEQ_DELIMETERS, START_TOKEN
def perfect_align(t, T, insertions_allowed=0,
cost_function=Levenshtein.distance):
# dp[i, j, k] is a minimal cost of matching first `i` tokens of `t` with
# first `j` tokens of `T`, after making `k` insertions after last match of
    # token from `t`. In other words, t[:i] is aligned with T[:j].
# Initialize with INFINITY (unknown)
shape = (len(t) + 1, len(T) + 1, insertions_allowed + 1)
dp = np.ones(shape, dtype=int) * int(1e9)
come_from = np.ones(shape, dtype=int) * int(1e9)
come_from_ins = np.ones(shape, dtype=int) * int(1e9)
dp[0, 0, 0] = 0 # The only known starting point. Nothing matched to nothing.
for i in range(len(t) + 1): # Go inclusive
for j in range(len(T) + 1): # Go inclusive
for q in range(insertions_allowed + 1): # Go inclusive
if i < len(t):
# Given matched sequence of t[:i] and T[:j], match token
# t[i] with following tokens T[j:k].
for k in range(j, len(T) + 1):
transform = \
apply_transformation(t[i], ' '.join(T[j:k]))
if transform:
cost = 0
else:
cost = cost_function(t[i], ' '.join(T[j:k]))
current = dp[i, j, q] + cost
if dp[i + 1, k, 0] > current:
dp[i + 1, k, 0] = current
come_from[i + 1, k, 0] = j
come_from_ins[i + 1, k, 0] = q
if q < insertions_allowed:
# Given matched sequence of t[:i] and T[:j], create
# insertion with following tokens T[j:k].
for k in range(j, len(T) + 1):
cost = len(' '.join(T[j:k]))
current = dp[i, j, q] + cost
if dp[i, k, q + 1] > current:
dp[i, k, q + 1] = current
come_from[i, k, q + 1] = j
come_from_ins[i, k, q + 1] = q
# Solution is in the dp[len(t), len(T), *]. Backtracking from there.
alignment = []
i = len(t)
j = len(T)
q = dp[i, j, :].argmin()
while i > 0 or q > 0:
is_insert = (come_from_ins[i, j, q] != q) and (q != 0)
j, k, q = come_from[i, j, q], j, come_from_ins[i, j, q]
if not is_insert:
i -= 1
if is_insert:
alignment.append(['INSERT', T[j:k], (i, i)])
else:
alignment.append([f'REPLACE_{t[i]}', T[j:k], (i, i + 1)])
assert j == 0
return dp[len(t), len(T)].min(), list(reversed(alignment))
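# A worked example of the output format: aligning t = ['a', 'cat'] with
# T = ['the', 'cat'] should give
#   (3, [['REPLACE_a', ['the'], (0, 1)], ['REPLACE_cat', ['cat'], (1, 2)]])
# where the cost 3 is just Levenshtein.distance('a', 'the') and the matched
# 'cat' contributes zero cost because apply_transformation finds $KEEP for it.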
def _split(token):
if not token:
return []
parts = token.split()
return parts or [token]
def apply_merge_transformation(source_tokens, target_words, shift_idx):
edits = []
if len(source_tokens) > 1 and len(target_words) == 1:
# check merge
transform = check_merge(source_tokens, target_words)
if transform:
for i in range(len(source_tokens) - 1):
edits.append([(shift_idx + i, shift_idx + i + 1), transform])
return edits
if len(source_tokens) == len(target_words) == 2:
# check swap
transform = check_swap(source_tokens, target_words)
if transform:
edits.append([(shift_idx, shift_idx + 1), transform])
return edits
def is_sent_ok(sent, delimeters=SEQ_DELIMETERS):
for del_val in delimeters.values():
if del_val in sent and del_val != " ":
return False
return True
def check_casetype(source_token, target_token):
if source_token.lower() != target_token.lower():
return None
if source_token.lower() == target_token:
return "$TRANSFORM_CASE_LOWER"
elif source_token.capitalize() == target_token:
return "$TRANSFORM_CASE_CAPITAL"
elif source_token.upper() == target_token:
return "$TRANSFORM_CASE_UPPER"
elif source_token[1:].capitalize() == target_token[1:] and source_token[0] == target_token[0]:
return "$TRANSFORM_CASE_CAPITAL_1"
elif source_token[:-1].upper() == target_token[:-1] and source_token[-1] == target_token[-1]:
return "$TRANSFORM_CASE_UPPER_-1"
else:
return None
def check_equal(source_token, target_token):
if source_token == target_token:
return "$KEEP"
else:
return None
def check_split(source_token, target_tokens):
if source_token.split("-") == target_tokens:
return "$TRANSFORM_SPLIT_HYPHEN"
else:
return None
def check_merge(source_tokens, target_tokens):
if "".join(source_tokens) == "".join(target_tokens):
return "$MERGE_SPACE"
elif "-".join(source_tokens) == "-".join(target_tokens):
return "$MERGE_HYPHEN"
else:
return None
def check_swap(source_tokens, target_tokens):
if source_tokens == [x for x in reversed(target_tokens)]:
return "$MERGE_SWAP"
else:
return None
def check_plural(source_token, target_token):
if source_token.endswith("s") and source_token[:-1] == target_token:
return "$TRANSFORM_AGREEMENT_SINGULAR"
elif target_token.endswith("s") and source_token == target_token[:-1]:
return "$TRANSFORM_AGREEMENT_PLURAL"
else:
return None
def check_verb(source_token, target_token):
encoding = encode_verb_form(source_token, target_token)
if encoding:
return f"$TRANSFORM_VERB_{encoding}"
else:
return None
def apply_transformation(source_token, target_token):
target_tokens = target_token.split()
if len(target_tokens) > 1:
# check split
transform = check_split(source_token, target_tokens)
if transform:
return transform
checks = [check_equal, check_casetype, check_verb, check_plural]
for check in checks:
transform = check(source_token, target_token)
if transform:
return transform
return None
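# A few illustrative inputs/outputs for the check chain above:
#   apply_transformation('cat', 'cat')               -> '$KEEP'
#   apply_transformation('usa', 'USA')               -> '$TRANSFORM_CASE_UPPER'
#   apply_transformation('well-known', 'well known') -> '$TRANSFORM_SPLIT_HYPHEN'
#   apply_transformation('cats', 'cat')              -> '$TRANSFORM_AGREEMENT_SINGULAR'
#     (assuming 'cats_cat' has no entry in data/verb-form-vocab.txt)
# Verb pairs such as 'go' -> 'goes' produce a $TRANSFORM_VERB_* tag only if the
# pair is listed in the verb-form vocab; otherwise None is returned and the
# caller falls back to a plain $REPLACE_ edit.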
def align_sequences(source_sent, target_sent):
# check if sent is OK
if not is_sent_ok(source_sent) or not is_sent_ok(target_sent):
return None
source_tokens = source_sent.split()
target_tokens = target_sent.split()
matcher = SequenceMatcher(None, source_tokens, target_tokens)
diffs = list(matcher.get_opcodes())
all_edits = []
for diff in diffs:
tag, i1, i2, j1, j2 = diff
source_part = _split(" ".join(source_tokens[i1:i2]))
target_part = _split(" ".join(target_tokens[j1:j2]))
if tag == 'equal':
continue
elif tag == 'delete':
            # delete all words separately
for j in range(i2 - i1):
edit = [(i1 + j, i1 + j + 1), '$DELETE']
all_edits.append(edit)
elif tag == 'insert':
# append to the previous word
for target_token in target_part:
edit = ((i1 - 1, i1), f"$APPEND_{target_token}")
all_edits.append(edit)
else:
# check merge first of all
edits = apply_merge_transformation(source_part, target_part,
shift_idx=i1)
if edits:
all_edits.extend(edits)
continue
            # normalize alignments if needed (make them singletons)
_, alignments = perfect_align(source_part, target_part,
insertions_allowed=0)
for alignment in alignments:
new_shift = alignment[2][0]
edits = convert_alignments_into_edits(alignment,
shift_idx=i1 + new_shift)
all_edits.extend(edits)
# get labels
labels = convert_edits_into_labels(source_tokens, all_edits)
# match tags to source tokens
sent_with_tags = add_labels_to_the_tokens(source_tokens, labels)
return sent_with_tags
def convert_edits_into_labels(source_tokens, all_edits):
# make sure that edits are flat
flat_edits = []
for edit in all_edits:
(start, end), edit_operations = edit
if isinstance(edit_operations, list):
for operation in edit_operations:
new_edit = [(start, end), operation]
flat_edits.append(new_edit)
elif isinstance(edit_operations, str):
flat_edits.append(edit)
else:
raise Exception("Unknown operation type")
all_edits = flat_edits[:]
labels = []
total_labels = len(source_tokens) + 1
if not all_edits:
labels = [["$KEEP"] for x in range(total_labels)]
else:
for i in range(total_labels):
edit_operations = [x[1] for x in all_edits if x[0][0] == i - 1
and x[0][1] == i]
if not edit_operations:
labels.append(["$KEEP"])
else:
labels.append(edit_operations)
return labels
def convert_alignments_into_edits(alignment, shift_idx):
edits = []
action, target_tokens, new_idx = alignment
source_token = action.replace("REPLACE_", "")
# check if delete
if not target_tokens:
edit = [(shift_idx, 1 + shift_idx), "$DELETE"]
return [edit]
# check splits
for i in range(1, len(target_tokens)):
target_token = " ".join(target_tokens[:i + 1])
transform = apply_transformation(source_token, target_token)
if transform:
edit = [(shift_idx, shift_idx + 1), transform]
edits.append(edit)
target_tokens = target_tokens[i + 1:]
for target in target_tokens:
edits.append([(shift_idx, shift_idx + 1), f"$APPEND_{target}"])
return edits
transform_costs = []
transforms = []
for target_token in target_tokens:
transform = apply_transformation(source_token, target_token)
if transform:
cost = 0
transforms.append(transform)
else:
cost = Levenshtein.distance(source_token, target_token)
transforms.append(None)
transform_costs.append(cost)
min_cost_idx = transform_costs.index(min(transform_costs))
# append to the previous word
for i in range(0, min_cost_idx):
target = target_tokens[i]
edit = [(shift_idx - 1, shift_idx), f"$APPEND_{target}"]
edits.append(edit)
# replace/transform target word
transform = transforms[min_cost_idx]
target = transform if transform is not None \
else f"$REPLACE_{target_tokens[min_cost_idx]}"
edit = [(shift_idx, 1 + shift_idx), target]
edits.append(edit)
# append to this word
for i in range(min_cost_idx + 1, len(target_tokens)):
target = target_tokens[i]
edit = [(shift_idx, 1 + shift_idx), f"$APPEND_{target}"]
edits.append(edit)
return edits
def add_labels_to_the_tokens(source_tokens, labels, delimeters=SEQ_DELIMETERS):
tokens_with_all_tags = []
source_tokens_with_start = [START_TOKEN] + source_tokens
for token, label_list in zip(source_tokens_with_start, labels):
all_tags = delimeters['operations'].join(label_list)
comb_record = token + delimeters['labels'] + all_tags
tokens_with_all_tags.append(comb_record)
return delimeters['tokens'].join(tokens_with_all_tags)
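# With the default SEQ_DELIMETERS, the tagged line for source 'a cat' aligned
# to target 'the cat' should look roughly like
#   $STARTSEPL|||SEPR$KEEP aSEPL|||SEPR$REPLACE_the catSEPL|||SEPR$KEEP
# i.e. one space-separated chunk per source token (plus $START), with the
# label(s) glued on via the SEPL|||SEPR delimiter (assuming no verb-form entry
# happens to match the ('a', 'the') pair).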
def convert_data_from_raw_files(source_file, target_file, output_file, chunk_size, max_len):
skipped = 0
tagged = []
source_data, target_data = read_parallel_lines(source_file, target_file)
print(f"The size of raw dataset is {len(source_data)}")
cnt_total, cnt_all, cnt_tp = 0, 0, 0
for source_sent, target_sent in tqdm(zip(source_data, target_data)):
if len(source_sent.split()) > max_len:
skipped += 1
continue
try:
aligned_sent = align_sequences(source_sent, target_sent)
except Exception:
aligned_sent = align_sequences(source_sent, target_sent)
if source_sent != target_sent:
cnt_tp += 1
alignments = [aligned_sent]
cnt_all += len(alignments)
try:
check_sent = convert_tagged_line(aligned_sent)
except Exception:
# debug mode
aligned_sent = align_sequences(source_sent, target_sent)
check_sent = convert_tagged_line(aligned_sent)
if "".join(check_sent.split()) != "".join(
target_sent.split()):
# do it again for debugging
aligned_sent = align_sequences(source_sent, target_sent)
check_sent = convert_tagged_line(aligned_sent)
print(f"Incorrect pair: \n{target_sent}\n{check_sent}")
continue
if alignments:
cnt_total += len(alignments)
tagged.extend(alignments)
if len(tagged) > chunk_size:
write_lines(output_file, tagged, mode='a')
tagged = []
print(f"Overall extracted {cnt_total}. "
f"Original TP {cnt_tp}."
f" Original TN {cnt_all - cnt_tp}")
skipped_rate = skipped / len(source_data)
print(f"Skipped rate:", skipped_rate)
if tagged:
write_lines(output_file, tagged, 'a')
def convert_labels_into_edits(labels):
all_edits = []
for i, label_list in enumerate(labels):
if label_list == ["$KEEP"]:
continue
else:
edit = [(i - 1, i), label_list]
all_edits.append(edit)
return all_edits
def get_target_sent_by_levels(source_tokens, labels):
relevant_edits = convert_labels_into_edits(labels)
target_tokens = source_tokens[:]
leveled_target_tokens = {}
if not relevant_edits:
target_sentence = " ".join(target_tokens)
return leveled_target_tokens, target_sentence
max_level = max([len(x[1]) for x in relevant_edits])
for level in range(max_level):
rest_edits = []
shift_idx = 0
for edits in relevant_edits:
(start, end), label_list = edits
label = label_list[0]
target_pos = start + shift_idx
source_token = target_tokens[target_pos] if target_pos >= 0 else START_TOKEN
if label == "$DELETE":
del target_tokens[target_pos]
shift_idx -= 1
elif label.startswith("$APPEND_"):
word = label.replace("$APPEND_", "")
target_tokens[target_pos + 1: target_pos + 1] = [word]
shift_idx += 1
elif label.startswith("$REPLACE_"):
word = label.replace("$REPLACE_", "")
target_tokens[target_pos] = word
elif label.startswith("$TRANSFORM"):
word = apply_reverse_transformation(source_token, label)
if word is None:
word = source_token
target_tokens[target_pos] = word
elif label.startswith("$MERGE_"):
# apply merge only on last stage
if level == (max_level - 1):
target_tokens[target_pos + 1: target_pos + 1] = [label]
shift_idx += 1
else:
rest_edit = [(start + shift_idx, end + shift_idx), [label]]
rest_edits.append(rest_edit)
rest_labels = label_list[1:]
if rest_labels:
rest_edit = [(start + shift_idx, end + shift_idx), rest_labels]
rest_edits.append(rest_edit)
leveled_tokens = target_tokens[:]
# update next step
relevant_edits = rest_edits[:]
if level == (max_level - 1):
leveled_tokens = replace_merge_transforms(leveled_tokens)
leveled_labels = convert_edits_into_labels(leveled_tokens,
relevant_edits)
leveled_target_tokens[level + 1] = {"tokens": leveled_tokens,
"labels": leveled_labels}
target_sentence = " ".join(leveled_target_tokens[max_level]["tokens"])
return leveled_target_tokens, target_sentence
def replace_merge_transforms(tokens):
if all(not x.startswith("$MERGE_") for x in tokens):
return tokens
target_tokens = tokens[:]
allowed_range = (1, len(tokens) - 1)
for i in range(len(tokens)):
target_token = tokens[i]
if target_token.startswith("$MERGE"):
if target_token.startswith("$MERGE_SWAP") and i in allowed_range:
target_tokens[i - 1] = tokens[i + 1]
target_tokens[i + 1] = tokens[i - 1]
target_tokens[i: i + 1] = []
target_line = " ".join(target_tokens)
target_line = target_line.replace(" $MERGE_HYPHEN ", "-")
target_line = target_line.replace(" $MERGE_SPACE ", "")
return target_line.split()
def convert_tagged_line(line, delimeters=SEQ_DELIMETERS):
label_del = delimeters['labels']
source_tokens = [x.split(label_del)[0]
for x in line.split(delimeters['tokens'])][1:]
labels = [x.split(label_del)[1].split(delimeters['operations'])
for x in line.split(delimeters['tokens'])]
assert len(source_tokens) + 1 == len(labels)
levels_dict, target_line = get_target_sent_by_levels(source_tokens, labels)
return target_line
def main(args):
convert_data_from_raw_files(args.source, args.target, args.output_file, args.chunk_size, args.max_len)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source',
help='Path to the source file',
required=True)
parser.add_argument('-t', '--target',
help='Path to the target file',
required=True)
parser.add_argument('-o', '--output_file',
help='Path to the output file',
required=True)
parser.add_argument('--chunk_size',
type=int,
help='Dump each chunk size.',
default=1000000)
parser.add_argument('-m', '--max_len',
type=int,
help='max sentence length',
default=128)
args = parser.parse_args()
main(args)
| 18,758 | 36.443114 | 106 | py |
CTC2021 | CTC2021-main/ctc_gector/utils/helpers.py | import os
from pathlib import Path
VOCAB_DIR = Path(__file__).resolve().parent.parent / "data"
PAD = "@@PADDING@@"
UNK = "@@UNKNOWN@@"
START_TOKEN = "$START"
SEQ_DELIMETERS = {"tokens": " ",
"labels": "SEPL|||SEPR",
"operations": "SEPL__SEPR"}
def get_verb_form_dicts():
path_to_dict = os.path.join(VOCAB_DIR, "verb-form-vocab.txt")
encode, decode = {}, {}
with open(path_to_dict, encoding="utf-8") as f:
for line in f:
words, tags = line.split(":")
word1, word2 = words.split("_")
tag1, tag2 = tags.split("_")
decode_key = f"{word1}_{tag1}_{tag2.strip()}"
if decode_key not in decode:
encode[words] = tags
decode[decode_key] = word2
return encode, decode
ENCODE_VERB_DICT, DECODE_VERB_DICT = get_verb_form_dicts()
def get_target_sent_by_edits(source_tokens, edits):
target_tokens = source_tokens[:]
shift_idx = 0
for edit in edits:
start, end, label, _ = edit
target_pos = start + shift_idx
source_token = target_tokens[target_pos] \
if len(target_tokens) > target_pos >= 0 else ''
if label == "":
del target_tokens[target_pos]
shift_idx -= 1
elif start == end:
word = label.replace("$APPEND_", "")
target_tokens[target_pos: target_pos] = [word]
shift_idx += 1
elif label.startswith("$TRANSFORM_"):
word = apply_reverse_transformation(source_token, label)
if word is None:
word = source_token
target_tokens[target_pos] = word
elif start == end - 1:
word = label.replace("$REPLACE_", "")
target_tokens[target_pos] = word
elif label.startswith("$MERGE_"):
target_tokens[target_pos + 1: target_pos + 1] = [label]
shift_idx += 1
return replace_merge_transforms(target_tokens)
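# A small sketch of the edit format consumed above, as produced by
# GecBERTModel.get_token_action: (start, end, replacement, probability).
#   get_target_sent_by_edits(['he', 'go', 'home'], [(1, 2, 'goes', 0.9)])
#     -> ['he', 'goes', 'home']
#   get_target_sent_by_edits(['he', 'go', 'home'], [(2, 3, '', 0.9)])
#     -> ['he', 'go']
# An empty replacement deletes the token; equal start/end inserts the word at
# that position.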
def replace_merge_transforms(tokens):
if all(not x.startswith("$MERGE_") for x in tokens):
return tokens
target_line = " ".join(tokens)
target_line = target_line.replace(" $MERGE_HYPHEN ", "-")
target_line = target_line.replace(" $MERGE_SPACE ", "")
return target_line.split()
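# e.g. replace_merge_transforms(['well', '$MERGE_HYPHEN', 'known', 'author'])
# should return ['well-known', 'author'], while '$MERGE_SPACE' joins its two
# neighbours with no separator at all.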
def convert_using_case(token, smart_action):
if not smart_action.startswith("$TRANSFORM_CASE_"):
return token
if smart_action.endswith("LOWER"):
return token.lower()
elif smart_action.endswith("UPPER"):
return token.upper()
elif smart_action.endswith("CAPITAL"):
return token.capitalize()
elif smart_action.endswith("CAPITAL_1"):
return token[0] + token[1:].capitalize()
elif smart_action.endswith("UPPER_-1"):
return token[:-1].upper() + token[-1]
else:
return token
def convert_using_verb(token, smart_action):
key_word = "$TRANSFORM_VERB_"
if not smart_action.startswith(key_word):
raise Exception(f"Unknown action type {smart_action}")
encoding_part = f"{token}_{smart_action[len(key_word):]}"
decoded_target_word = decode_verb_form(encoding_part)
return decoded_target_word
def convert_using_split(token, smart_action):
key_word = "$TRANSFORM_SPLIT"
if not smart_action.startswith(key_word):
raise Exception(f"Unknown action type {smart_action}")
target_words = token.split("-")
return " ".join(target_words)
def convert_using_plural(token, smart_action):
if smart_action.endswith("PLURAL"):
return token + "s"
elif smart_action.endswith("SINGULAR"):
return token[:-1]
else:
raise Exception(f"Unknown action type {smart_action}")
def apply_reverse_transformation(source_token, transform):
if transform.startswith("$TRANSFORM"):
# deal with equal
if transform == "$KEEP":
return source_token
# deal with case
if transform.startswith("$TRANSFORM_CASE"):
return convert_using_case(source_token, transform)
# deal with verb
if transform.startswith("$TRANSFORM_VERB"):
return convert_using_verb(source_token, transform)
# deal with split
if transform.startswith("$TRANSFORM_SPLIT"):
return convert_using_split(source_token, transform)
# deal with single/plural
if transform.startswith("$TRANSFORM_AGREEMENT"):
return convert_using_plural(source_token, transform)
        # raise exception if no matching transform type was found
raise Exception(f"Unknown action type {transform}")
else:
return source_token
def read_parallel_lines(fn1, fn2):
lines1 = read_lines(fn1, skip_strip=True)
lines2 = read_lines(fn2, skip_strip=True)
assert len(lines1) == len(lines2)
out_lines1, out_lines2 = [], []
for line1, line2 in zip(lines1, lines2):
if not line1.strip() or not line2.strip():
continue
else:
out_lines1.append(line1)
out_lines2.append(line2)
return out_lines1, out_lines2
def read_lines(fn, skip_strip=False):
if not os.path.exists(fn):
return []
with open(fn, 'r', encoding='utf-8') as f:
lines = f.readlines()
return [s.strip() for s in lines if s.strip() or skip_strip]
def write_lines(fn, lines, mode='w'):
if mode == 'w' and os.path.exists(fn):
os.remove(fn)
with open(fn, encoding='utf-8', mode=mode) as f:
f.writelines(['%s\n' % s for s in lines])
def decode_verb_form(original):
return DECODE_VERB_DICT.get(original)
def encode_verb_form(original_word, corrected_word):
decoding_request = original_word + "_" + corrected_word
decoding_response = ENCODE_VERB_DICT.get(decoding_request, "").strip()
if original_word and decoding_response:
answer = decoding_response
else:
answer = None
return answer
def get_weights_name(transformer_name, lowercase):
if transformer_name == 'bert' and lowercase:
return 'bert-base-uncased'
if transformer_name == 'bert' and not lowercase:
return 'bert-base-cased'
if transformer_name == 'distilbert':
if not lowercase:
print('Warning! This model was trained only on uncased sentences.')
return 'distilbert-base-uncased'
if transformer_name == 'albert':
if not lowercase:
print('Warning! This model was trained only on uncased sentences.')
return 'albert-base-v1'
if lowercase:
print('Warning! This model was trained only on cased sentences.')
if transformer_name == 'roberta':
return 'roberta-base'
if transformer_name == 'gpt2':
return 'gpt2'
if transformer_name == 'transformerxl':
return 'transfo-xl-wt103'
if transformer_name == 'xlnet':
return 'xlnet-base-cased'
| 6,859 | 32.627451 | 79 | py |
MetaCat | MetaCat-master/main.py | # The code structure is adapted from the WeSTClass implementation
# https://github.com/yumeng5/WeSTClass
import numpy as np
np.random.seed(1234)
from time import time
from model import WSTC, f1
from keras.optimizers import SGD
from gen import augment, pseudodocs
from load_data import load_dataset
from gensim.models import word2vec
from gensim.models import KeyedVectors
from sklearn import preprocessing
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
def load_embedding(vocabulary_inv, num_class, dataset_name, embedding_name):
model_dir = './' + dataset_name
model_name = 'embedding_' + embedding_name
model_name = os.path.join(model_dir, model_name)
if os.path.exists(model_name):
# embedding_model = word2vec.Word2Vec.load(model_name)
embedding_model = KeyedVectors.load_word2vec_format(model_name, binary = False, unicode_errors='ignore')
print("Loading existing embedding vectors {}...".format(model_name))
    else:
        # Fail early with a clear message instead of hitting a NameError below
        # when embedding_model is used without being defined.
        raise FileNotFoundError("Cannot find the embedding file: {}".format(model_name))
embedding_weights = {key: embedding_model[word] if word in embedding_model else
np.random.uniform(-0.25, 0.25, embedding_model.vector_size)
for key, word in vocabulary_inv.items()}
centers = [None for _ in range(num_class)]
for word in embedding_model.vocab:
if word.startswith('$LABL_'):
centers[int(word.split('_')[-1])] = embedding_model[word] / np.linalg.norm(embedding_model[word])
return embedding_weights, centers
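# The embedding file is expected in word2vec text format at
# ./<dataset>/embedding_<embedding_name> (e.g. ./bio/embedding_gge with the
# default arguments). Rows whose word looks like $LABL_<class id> are treated
# as class embeddings: they are L2-normalized and collected into `centers`,
# which later parameterize the vMF distributions used to generate pseudo
# documents.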
def write_output(write_path, y_pred, perm):
invperm = np.zeros(len(perm), dtype='int32')
for i,v in enumerate(perm):
invperm[v] = i
y_pred = y_pred[invperm]
with open(os.path.join(write_path, 'out.txt'), 'w') as f:
for val in y_pred:
f.write(str(val) + '\n')
print("Classification results are written in {}".format(os.path.join(write_path, 'out.txt')))
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='main',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
### Basic settings ###
# dataset selection: GitHub-Bio (default), GitHub-AI, GitHub-Cyber, Twitter, Amazon
parser.add_argument('--dataset', default='bio', choices=['ai', 'bio', 'cyber', 'twitter', 'amazon'])
# embedding files: generation-guided embedding (default)
parser.add_argument('--embedding', default='gge')
# whether ground truth labels are available for evaluation: True (default), False
parser.add_argument('--with_evaluation', default='True', choices=['True', 'False'])
### Training settings ###
# mini-batch size for both pre-training and self-training: 256 (default)
parser.add_argument('--batch_size', default=256, type=int)
# training epochs: None (default)
parser.add_argument('--pretrain_epochs', default=None, type=int)
### Hyperparameters settings ###
# number of generated pseudo documents per class (beta): 100 (default)
parser.add_argument('--beta', default=100, type=int)
# keyword vocabulary size (gamma): 50 (default)
parser.add_argument('--gamma', default=50, type=int)
# vmf concentration parameter when synthesizing documents (kappa): 120 (default)
parser.add_argument('--kappa', default=120, type=float)
### Dummy arguments (please ignore) ###
# weak supervision selection: labeled documents (default)
parser.add_argument('--sup_source', default='docs', choices=['docs'])
# maximum self-training iterations: 0 (default)
parser.add_argument('--maxiter', default=0, type=int)
# self-training update interval: None (default)
parser.add_argument('--update_interval', default=None, type=int)
# background word distribution weight (alpha): 0.0 (default)
parser.add_argument('--alpha', default=0.0, type=float)
# self-training stopping criterion (delta): None (default)
parser.add_argument('--delta', default=0.1, type=float)
# trained model directory: None (default)
parser.add_argument('--trained_weights', default=None)
args = parser.parse_args()
print(args)
alpha = args.alpha
beta = args.beta
gamma = args.gamma
delta = args.delta
kappa = args.kappa
word_embedding_dim = 100
update_interval = 50
self_lr = 1e-4
if args.dataset == 'bio':
max_sequence_length = 1000
pretrain_epochs = 20
elif args.dataset == 'ai':
max_sequence_length = 1000
pretrain_epochs = 30
elif args.dataset == 'cyber':
max_sequence_length = 1000
pretrain_epochs = 20
elif args.dataset == 'amazon':
max_sequence_length = 150
pretrain_epochs = 40
elif args.dataset == 'twitter':
max_sequence_length = 30
pretrain_epochs = 40
decay = 1e-6
if args.update_interval is not None:
update_interval = args.update_interval
if args.pretrain_epochs is not None:
pretrain_epochs = args.pretrain_epochs
if args.with_evaluation == 'True':
with_evaluation = True
else:
with_evaluation = False
if args.sup_source == 'docs':
x, y, word_counts, vocabulary, vocabulary_inv_list, len_avg, len_std, word_sup_list, sup_idx, perm = \
load_dataset(args.dataset, model='cnn', sup_source=args.sup_source, with_evaluation=with_evaluation, truncate_len=max_sequence_length)
np.random.seed(1234)
vocabulary_inv = {key: value for key, value in enumerate(vocabulary_inv_list)}
vocab_sz = len(vocabulary_inv)
n_classes = len(word_sup_list)
if x.shape[1] < max_sequence_length:
max_sequence_length = x.shape[1]
x = x[:, :max_sequence_length]
sequence_length = max_sequence_length
print("\n### Input preparation ###")
embedding_weights, centers = load_embedding(vocabulary_inv, n_classes, args.dataset, args.embedding)
embedding_mat = np.array([np.array(embedding_weights[word]) for word in vocabulary_inv])
wstc = WSTC(input_shape=x.shape, n_classes=n_classes, y=y, model='cnn',
vocab_sz=vocab_sz, embedding_matrix=embedding_mat, word_embedding_dim=word_embedding_dim)
if args.trained_weights is None:
print("\n### Phase 1: vMF distribution fitting & pseudo document generation ###")
word_sup_array = np.array([np.array([vocabulary[word] for word in word_class_list]) for word_class_list in word_sup_list])
total_counts = sum(word_counts[ele] for ele in word_counts)
total_counts -= word_counts[vocabulary_inv_list[0]]
background_array = np.zeros(vocab_sz)
for i in range(1,vocab_sz):
background_array[i] = word_counts[vocabulary_inv[i]]/total_counts
seed_docs, seed_label = pseudodocs(word_sup_array, gamma, background_array,
sequence_length, len_avg, len_std, beta, alpha,
vocabulary_inv, embedding_mat, centers, kappa, 'cnn',
'./results/{}/{}/phase1/'.format(args.dataset, 'cnn'))
if args.sup_source == 'docs':
num_real_doc = len(sup_idx.flatten()) * int(1 + beta * 0.1)
real_seed_docs, real_seed_label = augment(x, sup_idx, num_real_doc)
seed_docs = np.concatenate((seed_docs, real_seed_docs), axis=0)
seed_label = np.concatenate((seed_label, real_seed_label), axis=0)
perm_seed = np.random.permutation(len(seed_label))
seed_docs = seed_docs[perm_seed]
seed_label = seed_label[perm_seed]
print('\n### Phase 2: pre-training with pseudo documents ###')
wstc.pretrain(x=seed_docs, pretrain_labels=seed_label,
sup_idx=sup_idx, optimizer=SGD(lr=0.1, momentum=0.9),
epochs=pretrain_epochs, batch_size=args.batch_size,
save_dir='./results/{}/{}/phase2'.format(args.dataset, 'cnn'))
y_pred = wstc.predict(x)
if y is not None:
f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
print('F1 score after pre-training: f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))
else:
print("\n### Directly loading trained weights ###")
wstc.load_weights(args.trained_weights)
y_pred = wstc.predict(x)
if y is not None:
f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
print('F1 score: f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))
print("\n### Generating outputs ###")
write_output('./' + args.dataset, y_pred, perm)
| 7,965 | 35.541284 | 137 | py |
MetaCat | MetaCat-master/model.py | import numpy as np
np.random.seed(1234)
import os
from time import time
import csv
import keras.backend as K
# K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=30, inter_op_parallelism_threads=30)))
from keras.engine.topology import Layer
from keras.layers import Dense, Input, Convolution1D, Embedding, GlobalMaxPooling1D, GRU, TimeDistributed
from keras.layers.merge import Concatenate
from keras.models import Model
from keras import initializers, regularizers, constraints
from keras.initializers import VarianceScaling, RandomUniform
from sklearn.metrics import f1_score
def f1(y_true, y_pred):
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
f1_macro = f1_score(y_true, y_pred, average='macro')
f1_micro = f1_score(y_true, y_pred, average='micro')
return f1_macro, f1_micro
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None,
embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
x = Input(shape=(input_shape,), name='input')
z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding",
weights=[embedding_matrix], trainable=word_trainable)(x)
conv_blocks = []
for sz in filter_sizes:
conv = Convolution1D(filters=num_filters,
kernel_size=sz,
padding="valid",
activation=act,
strides=1,
kernel_initializer=init)(z)
conv = GlobalMaxPooling1D()(conv)
conv_blocks.append(conv)
z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
z = Dense(hidden_dim, activation="relu")(z)
y = Dense(n_classes, activation="softmax")(z)
return Model(inputs=x, outputs=y, name='classifier')
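# A rough construction sketch (shapes are hypothetical): for padded documents
# of length 1000 over a 20,000-word vocabulary with 100-d embeddings,
#   clf = ConvolutionLayer(1000, n_classes=5, vocab_sz=20000,
#                          embedding_matrix=emb_mat)  # emb_mat: (20000, 100)
# builds parallel 1-D convolutions with window sizes 2-5, max-pools each one,
# concatenates the results, and ends with a dense layer plus class softmax.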
def dot_product(x, kernel):
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
class AttentionWithContext(Layer):
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
init='glorot_uniform', bias=True, **kwargs):
self.supports_masking = True
self.init = init
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight(shape=(input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
return None
def call(self, x, mask=None):
uit = dot_product(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
ait = dot_product(uit, self.u)
a = K.exp(ait)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
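# In equation form, the layer above computes, for each timestep t of input h_t:
#   u_t = tanh(W h_t + b),  a_t = softmax_t(u_t . u),  output = sum_t a_t * h_t
# i.e. the word/sentence-level attention used in hierarchical attention
# networks, with u acting as a learned context vector.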
def HierAttLayer(input_shape, n_classes, word_trainable=False, vocab_sz=None,
embedding_matrix=None, word_embedding_dim=100, gru_dim=100, fc_dim=100):
sentence_input = Input(shape=(input_shape[2],), dtype='int32')
embedded_sequences = Embedding(vocab_sz,
word_embedding_dim,
input_length=input_shape[2],
weights=[embedding_matrix],
trainable=word_trainable)(sentence_input)
l_lstm = GRU(gru_dim, return_sequences=True)(embedded_sequences)
l_dense = TimeDistributed(Dense(fc_dim))(l_lstm)
l_att = AttentionWithContext()(l_dense)
sentEncoder = Model(sentence_input, l_att)
x = Input(shape=(input_shape[1], input_shape[2]), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(x)
l_lstm_sent = GRU(gru_dim, return_sequences=True)(review_encoder)
l_dense_sent = TimeDistributed(Dense(fc_dim))(l_lstm_sent)
l_att_sent = AttentionWithContext()(l_dense_sent)
y = Dense(n_classes, activation='softmax')(l_att_sent)
return Model(inputs=x, outputs=y, name='classifier')
class WSTC(object):
def __init__(self,
input_shape,
n_classes=None,
init=RandomUniform(minval=-0.01, maxval=0.01),
y=None,
model='cnn',
vocab_sz=None,
word_embedding_dim=100,
embedding_matrix=None
):
super(WSTC, self).__init__()
self.input_shape = input_shape
self.y = y
self.n_classes = n_classes
if model == 'cnn':
self.classifier = ConvolutionLayer(self.input_shape[1], n_classes=n_classes,
vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
word_embedding_dim=word_embedding_dim, init=init)
elif model == 'rnn':
self.classifier = HierAttLayer(self.input_shape, n_classes=n_classes,
vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
word_embedding_dim=word_embedding_dim)
self.model = self.classifier
self.sup_list = {}
def pretrain(self, x, pretrain_labels, sup_idx=None, optimizer='adam',
loss='kld', epochs=200, batch_size=256, save_dir=None):
self.classifier.compile(optimizer=optimizer, loss=loss)
print("\nNeural model summary: ")
self.model.summary()
if sup_idx is not None:
for i, seed_idx in enumerate(sup_idx):
for idx in seed_idx:
self.sup_list[idx] = i
# begin pretraining
t0 = time()
print('\nPretraining...')
self.classifier.fit(x, pretrain_labels, batch_size=batch_size, epochs=epochs)
print('Pretraining time: {:.2f}s'.format(time() - t0))
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
self.classifier.save_weights(save_dir + '/pretrained.h5')
print('Pretrained model saved to {}/pretrained.h5'.format(save_dir))
self.pretrained = True
def load_weights(self, weights):
self.model.load_weights(weights)
def predict(self, x):
q = self.model.predict(x, verbose=0)
return q.argmax(1)
def target_distribution(self, q, power=2):
weight = q**power / q.sum(axis=0)
p = (weight.T / weight.sum(axis=1)).T
for i in self.sup_list:
p[i] = 0
p[i][self.sup_list[i]] = 1
return p
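    # In formula form, with q_ij the predicted probability of document i for
    # class j and f_j = sum_i q_ij the soft class frequency, the target is
    #   p_ij = (q_ij**power / f_j) / sum_k (q_ik**power / f_k)
    # with power defaulting to 2; rows of labeled documents are then overridden
    # with their one-hot ground-truth label via self.sup_list.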
def compile(self, optimizer='sgd', loss='kld'):
self.model.compile(optimizer=optimizer, loss=loss)
def fit(self, x, y=None, maxiter=5e4, batch_size=256, tol=0.1, power=2,
update_interval=140, save_dir=None, save_suffix=''):
print('Update interval: {}'.format(update_interval))
pred = self.classifier.predict(x)
y_pred = np.argmax(pred, axis=1)
y_pred_last = np.copy(y_pred)
# logging file
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logfile = open(save_dir + '/self_training_log_{}.csv'.format(save_suffix), 'w')
logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'f1_macro', 'f1_micro'])
logwriter.writeheader()
index = 0
index_array = np.arange(x.shape[0])
for ite in range(int(maxiter)):
if ite % update_interval == 0:
q = self.model.predict(x, verbose=0)
y_pred = q.argmax(axis=1)
p = self.target_distribution(q, power)
print('\nIter {}: '.format(ite), end='')
if y is not None:
f1_macro, f1_micro = np.round(f1(y, y_pred), 5)
logdict = dict(iter=ite, f1_macro=f1_macro, f1_micro=f1_micro)
logwriter.writerow(logdict)
print('f1_macro = {}, f1_micro = {}'.format(f1_macro, f1_micro))
# check stop criterion
delta_label = np.sum(y_pred != y_pred_last).astype(np.float) / y_pred.shape[0]
y_pred_last = np.copy(y_pred)
print('Fraction of documents with label changes: {} %'.format(np.round(delta_label*100, 3)))
if ite > 0 and delta_label < tol/100:
print('\nFraction: {} % < tol: {} %'.format(np.round(delta_label*100, 3), tol))
print('Reached tolerance threshold. Stopping training.')
logfile.close()
break
# train on batch
idx = index_array[index * batch_size: min((index+1) * batch_size, x.shape[0])]
self.model.train_on_batch(x=x[idx], y=p[idx])
index = index + 1 if (index + 1) * batch_size <= x.shape[0] else 0
ite += 1
logfile.close()
if save_dir is not None:
self.model.save_weights(save_dir + '/final.h5')
print("Final model saved to: {}/final.h5".format(save_dir))
return self.predict(x)
| 9,046 | 32.383764 | 124 | py |
MetaCat | MetaCat-master/gen.py | import numpy as np
import os
np.random.seed(1234)
from spherecluster import SphericalKMeans, VonMisesFisherMixture, sample_vMF
def seed_expansion(word_sup_array, prob_sup_array, sz, write_path, vocabulary_inv, embedding_mat):
expanded_seed = []
vocab_sz = len(vocabulary_inv)
for j, word_class in enumerate(word_sup_array):
prob_sup_class = prob_sup_array[j]
expanded_class = []
seed_vec = np.zeros(vocab_sz)
if len(word_class) < sz:
for i, word in enumerate(word_class):
seed_vec[word] = prob_sup_class[i]
expanded = np.dot(embedding_mat.transpose(), seed_vec)
expanded = np.dot(embedding_mat, expanded)
word_expanded = sorted(range(len(expanded)), key=lambda k: expanded[k], reverse=True)
for i in range(sz):
expanded_class.append(word_expanded[i])
expanded_seed.append(np.array(expanded_class))
else:
expanded_seed.append(word_class)
if write_path is not None:
if not os.path.exists(write_path):
os.makedirs(write_path)
f = open(write_path + 'class' + str(j) + '_' + str(sz) + '.txt', 'w')
for i, word in enumerate(expanded_class):
f.write(vocabulary_inv[word] + ' ')
f.close()
return expanded_seed
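# In effect, when a class has fewer than `sz` seed words, the expansion above
# scores every vocabulary word w as
#   score(w) = e_w . (E^T s)
# where E is the embedding matrix and s carries the seed-word probabilities,
# i.e. similarity to the weighted seed centroid; the `sz` highest-scoring words
# per class become the expanded seed set.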
def label_expansion(class_labels, write_path, vocabulary_inv, embedding_mat):
print("Retrieving top-t nearest words...")
n_classes = len(class_labels)
prob_sup_array = []
current_szes = []
all_class_labels = []
for class_label in class_labels:
current_sz = len(class_label)
current_szes.append(current_sz)
prob_sup_array.append([1/current_sz] * current_sz)
all_class_labels += list(class_label)
current_sz = np.min(current_szes)
while len(all_class_labels) == len(set(all_class_labels)) and current_sz <= 200:
current_sz += 1
expanded_array = seed_expansion(class_labels, prob_sup_array, current_sz, None, vocabulary_inv, embedding_mat)
all_class_labels = [w for w_class in expanded_array for w in w_class]
expanded_array = seed_expansion(class_labels, prob_sup_array, current_sz-1, None, vocabulary_inv, embedding_mat)
print("Final expansion size t = {}".format(len(expanded_array[0])))
centers = []
kappas = []
print("Top-t nearest words for each class:")
for i in range(n_classes):
expanded_class = expanded_array[i]
vocab_expanded = [vocabulary_inv[w] for w in expanded_class]
print("Class {}:".format(i))
print(vocab_expanded)
expanded_mat = embedding_mat[np.asarray(expanded_class)]
vmf_soft = VonMisesFisherMixture(n_clusters=1, n_jobs=15)
vmf_soft.fit(expanded_mat)
center = vmf_soft.cluster_centers_[0]
kappa = vmf_soft.concentrations_[0]
centers.append(center)
kappas.append(kappa)
for j, expanded_class in enumerate(expanded_array):
if write_path is not None:
if not os.path.exists(write_path):
os.makedirs(write_path)
f = open(write_path + 'class' + str(j) + '.txt', 'w')
for i, word in enumerate(expanded_class):
f.write(vocabulary_inv[word] + ' ')
f.close()
print("Finished vMF distribution fitting.")
return expanded_array, centers, kappas
def pseudodocs(word_sup_array, total_num, background_array, sequence_length, len_avg,
len_std, num_doc, interp_weight, vocabulary_inv, embedding_mat, centers, kappa, model, save_dir=None):
for i in range(len(embedding_mat)):
embedding_mat[i] = embedding_mat[i] / np.linalg.norm(embedding_mat[i])
# _, centers, kappas = \
# label_expansion(word_sup_array, save_dir, vocabulary_inv, embedding_mat)
print("Pseudo documents generation...")
background_vec = interp_weight * background_array
if model == 'cnn':
docs = np.zeros((num_doc*len(word_sup_array), sequence_length), dtype='int32')
label = np.zeros((num_doc*len(word_sup_array), len(word_sup_array)))
for i in range(len(word_sup_array)):
docs_len = len_avg*np.ones(num_doc)
center = centers[i]
# kappa = kappas[i]
discourses = sample_vMF(center, kappa, num_doc)
for j in range(num_doc):
discourse = discourses[j]
prob_vec = np.dot(embedding_mat, discourse)
prob_vec = np.exp(prob_vec)
sorted_idx = np.argsort(prob_vec)[::-1]
delete_idx = sorted_idx[total_num:]
prob_vec[delete_idx] = 0
prob_vec /= np.sum(prob_vec)
prob_vec *= 1 - interp_weight
prob_vec += background_vec
doc_len = int(docs_len[j])
docs[i*num_doc+j][:doc_len] = np.random.choice(len(prob_vec), size=doc_len, p=prob_vec)
label[i*num_doc+j] = interp_weight/len(word_sup_array)*np.ones(len(word_sup_array))
label[i*num_doc+j][i] += 1 - interp_weight
elif model == 'rnn':
docs = np.zeros((num_doc*len(word_sup_array), sequence_length[0], sequence_length[1]), dtype='int32')
label = np.zeros((num_doc*len(word_sup_array), len(word_sup_array)))
doc_len = int(len_avg[0])
sent_len = int(len_avg[1])
for period_idx in vocabulary_inv:
if vocabulary_inv[period_idx] == '.':
break
for i in range(len(word_sup_array)):
center = centers[i]
# kappa = kappas[i]
discourses = sample_vMF(center, kappa, num_doc)
for j in range(num_doc):
discourse = discourses[j]
prob_vec = np.dot(embedding_mat, discourse)
prob_vec = np.exp(prob_vec)
sorted_idx = np.argsort(prob_vec)[::-1]
delete_idx = sorted_idx[total_num:]
prob_vec[delete_idx] = 0
prob_vec /= np.sum(prob_vec)
prob_vec *= 1 - interp_weight
prob_vec += background_vec
for k in range(doc_len):
docs[i*num_doc+j][k][:sent_len] = np.random.choice(len(prob_vec), size=sent_len, p=prob_vec)
docs[i*num_doc+j][k][sent_len] = period_idx
label[i*num_doc+j] = interp_weight/len(word_sup_array)*np.ones(len(word_sup_array))
label[i*num_doc+j][i] += 1 - interp_weight
print("Finished Pseudo documents generation.")
return docs, label
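# Summary of the generation scheme above (the 'cnn' branch; the 'rnn' branch
# does the same per sentence): for each class i, draw num_doc discourse vectors
# d ~ vMF(center_i, kappa); turn each into a word distribution
# p(w) proportional to exp(e_w . d), truncated to the total_num highest-scoring
# words; mix it with the corpus background distribution as
#   p = (1 - alpha) * p_topic + alpha * p_background   (alpha = interp_weight)
# then sample int(len_avg) word ids per pseudo document and attach the smoothed
# label that puts (1 - alpha) extra mass on class i on top of a uniform alpha/n.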
def augment(x, sup_idx, total_len):
print("Labeled documents augmentation...")
print(sup_idx)
docs = x[sup_idx.flatten()]
curr_len = len(docs)
copy_times = int(total_len/curr_len) - 1
y = np.zeros(len(sup_idx.flatten()), dtype='int32')
label_nums = [len(seed_idx) for seed_idx in sup_idx]
cnt = 0
for i in range(len(sup_idx)):
y[cnt:cnt+label_nums[i]] = i
cnt += label_nums[i]
new_docs = docs
new_y = y
for i in range(copy_times):
new_docs = np.concatenate((new_docs, docs), axis=0)
new_y = np.concatenate((new_y, y), axis=0)
pretrain_labels = np.zeros((len(new_y),len(np.unique(y))))
for i in range(len(new_y)):
pretrain_labels[i][new_y[i]] = 1.0
print("Finished labeled documents augmentation.")
return new_docs, pretrain_labels
| 6,432 | 36.184971 | 113 | py |
MetaCat | MetaCat-master/data_preprocess.py | import json
import argparse
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='bio', choices=['bio', 'ai', 'cyber', 'amazon', 'twitter'])
args = parser.parse_args()
dataset = args.dataset
with open(f'{dataset}/meta_dict.json') as fin:
meta_dict = json.load(fin)
local = meta_dict['local']
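# Append each local metadata value to the document text as a '$TYPE_value' token
# so the classifier treats it as part of the text.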
with open(f'{dataset}/{dataset}.json') as fin, open(f'{dataset}/dataset.csv', 'w') as fout:
for line in fin:
data = json.loads(line)
text = data['text']
for lm in local:
local_metadata = ['$'+lm.upper()+'_'+x for x in data[lm]]
text += ' ' + ' '.join(local_metadata)
fout.write(str(data['label'])+',\"'+text.strip()+'\"\n')
| 733 | 33.952381 | 108 | py |
MetaCat | MetaCat-master/eval.py | import string
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
import argparse
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='bio', choices=['bio', 'ai', 'cyber', 'amazon', 'twitter'])
args = parser.parse_args()
dataset = args.dataset
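# doc_id.txt lists the seed (labeled) documents of each class; they are excluded from evaluation below.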
train = []
with open(f'{dataset}/doc_id.txt') as fin:
for line in fin:
idx = line.strip().split(':')[1].split(',')
train += [int(x) for x in idx]
y = []
with open(f'{dataset}/dataset.csv') as fin:
for idx, line in enumerate(fin):
if idx not in train:
y.append(line.strip().split(',')[0])
y_pred = []
with open(f'{dataset}/out.txt') as fin:
for idx, line in enumerate(fin):
if idx not in train:
y_pred.append(line.strip())
print(f1_score(y, y_pred, average='micro'))
print(f1_score(y, y_pred, average='macro'))
print(confusion_matrix(y, y_pred)) | 939 | 28.375 | 108 | py |
MetaCat | MetaCat-master/load_data.py | import csv
import numpy as np
import os
import re
import itertools
from collections import Counter
from os.path import join
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
def read_file(data_dir, with_evaluation):
data = []
target = []
with open(join(data_dir, 'dataset.csv'), 'rt', encoding='utf-8') as csvfile:
csv.field_size_limit(500 * 1024 * 1024)
reader = csv.reader(csvfile)
for row in reader:
if data_dir == './agnews':
doc = row[1] + '. ' + row[2]
data.append(doc)
target.append(int(row[0]) - 1)
elif data_dir == './yelp':
data.append(row[1])
target.append(int(row[0]) - 1)
else:
data.append(row[1])
target.append(int(row[0]))
if with_evaluation:
y = np.asarray(target)
assert len(data) == len(y)
assert set(range(len(np.unique(y)))) == set(np.unique(y))
else:
y = None
return data, y
def clean_str(string):
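    # Tokenization / string cleaning: separates punctuation and contractions into
    # space-delimited tokens and lower-cases the text.
    # Illustrative example: clean_str("It's great!") -> "it 's great !"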
string = re.sub(r"[^A-Za-z0-9(),.!?_\"\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\"", " \" ", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'m", " \'m", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"\.", " . ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\$", " $ ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def preprocess_doc(data):
data = [s.strip() for s in data]
data = [clean_str(s) for s in data]
return data
def pad_sequences(sentences, padding_word="<PAD/>", pad_len=None):
if pad_len is not None:
sequence_length = pad_len
else:
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
def build_vocab(sentences):
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return word_counts, vocabulary, vocabulary_inv
def build_input_data_cnn(sentences, vocabulary):
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
return x
def build_input_data_rnn(data, vocabulary, max_doc_len, max_sent_len):
x = np.zeros((len(data), max_doc_len, max_sent_len), dtype='int32')
for i, doc in enumerate(data):
for j, sent in enumerate(doc):
if j >= max_doc_len:
break
k = 0
for word in sent:
if k >= max_sent_len:
break
x[i,j,k] = vocabulary[word]
k += 1
return x
def extract_keywords(data_path, vocab, class_type, num_keywords, data, perm):
sup_data = []
sup_idx = []
sup_label = []
file_name = 'doc_id.txt'
infile = open(join(data_path, file_name), mode='r', encoding='utf-8')
text = infile.readlines()
for i, line in enumerate(text):
line = line.split('\n')[0]
class_id, doc_ids = line.split(':')
assert int(class_id) == i
seed_idx = doc_ids.split(',')
seed_idx = [int(idx) for idx in seed_idx]
sup_idx.append(seed_idx)
for idx in seed_idx:
sup_data.append(" ".join(data[idx]))
sup_label.append(i)
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
tfidf = TfidfVectorizer(norm='l2', sublinear_tf=True, max_df=0.2, stop_words='english')
sup_x = tfidf.fit_transform(sup_data)
sup_x = np.asarray(sup_x.todense())
vocab_dict = tfidf.vocabulary_
vocab_inv_dict = {v: k for k, v in vocab_dict.items()}
# print("\n### Supervision type: Labeled documents ###")
# print("Extracted keywords for each class: ")
keywords = []
cnt = 0
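    # For each class, average the tf-idf vectors of its seed documents and keep the
    # top-scoring in-vocabulary terms as class keywords (adjectives only when
    # class_type is 'sentiment').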
for i in range(len(sup_idx)):
class_vec = np.average(sup_x[cnt:cnt+len(sup_idx[i])], axis=0)
cnt += len(sup_idx[i])
sort_idx = np.argsort(class_vec)[::-1]
keyword = []
if class_type == 'topic':
j = 0
k = 0
while j < num_keywords:
w = vocab_inv_dict[sort_idx[k]]
if w in vocab:
keyword.append(vocab_inv_dict[sort_idx[k]])
j += 1
k += 1
elif class_type == 'sentiment':
j = 0
k = 0
while j < num_keywords:
w = vocab_inv_dict[sort_idx[k]]
w, t = nltk.pos_tag([w])[0]
if t.startswith("J") and w in vocab:
keyword.append(w)
j += 1
k += 1
# print("Class {}:".format(i))
# print(keyword)
keywords.append(keyword)
new_sup_idx = []
m = {v: k for k, v in enumerate(perm)}
for seed_idx in sup_idx:
new_seed_idx = []
for ele in seed_idx:
new_seed_idx.append(m[ele])
new_sup_idx.append(new_seed_idx)
    # Pad every class's seed-document id list to the same length (by repeating ids)
    # so sup_idx can be stored as a rectangular array.
maxlen = 0
for idlist in new_sup_idx:
if len(idlist) > maxlen:
maxlen = len(idlist)
new_sup_idx0 = []
for idlist in new_sup_idx:
idlist0 = []
for j in range(int(maxlen/len(idlist)) + 1):
idlist0 += idlist
new_sup_idx0.append(idlist0[:maxlen])
new_sup_idx0 = np.asarray(new_sup_idx0)
return keywords, new_sup_idx0
def load_keywords(data_path, sup_source):
if sup_source == 'labels':
file_name = 'classes.txt'
print("\n### Supervision type: Label Surface Names ###")
print("Label Names for each class: ")
elif sup_source == 'keywords':
file_name = 'keywords.txt'
print("\n### Supervision type: Class-related Keywords ###")
print("Keywords for each class: ")
infile = open(join(data_path, file_name), mode='r', encoding='utf-8')
text = infile.readlines()
keywords = []
for i, line in enumerate(text):
line = line.split('\n')[0]
class_id, contents = line.split(':')
assert int(class_id) == i
keyword = contents.split(',')
print("Supervision content of class {}:".format(i))
print(keyword)
keywords.append(keyword)
return keywords
def load_cnn(dataset_name, sup_source, num_keywords=10, with_evaluation=True, truncate_len=None):
data_path = './' + dataset_name
data, y = read_file(data_path, with_evaluation)
sz = len(data)
np.random.seed(1234)
perm = np.random.permutation(sz)
data = preprocess_doc(data)
data = [s.split(" ") for s in data]
tmp_list = [len(doc) for doc in data]
len_max = max(tmp_list)
len_avg = np.average(tmp_list)
len_std = np.std(tmp_list)
print("\n### Dataset statistics: ###")
print('Document max length: {} (words)'.format(len_max))
print('Document average length: {} (words)'.format(len_avg))
print('Document length std: {} (words)'.format(len_std))
if truncate_len is None:
truncate_len = min(int(len_avg + 3*len_std), len_max)
print("Defined maximum document length: {} (words)".format(truncate_len))
print('Fraction of truncated documents: {}'.format(sum(tmp > truncate_len for tmp in tmp_list)/len(tmp_list)))
sequences_padded = pad_sequences(data)
word_counts, vocabulary, vocabulary_inv = build_vocab(sequences_padded)
x = build_input_data_cnn(sequences_padded, vocabulary)
x = x[perm]
if with_evaluation:
print("Number of classes: {}".format(len(np.unique(y))))
print("Number of documents in each class:")
for i in range(len(np.unique(y))):
print("Class {}: {}".format(i, len(np.where(y == i)[0])))
y = y[perm]
print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))
if sup_source == 'labels' or sup_source == 'keywords':
keywords = load_keywords(data_path, sup_source)
return x, y, word_counts, vocabulary, vocabulary_inv, len_avg, len_std, keywords, perm
elif sup_source == 'docs':
class_type = 'topic'
keywords, sup_idx = extract_keywords(data_path, vocabulary, class_type, num_keywords, data, perm)
return x, y, word_counts, vocabulary, vocabulary_inv, len_avg, len_std, keywords, sup_idx, perm
def load_rnn(dataset_name, sup_source, num_keywords=10, with_evaluation=True, truncate_len=None):
data_path = './' + dataset_name
data, y = read_file(data_path, with_evaluation)
sz = len(data)
np.random.seed(1234)
perm = np.random.permutation(sz)
data = preprocess_doc(data)
data_copy = [s.split(" ") for s in data]
docs_padded = pad_sequences(data_copy)
word_counts, vocabulary, vocabulary_inv = build_vocab(docs_padded)
data = [tokenize.sent_tokenize(doc) for doc in data]
flat_data = [sent for doc in data for sent in doc]
tmp_list = [len(sent.split(" ")) for sent in flat_data]
max_sent_len = max(tmp_list)
avg_sent_len = np.average(tmp_list)
std_sent_len = np.std(tmp_list)
print("\n### Dataset statistics: ###")
print('Sentence max length: {} (words)'.format(max_sent_len))
print('Sentence average length: {} (words)'.format(avg_sent_len))
if truncate_len is None:
truncate_sent_len = min(int(avg_sent_len + 3*std_sent_len), max_sent_len)
else:
truncate_sent_len = truncate_len[1]
print("Defined maximum sentence length: {} (words)".format(truncate_sent_len))
print('Fraction of truncated sentences: {}'.format(sum(tmp > truncate_sent_len for tmp in tmp_list)/len(tmp_list)))
tmp_list = [len(doc) for doc in data]
max_doc_len = max(tmp_list)
avg_doc_len = np.average(tmp_list)
std_doc_len = np.std(tmp_list)
print('Document max length: {} (sentences)'.format(max_doc_len))
print('Document average length: {} (sentences)'.format(avg_doc_len))
if truncate_len is None:
truncate_doc_len = min(int(avg_doc_len + 3*std_doc_len), max_doc_len)
else:
truncate_doc_len = truncate_len[0]
print("Defined maximum document length: {} (sentences)".format(truncate_doc_len))
print('Fraction of truncated documents: {}'.format(sum(tmp > truncate_doc_len for tmp in tmp_list)/len(tmp_list)))
len_avg = [avg_doc_len, avg_sent_len]
len_std = [std_doc_len, std_sent_len]
data = [[sent.split(" ") for sent in doc] for doc in data]
x = build_input_data_rnn(data, vocabulary, int(avg_doc_len + 3*std_doc_len), int(avg_sent_len + 3*std_sent_len))
x = x[perm]
if with_evaluation:
print("Number of classes: {}".format(len(np.unique(y))))
print("Number of documents in each class:")
for i in range(len(np.unique(y))):
print("Class {}: {}".format(i, len(np.where(y == i)[0])))
y = y[perm]
print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))
if sup_source == 'labels' or sup_source == 'keywords':
keywords = load_keywords(data_path, sup_source)
return x, y, word_counts, vocabulary, vocabulary_inv, len_avg, len_std, keywords, perm
elif sup_source == 'docs':
if dataset_name == 'nyt':
class_type = 'topic'
elif dataset_name == 'agnews':
class_type = 'topic'
elif dataset_name == 'yelp':
class_type = 'sentiment'
else:
class_type = 'topic'
keywords, sup_idx = extract_keywords(data_path, vocabulary, class_type, num_keywords, data_copy, perm)
return x, y, word_counts, vocabulary, vocabulary_inv, len_avg, len_std, keywords, sup_idx, perm
def load_dataset(dataset_name, sup_source, model='cnn', with_evaluation=True, truncate_len=None):
if model == 'cnn':
return load_cnn(dataset_name, sup_source, with_evaluation=with_evaluation, truncate_len=truncate_len)
elif model == 'rnn':
return load_rnn(dataset_name, sup_source, with_evaluation=with_evaluation, truncate_len=truncate_len)
| 11,359 | 31.181303 | 116 | py |
MetaCat | MetaCat-master/gge/preprocess.py | import string
import json
from collections import defaultdict
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='bio', choices=['bio', 'ai', 'cyber', 'amazon', 'twitter'])
args = parser.parse_args()
dataset = args.dataset
folder = '../' + dataset + '/'
lengths = {'bio': 1000, 'ai': 1000, 'cyber': 1000, 'amazon': 150, 'twitter': 30}
if dataset in lengths:
length = lengths[dataset]
else:
length = 200
doc_id = []
label = set()
with open(folder+'doc_id.txt') as fin:
for line in fin:
data = line.strip().split(':')
doc_idx = data[1].split(',')
doc_id += [int(x) for x in doc_idx]
label.add('$LABL_'+data[0])
with open(folder+'meta_dict.json') as fin:
meta_dict = json.load(fin)
globl = meta_dict['global']
local = meta_dict['local']
metadata = set()
document = set()
cnt = defaultdict(int)
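# First pass: register document and metadata nodes and count word frequencies over the first
# `length` tokens of each document (words occurring fewer than 10 times are dropped later).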
with open(folder+dataset+'.json') as fin:
for idx, line in enumerate(tqdm(fin)):
data = json.loads(line)
document.add('$DOCU_'+str(idx))
for gm in globl:
for x in data[gm]:
metadata.add('$'+gm.upper()+'_'+x)
for lm in local:
for x in data[lm]:
metadata.add('$'+lm.upper()+'_'+x)
W = data['text'].split()
for token in W[:length]:
cnt[token] += 1
node2id = defaultdict()
with open('node2id.txt', 'w') as fout:
for D in document:
node2id[D] = len(node2id)
fout.write(D+' '+str(node2id[D])+'\n')
for L in label:
node2id[L] = len(node2id)
fout.write(L+' '+str(node2id[L])+'\n')
for M in metadata:
node2id[M] = len(node2id)
fout.write(M+' '+str(node2id[M])+'\n')
for W in cnt:
if cnt[W] < 10:
continue
node2id[W] = len(node2id)
node2id['$CTXT_'+W] = len(node2id)
fout.write(W+' '+str(node2id[W])+'\n')
fout.write('$CTXT_'+W+' '+str(node2id['$CTXT_'+W])+'\n')
mod = len(node2id)+1
win = 5
edge = defaultdict(int)
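# Second pass: accumulate weighted edges of the heterogeneous graph. Word/context-word edges
# come from a sliding window of size `win`; document-word edges get weight `win`; label-document
# and global-metadata-document edges are boosted by `win * length`; document-local-metadata
# edges get weight `win`. Each edge key is packed as id1 * mod + id2.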
with open(folder+dataset+'.json') as fin:
for idx, line in enumerate(tqdm(fin)):
data = json.loads(line)
L = '$LABL_'+str(data['label'])
D = '$DOCU_'+str(idx)
W = data['text'].split()
sent = []
for token in W[:length]:
if cnt[token] < 10:
continue
sent.append(token)
for i in range(len(sent)):
for j in range(i-win, i+win+1):
if j >= len(sent) or j < 0 or j == i:
continue
id1 = node2id[sent[i]]
id2 = node2id['$CTXT_'+sent[j]]
edge[id1*mod+id2] += 1
for i in range(len(sent)):
id1 = node2id[D]
id2 = node2id[sent[i]]
edge[id1*mod+id2] += win
if idx in doc_id:
id1 = node2id[L]
id2 = node2id[D]
edge[id1*mod+id2] += win * length
for gm in globl:
for x in data[gm]:
M = '$'+gm.upper()+'_'+x
id1 = node2id[M]
id2 = node2id[D]
edge[id1*mod+id2] += win * length
for lm in local:
for x in data[lm]:
M = '$'+lm.upper()+'_'+x
id1 = node2id[D]
id2 = node2id[M]
edge[id1*mod+id2] += win
with open('edge.txt', 'w') as fout:
for e in edge:
id1 = e // mod
id2 = e % mod
fout.write(str(id1)+'\t'+str(id2)+'\t'+str(edge[e])+'\n') | 3,072 | 22.821705 | 108 | py |
MetaCat | MetaCat-master/gge/postprocess.py | import string
import json
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='bio', choices=['bio', 'ai', 'cyber', 'amazon', 'twitter'])
args = parser.parse_args()
dataset = args.dataset
folder = '../' + dataset + '/'
id2node = dict()
with open('node2id.txt') as fin:
for line in fin:
data = line.strip().split()
id2node[data[1]] = data[0]
with open(folder+'meta_dict.json') as fin:
meta_dict = json.load(fin)
local = meta_dict['local']
output = []
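# Keep only the embeddings of label nodes, local-metadata nodes and plain word nodes;
# document, context-word and global-metadata embeddings are discarded.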
with open('out.emb') as fin, open(folder+'embedding_gge', 'w') as fout:
for idx, line in enumerate(tqdm(fin)):
if idx == 0:
continue
data = line.strip().split()
data[0] = id2node[data[0]]
if data[0].startswith('$LABL_') or not data[0].startswith('$'):
output.append(' '.join(data)+'\n')
for lm in local:
if data[0].startswith('$'+lm.upper()+'_'):
output.append(' '.join(data)+'\n')
break
fout.write(str(len(output))+'\t100\n')
for line in output:
fout.write(line) | 1,097 | 26.45 | 108 | py |
drn | drn-master/classify.py | import argparse
import shutil
import time
import numpy as np
import os
from os.path import exists, split, join, splitext
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import drn as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test', 'map', 'locate'])
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='drn18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: drn18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--check-freq', default=10, type=int,
metavar='N', help='checkpoint frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--lr-adjust', dest='lr_adjust',
choices=['linear', 'step'], default='step')
parser.add_argument('--crop-size', dest='crop_size', type=int, default=224)
parser.add_argument('--scale-size', dest='scale_size', type=int, default=256)
parser.add_argument('--step-ratio', dest='step_ratio', type=float, default=0.1)
args = parser.parse_args()
return args
def main():
print(' '.join(sys.argv))
args = parse_args()
print(args)
if args.cmd == 'train':
run_training(args)
elif args.cmd == 'test':
test_model(args)
def run_training(args):
# create model
model = models.__dict__[args.arch](args.pretrained)
model = torch.nn.DataParallel(model).cuda()
best_prec1 = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
    # define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(args, optimizer, epoch)
# train for one epoch
train(args, train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(args, val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = 'checkpoint_latest.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.check_freq == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
def test_model(args):
# create model
model = models.__dict__[args.arch](args.pretrained)
model = torch.nn.DataParallel(model).cuda()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
t = transforms.Compose([
transforms.Scale(args.scale_size),
transforms.CenterCrop(args.crop_size),
transforms.ToTensor(),
normalize])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, t),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
criterion = nn.CrossEntropyLoss().cuda()
validate(args, val_loader, model, criterion)
def train(args, train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(args, val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (args.step_ratio ** (epoch // 30))
print('Epoch [{}] Learning rate: {}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 12,237 | 34.6793 | 83 | py |
drn | drn-master/drn.py | import pdb
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
BatchNorm = nn.BatchNorm2d
# __all__ = ['DRN', 'drn26', 'drn42', 'drn58']
webroot = 'http://dl.yf.io/drn/'
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
'drn-d-105': webroot + 'drn_d_105-12b40979.pth'
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride,
padding=dilation[0], dilation=dilation[0])
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes,
padding=dilation[1], dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.downsample = downsample
self.stride = stride
self.residual = residual
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.residual:
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation[1], bias=False,
dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class DRN(nn.Module):
def __init__(self, block, layers, num_classes=1000,
channels=(16, 32, 64, 128, 256, 512, 512, 512),
out_map=False, out_middle=False, pool_size=28, arch='D'):
super(DRN, self).__init__()
self.inplanes = channels[0]
self.out_map = out_map
self.out_dim = channels[-1]
self.out_middle = out_middle
self.arch = arch
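        # Arch 'C' keeps (residual) basic blocks in the first two levels and non-residual
        # basic blocks in the last two; arch 'D' replaces those levels with plain conv-BN-ReLU stacks.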
if arch == 'C':
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False)
self.bn1 = BatchNorm(channels[0])
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(
BasicBlock, channels[0], layers[0], stride=1)
self.layer2 = self._make_layer(
BasicBlock, channels[1], layers[1], stride=2)
elif arch == 'D':
self.layer0 = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True)
)
self.layer1 = self._make_conv_layers(
channels[0], layers[0], stride=1)
self.layer2 = self._make_conv_layers(
channels[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
self.layer5 = self._make_layer(block, channels[4], layers[4],
dilation=2, new_level=False)
self.layer6 = None if layers[5] == 0 else \
self._make_layer(block, channels[5], layers[5], dilation=4,
new_level=False)
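        # Levels 5 and 6 trade further striding for dilation (rates 2 and 4),
        # keeping the output at 1/8 of the input resolution.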
if arch == 'C':
self.layer7 = None if layers[6] == 0 else \
self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
new_level=False, residual=False)
self.layer8 = None if layers[7] == 0 else \
self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
new_level=False, residual=False)
elif arch == 'D':
self.layer7 = None if layers[6] == 0 else \
self._make_conv_layers(channels[6], layers[6], dilation=2)
self.layer8 = None if layers[7] == 0 else \
self._make_conv_layers(channels[7], layers[7], dilation=1)
if num_classes > 0:
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
new_level=True, residual=True):
assert dilation == 1 or dilation % 2 == 0
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = list()
layers.append(block(
self.inplanes, planes, stride, downsample,
dilation=(1, 1) if dilation == 1 else (
dilation // 2 if new_level else dilation, dilation),
residual=residual))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, residual=residual,
dilation=(dilation, dilation)))
return nn.Sequential(*layers)
def _make_conv_layers(self, channels, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(self.inplanes, channels, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(channels),
nn.ReLU(inplace=True)])
self.inplanes = channels
return nn.Sequential(*modules)
def forward(self, x):
y = list()
if self.arch == 'C':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
elif self.arch == 'D':
x = self.layer0(x)
x = self.layer1(x)
y.append(x)
x = self.layer2(x)
y.append(x)
x = self.layer3(x)
y.append(x)
x = self.layer4(x)
y.append(x)
x = self.layer5(x)
y.append(x)
if self.layer6 is not None:
x = self.layer6(x)
y.append(x)
if self.layer7 is not None:
x = self.layer7(x)
y.append(x)
if self.layer8 is not None:
x = self.layer8(x)
y.append(x)
if self.out_map:
x = self.fc(x)
else:
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
if self.out_middle:
return x, y
else:
return x
class DRN_A(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(DRN_A, self).__init__()
self.out_dim = 512 * block.expansion
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4)
self.avgpool = nn.AvgPool2d(28, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
dilation=(dilation, dilation)))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def drn_a_50(pretrained=False, **kwargs):
model = DRN_A(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def drn_c_26(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-26']))
return model
def drn_c_42(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-42']))
return model
def drn_c_58(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-58']))
return model
def drn_d_22(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-22']))
return model
def drn_d_24(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-24']))
return model
def drn_d_38(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-38']))
return model
def drn_d_40(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-40']))
return model
def drn_d_54(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-54']))
return model
def drn_d_56(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-56']))
return model
def drn_d_105(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-105']))
return model
def drn_d_107(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-107']))
return model | 14,175 | 33.241546 | 88 | py |
drn | drn-master/segment.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import drn
import data_transforms as transforms
try:
from modules import batchnormsync
except ImportError:
pass
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALETTE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
TRIPLET_PALETTE = np.asarray([
[0, 0, 0, 255],
[217, 83, 79, 255],
[91, 192, 222, 255]], dtype=np.uint8)
def fill_up_weights(up):
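    # Initialise the (grouped) transposed-convolution kernel with bilinear upsampling
    # weights; every output channel is given the same 2-D kernel.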
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class DRNSeg(nn.Module):
def __init__(self, model_name, classes, pretrained_model=None,
pretrained=True, use_torch_up=False):
super(DRNSeg, self).__init__()
model = drn.__dict__.get(model_name)(
pretrained=pretrained, num_classes=1000)
pmodel = nn.DataParallel(model)
if pretrained_model is not None:
pmodel.load_state_dict(pretrained_model)
self.base = nn.Sequential(*list(model.children())[:-2])
self.seg = nn.Conv2d(model.out_dim, classes,
kernel_size=1, bias=True)
        self.softmax = nn.LogSoftmax(dim=1)
m = self.seg
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
if use_torch_up:
self.up = nn.UpsamplingBilinear2d(scale_factor=8)
else:
up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
self.up = up
def forward(self, x):
x = self.base(x)
x = self.seg(x)
y = self.up(x)
return self.softmax(y), x
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.seg.parameters():
yield param
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
# data = list(self.transforms(*data))
out_data = list(self.transforms(*data))
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))[0]
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score))
logger.info(' * Score {top1.avg:.3f}'.format(top1=score))
return score.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
# Data loading code
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'],
std=info['std'])
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.extend([transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t),
list_dir=args.list_dir),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True, drop_last=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir),
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=True
)
    # define loss function (criterion) and optimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = os.path.join(args.save_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_iter == 0:
history_path = os.path.join(args.save_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def fast_hist(pred, label, n):
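    # Accumulate an n x n confusion matrix (rows: ground truth, cols: prediction)
    # from flattened label/prediction arrays, ignoring labels outside [0, n).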
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
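    # Per-class IoU = TP / (TP + FP + FN) = diag / (row sum + column sum - diag).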
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(
pred, name, output_dir + '_color',
TRIPLET_PALETTE if num_classes == 3 else CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
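        # Fuse the multi-scale predictions by resizing every logit map back to the
        # original resolution and summing them before taking the per-pixel argmax.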
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
scales = [0.5, 0.75, 1.25, 1.5, 1.75]
if args.ms:
dataset = SegListMS(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), scales, list_dir=args.list_dir)
else:
dataset = SegList(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir, out_name=True)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
logger.info('mAP: %f', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default=None, required=True)
parser.add_argument('-l', '--list-dir', default=None,
help='List dir to look for train_images.txt etc. '
'It is the same with --data-dir if not set.')
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-mode', type=str, default='step')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained',
default='', type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('--save_path', default='', type=str, metavar='PATH',
help='output path for training checkpoints')
parser.add_argument('--save_iter', default=1, type=int,
help='number of training iterations between'
'checkpoint history saves')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--bn-sync', action='store_true')
parser.add_argument('--ms', action='store_true',
help='Turn on multi-scale testing')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('--test-suffix', default='', type=str)
args = parser.parse_args()
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
if args.bn_sync:
drn.BatchNorm = batchnormsync.BatchNormSync
return args
def main():
args = parse_args()
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
| 26,909 | 34.927904 | 102 | py |
drn | drn-master/data_transforms.py | import numbers
import random
import numpy as np
from PIL import Image, ImageOps
import torch
class RandomCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, image, label, *args):
assert label is None or image.size == label.size, \
"image and label doesn't have the same size {} / {}".format(
image.size, label.size)
w, h = image.size
tw, th = self.size
top = bottom = left = right = 0
if w < tw:
left = (tw - w) // 2
right = tw - w - left
if h < th:
top = (th - h) // 2
bottom = th - h - top
if left > 0 or right > 0 or top > 0 or bottom > 0:
label = pad_image(
'constant', label, top, bottom, left, right, value=255)
image = pad_image(
'reflection', image, top, bottom, left, right)
w, h = image.size
if w == tw and h == th:
return (image, label, *args)
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
results = [image.crop((x1, y1, x1 + tw, y1 + th))]
if label is not None:
results.append(label.crop((x1, y1, x1 + tw, y1 + th)))
results.extend(args)
return results
class RandomScale(object):
def __init__(self, scale):
if isinstance(scale, numbers.Number):
scale = [1 / scale, scale]
self.scale = scale
def __call__(self, image, label):
ratio = random.uniform(self.scale[0], self.scale[1])
w, h = image.size
tw = int(ratio * w)
th = int(ratio * h)
if ratio == 1:
return image, label
elif ratio < 1:
interpolation = Image.ANTIALIAS
else:
interpolation = Image.CUBIC
return image.resize((tw, th), interpolation), \
label.resize((tw, th), Image.NEAREST)
class RandomRotate(object):
"""Crops the given PIL.Image at a random location to have a region of
the given size. size can be a tuple (target_height, target_width)
or an integer, in which case the target will be of a square shape (size, size)
"""
def __init__(self, angle):
self.angle = angle
def __call__(self, image, label=None, *args):
assert label is None or image.size == label.size
w, h = image.size
p = max((h, w))
angle = random.randint(0, self.angle * 2) - self.angle
if label is not None:
label = pad_image('constant', label, h, h, w, w, value=255)
label = label.rotate(angle, resample=Image.NEAREST)
label = label.crop((w, h, w + w, h + h))
image = pad_image('reflection', image, h, h, w, w)
image = image.rotate(angle, resample=Image.BILINEAR)
image = image.crop((w, h, w + w, h + h))
return image, label
class RandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __call__(self, image, label):
if random.random() < 0.5:
results = [image.transpose(Image.FLIP_LEFT_RIGHT),
label.transpose(Image.FLIP_LEFT_RIGHT)]
else:
results = [image, label]
return results
class Normalize(object):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std):
self.mean = torch.FloatTensor(mean)
self.std = torch.FloatTensor(std)
def __call__(self, image, label=None):
for t, m, s in zip(image, self.mean, self.std):
t.sub_(m).div_(s)
if label is None:
return image,
else:
return image, label
def pad_reflection(image, top, bottom, left, right):
if top == 0 and bottom == 0 and left == 0 and right == 0:
return image
h, w = image.shape[:2]
next_top = next_bottom = next_left = next_right = 0
if top > h - 1:
next_top = top - h + 1
top = h - 1
if bottom > h - 1:
next_bottom = bottom - h + 1
bottom = h - 1
if left > w - 1:
next_left = left - w + 1
left = w - 1
if right > w - 1:
next_right = right - w + 1
right = w - 1
new_shape = list(image.shape)
new_shape[0] += top + bottom
new_shape[1] += left + right
new_image = np.empty(new_shape, dtype=image.dtype)
new_image[top:top+h, left:left+w] = image
new_image[:top, left:left+w] = image[top:0:-1, :]
new_image[top+h:, left:left+w] = image[-1:-bottom-1:-1, :]
new_image[:, :left] = new_image[:, left*2:left:-1]
new_image[:, left+w:] = new_image[:, -right-1:-right*2-1:-1]
return pad_reflection(new_image, next_top, next_bottom,
next_left, next_right)
def pad_constant(image, top, bottom, left, right, value):
if top == 0 and bottom == 0 and left == 0 and right == 0:
return image
h, w = image.shape[:2]
new_shape = list(image.shape)
new_shape[0] += top + bottom
new_shape[1] += left + right
new_image = np.empty(new_shape, dtype=image.dtype)
new_image.fill(value)
new_image[top:top+h, left:left+w] = image
return new_image
def pad_image(mode, image, top, bottom, left, right, value=0):
if mode == 'reflection':
return Image.fromarray(
pad_reflection(np.asarray(image), top, bottom, left, right))
elif mode == 'constant':
return Image.fromarray(
pad_constant(np.asarray(image), top, bottom, left, right, value))
else:
raise ValueError('Unknown mode {}'.format(mode))
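# Hedged usage sketch (not in the original file): pad_image dispatches on the
# mode string; 'reflection' mirrors image content while 'constant' fills with a
# fixed value (255 is used as the ignore label elsewhere in this file). The
# 8x6 test image and padding amounts below are arbitrary.
def pad_image_example():
    image = Image.new('RGB', (8, 6), color=(10, 20, 30))
    reflected = pad_image('reflection', image, 2, 2, 3, 3)
    filled = pad_image('constant', image, 2, 2, 3, 3, value=255)
    return reflected.size, filled.size  # both (8 + 3 + 3, 6 + 2 + 2)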
class Pad(object):
"""Pads the given PIL.Image on all sides with the given "pad" value"""
def __init__(self, padding, fill=0):
assert isinstance(padding, numbers.Number)
assert isinstance(fill, numbers.Number) or isinstance(fill, str) or \
isinstance(fill, tuple)
self.padding = padding
self.fill = fill
def __call__(self, image, label=None, *args):
if label is not None:
label = pad_image(
'constant', label,
self.padding, self.padding, self.padding, self.padding,
value=255)
if self.fill == -1:
image = pad_image(
'reflection', image,
self.padding, self.padding, self.padding, self.padding)
else:
image = pad_image(
'constant', image,
self.padding, self.padding, self.padding, self.padding,
value=self.fill)
return (image, label, *args)
class PadImage(object):
def __init__(self, padding, fill=0):
assert isinstance(padding, numbers.Number)
assert isinstance(fill, numbers.Number) or isinstance(fill, str) or \
isinstance(fill, tuple)
self.padding = padding
self.fill = fill
def __call__(self, image, label=None, *args):
if self.fill == -1:
image = pad_image(
'reflection', image,
self.padding, self.padding, self.padding, self.padding)
else:
image = ImageOps.expand(image, border=self.padding, fill=self.fill)
return (image, label, *args)
class ToTensor(object):
"""Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __call__(self, pic, label=None):
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic)
else:
# handle PIL Image
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
img = img.float().div(255)
if label is None:
return img,
else:
            return img, torch.LongTensor(np.array(label, dtype=np.int64))
class Compose(object):
"""Composes several transforms together.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *args):
for t in self.transforms:
args = t(*args)
return args
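# Hedged usage sketch (not in the original file): the transforms above follow a
# joint (image, label) calling convention, so they compose as shown here. The
# input size, crop size and the mean/std values are arbitrary placeholders.
def example_joint_pipeline():
    image = Image.new('RGB', (481, 321), color=(128, 128, 128))
    label = Image.new('L', (481, 321), color=0)
    pipeline = Compose([
        RandomCrop(256),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),
    ])
    image_tensor, label_tensor = pipeline(image, label)
    return image_tensor.size(), label_tensor.size()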
| 8,825 | 32.05618 | 82 | py |
drn | drn-master/datasets/compute_mean_std.py | import argparse
import json
import numpy as np
from PIL import Image
from os import path as osp
def compute_mean_std(data_dir, list_dir):
image_list_path = osp.join(list_dir, 'train_images.txt')
image_list = [line.strip() for line in open(image_list_path, 'r')]
np.random.shuffle(image_list)
pixels = []
for image_path in image_list[:500]:
image = Image.open(osp.join(data_dir, image_path), 'r')
pixels.append(np.asarray(image).reshape(-1, 3))
pixels = np.vstack(pixels)
mean = np.mean(pixels, axis=0) / 255
std = np.std(pixels, axis=0) / 255
print(mean, std)
info = {'mean': mean.tolist(), 'std': std.tolist()}
with open(osp.join(data_dir, 'info.json'), 'w') as fp:
json.dump(info, fp)
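# Hedged alternative sketch (not part of the original script): the same
# statistics can be accumulated image by image instead of stacking all sampled
# pixels in memory at once; it matches the code above up to floating point error.
def compute_mean_std_streaming(image_paths, data_dir):
    count = 0
    channel_sum = np.zeros(3, dtype=np.float64)
    channel_sq_sum = np.zeros(3, dtype=np.float64)
    for image_path in image_paths:
        pixels = np.asarray(Image.open(osp.join(data_dir, image_path), 'r'),
                            dtype=np.float64).reshape(-1, 3)
        count += pixels.shape[0]
        channel_sum += pixels.sum(axis=0)
        channel_sq_sum += (pixels ** 2).sum(axis=0)
    mean = channel_sum / count
    std = np.sqrt(channel_sq_sum / count - mean ** 2)
    return mean / 255, std / 255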
def parse_args():
parser = argparse.ArgumentParser(
description='Compute mean and std of a dataset.')
    parser.add_argument('data_dir',
                        help='data folder where train_images.txt resides.')
    parser.add_argument('list_dir', nargs='?', default=None,
                        help='folder containing the image lists; '
                             'defaults to data_dir if not set.')
args = parser.parse_args()
if args.list_dir is None:
args.list_dir = args.data_dir
return args
def main():
args = parse_args()
compute_mean_std(args.data_dir, args.list_dir)
if __name__ == '__main__':
main()
| 1,400 | 30.133333 | 75 | py |
drn | drn-master/datasets/cityscapes/prepare_data.py | from collections import namedtuple
import os
from os.path import join, split, exists
import sys
import numpy as np
from PIL import Image
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
def label2id(image):
array = np.array(image)
out_array = np.empty(array.shape, dtype=array.dtype)
for l in labels:
if 0 <= l.trainId < 255:
out_array[array == l.trainId] = l.id
return Image.fromarray(out_array)
def id2label(image):
array = np.array(image)
out_array = np.empty(array.shape, dtype=array.dtype)
for l in labels:
out_array[array == l.id] = l.trainId
return Image.fromarray(out_array)
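# Hedged sketch (not part of the original script): because the label ids are
# small non-negative integers, the per-label masking loop in id2label above can
# also be written as a single lookup-table indexing step (the -1 license-plate
# id is skipped here, matching its ignoreInEval status).
def id_to_trainid_lut_sketch(image):
    lut = np.full(256, 255, dtype=np.uint8)
    for l in labels:
        if l.id >= 0:
            lut[l.id] = l.trainId
    return Image.fromarray(lut[np.array(image)])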
def prepare_cityscape_submission(in_dir):
our_dir = in_dir + '_id'
for root, dirs, filenames in os.walk(in_dir):
for name in filenames:
in_path = join(root, name)
out_path = join(root.replace(in_dir, our_dir), name)
file_dir = split(out_path)[0]
if not exists(file_dir):
os.makedirs(file_dir)
image = Image.open(in_path)
id_map = label2id(image)
print('Writing', out_path)
id_map.save(out_path)
def prepare_cityscape_training(in_dir):
for root, dirs, filenames in os.walk(in_dir):
for name in filenames:
parts = name.split('_')
if parts[-1] != 'labelIds.png':
continue
parts[-1] = 'trainIds.png'
out_name = '_'.join(parts)
in_path = join(root, name)
out_path = join(root, out_name)
image = Image.open(in_path)
id_map = id2label(image)
print('Writing', out_path)
id_map.save(out_path)
if __name__ == '__main__':
prepare_cityscape_training(sys.argv[1])
| 8,397 | 60.29927 | 129 | py |
drn | drn-master/lib/test.py | import pdb
import time
import logging
import torch
from torch.autograd import Variable
from torch.autograd import gradcheck
from modules import batchnormsync
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
batchnormsync.BatchNormSync.checking_mode = True
batchnormsync.BatchNormSync.sync = True
cuda = True
batch_size = 3
input = torch.randn(3, 3, 2, 2).float()
# input = torch.Tensor(range(60 * batch_size)).float().resize_(batch_size, 3, 2, 2) / 100
bn = batchnormsync.BatchNormSync(3, eps=0, affine=True,
device_ids=None)
bn2 = torch.nn.BatchNorm2d(3, eps=0, affine=False)
# bn.train()
bn1 = batchnormsync.BatchNormSync(3, eps=0, affine=True, device_ids=[0])
bn1.train()
if cuda:
bn = torch.nn.DataParallel(bn)
bn2 = torch.nn.DataParallel(bn2)
bn = bn.cuda()
bn1 = bn1.cuda()
bn2 = bn2.cuda()
input = input.cuda()
inputs = (Variable(input, requires_grad=True),)
# output = bn(inputs[0])
# output1 = bn1(inputs[0])
# output2 = bn2(inputs[0])
# print((output1 - output2).abs().max())
# print((output - output2).abs().max())
# test = gradcheck(bn, inputs, eps=1e-4, atol=1e-4, rtol=1e-8)
for i in range(1000):
logger.info(i)
start_time = time.time()
test = gradcheck(bn, inputs, eps=1e-4, atol=1e-2, rtol=1e-3)
logger.info('%s %f', test, time.time() - start_time)
| 1,481 | 25.945455 | 89 | py |
drn | drn-master/lib/build.py | import glob
import os
import torch
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
sources = ['src/batchnormp.c']
headers = ['src/batchnormp.h']
defines = []
with_cuda = False
abs_path = os.path.dirname(os.path.realpath(__file__))
extra_objects = [os.path.join(abs_path, 'dense/batchnormp_kernel.so')]
extra_objects += glob.glob('/usr/local/cuda/lib64/*.a')
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/batchnormp_cuda.c']
headers += ['src/batchnormp_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
ffi = create_extension(
'dense.batch_norm',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects)
if __name__ == '__main__':
ffi.build() | 846 | 23.911765 | 70 | py |
drn | drn-master/lib/functions/batchnormp.py | import pdb
import numpy as np
import torch
from torch.autograd import Function
from dense import batch_norm
from queue import Queue
from threading import Condition
cum_queue = Queue()
broadcast_queue = Queue()
broadcast_cv = Condition()
class BatchNormPFunction(Function):
def __init__(self, running_mean, running_var, training,
cum_queue, broadcast_queue, device_ids, sync,
eps=1e-5, momentum=0.1, affine=True):
self.affine = affine
self.eps = eps
self.momentum = momentum
self.running_mean = running_mean
self.running_var = running_var
self.mean = None
self.var = None
self.training = training
self.cum_queue = cum_queue
self.broadcast_queue = broadcast_queue
self.device_ids = device_ids
self.sync = sync
def forward(self, input, weight, bias):
output = input.new()
self.save_for_backward(input, weight, bias)
# input_t = input.transpose(0, 1).double()
# input_size = input_t.size()
batch_size = int(input.size(0))
# input_t.resize_(int(input_size[0]), int(np.prod(input_size[1:])))
# self.mean = input_t.mean(dim=1)
device_ids = self.device_ids
# print('device', input.get_device(), flush=True)
if input.is_cuda:
# self.mean.copy_(torch.from_numpy(
# self.cum_mean(input.get_device(),
# self.mean.cpu().numpy(),
# batch_size)))
# var = input_t - torch.unsqueeze(self.mean, 1)
# var *= var
# var = var.mean(dim=1)
# total_var = self.cum_mean(
# input.get_device(), var.cpu().numpy(), batch_size)
# self.std = input_t.new().resize_as_(self.mean). \
# copy_(torch.from_numpy(total_var)).sqrt()
mean_cuda = input.new().resize_(input.size(1))
var_cuda = input.new().resize_(input.size(1))
batch_norm.BatchNormalizationP_mean_cuda(input, mean_cuda)
if len(device_ids) > 1 and self.sync and self.training:
mean_cuda.copy_(torch.from_numpy(self.cum_mean(
input.get_device(), mean_cuda.cpu().numpy(), batch_size)))
batch_norm.BatchNormalizationP_var_cuda(input, mean_cuda, var_cuda)
if len(device_ids) > 1 and self.sync and self.training:
var_cuda.copy_(torch.from_numpy(self.cum_mean(
input.get_device(), var_cuda.cpu().numpy(), batch_size)))
else:
# self.std = input_t.std(dim=1, unbiased=False)
batch_norm.BatchNormalizationP_var_cuda(input, mean_cuda, var_cuda)
self.mean = mean_cuda
self.var = var_cuda
        if not input.is_cuda:
            # CPU path: restore the per-channel statistics that the commented-out
            # code above used to compute; otherwise input_t and self.mean are
            # undefined at this point.
            input_t = input.transpose(0, 1).contiguous().double()
            input_t = input_t.view(int(input_t.size(0)), -1)
            self.mean = input_t.mean(dim=1)
            self.std = input_t.std(dim=1, unbiased=False)
            batch_norm.BatchNormalizationP_forward(
                input, output, weight, bias,
                self.running_mean, self.running_var, self.mean, self.std,
                self.training, self.momentum, self.eps)
else:
batch_norm.BatchNormalizationP_forward_cuda(
input, output, weight, bias,
self.running_mean, self.running_var, self.mean, self.var,
self.training, self.momentum, self.eps)
return output
def cum_mean(self, this_device, this_mean, batch_size):
cum_queue.put((batch_size, this_mean))
total_mean = np.zeros(this_mean.shape, dtype=np.float64)
total_batch_size = 0
if this_device == self.device_ids[0]:
for _ in self.device_ids:
item = cum_queue.get()
total_batch_size += item[0]
total_mean += item[0] * item[1]
cum_queue.task_done()
total_mean /= total_batch_size
broadcast_cv.acquire()
for _ in range(len(self.device_ids) - 1):
broadcast_queue.put(total_mean)
broadcast_cv.notify_all()
broadcast_cv.release()
else:
broadcast_cv.acquire()
if broadcast_queue.qsize() == 0:
broadcast_cv.wait()
total_mean = broadcast_queue.get()
broadcast_queue.task_done()
broadcast_cv.release()
# assert cum_queue.empty()
broadcast_queue.join()
return total_mean
def backward(self, grad_output):
input, weight, bias = self.saved_tensors
grad_input = grad_output.new().resize_as_(input)
grad_weight = grad_output.new().resize_as_(weight).zero_()
grad_bias = grad_output.new().resize_as_(bias).zero_()
if not grad_output.is_cuda:
batch_norm.BatchNormalizationP_backward(
input, grad_output, grad_input, grad_weight, grad_bias,
weight, self.running_mean, self.running_var, self.mean,
self.std, self.training, 1, self.eps)
else:
# grad_output_t = grad_output.transpose(0, 1).double()
# batch_size = int(grad_output.size(0))
# grad_output_t.resize_(int(grad_output_t.size(0)),
# int(np.prod(grad_output_t.size()[1:])))
# grad_output_mean = grad_output_t.mean(dim=1)
# device_ids = self.device_ids
# if len(device_ids) > 1 and self.sync:
# grad_output_mean.copy_(torch.from_numpy(
# self.cum_mean(grad_output.get_device(),
# grad_output_mean.cpu().numpy(),
# batch_size)))
# grad_output_mean = grad_output_mean.float()
#
# input_t = input.transpose(0, 1).double()
# input_size = input_t.size()
# input_t.resize_(int(input_size[0]), int(np.prod(input_size[1:])))
# dotP = (input_t - torch.unsqueeze(self.mean.double(), 1)) * \
# grad_output_t
# dotP = dotP.mean(dim=1)
# if len(device_ids) > 1 and self.sync:
# dotP.copy_(torch.from_numpy(
# self.cum_mean(grad_output.get_device(),
# dotP.cpu().numpy(),
# batch_size)))
# dotP = dotP.float()
batch_size = int(grad_output.size(0))
grad_output_mean_cuda = grad_output.new().resize_(grad_output.size(1))
dotP_cuda = grad_output.new().resize_(
grad_output.size(1))
batch_norm.BatchNormalizationP_mean_grad_cuda(
input, grad_output, self.running_mean,
self.mean, grad_output_mean_cuda, dotP_cuda, self.training
)
if len(self.device_ids) > 1 and self.sync:
grad_output_mean_cuda.copy_(torch.from_numpy(
self.cum_mean(grad_output.get_device(),
grad_output_mean_cuda.cpu().numpy(),
batch_size)))
dotP_cuda.copy_(torch.from_numpy(
self.cum_mean(grad_output.get_device(),
dotP_cuda.cpu().numpy(),
batch_size)))
# pdb.set_trace()
batch_norm.BatchNormalizationP_backward_cuda(
input, grad_output, grad_output_mean_cuda, dotP_cuda,
grad_input, grad_weight, grad_bias,
weight, self.running_mean, self.running_var,
self.mean, self.var, self.training, 1, self.eps)
return grad_input, grad_weight, grad_bias
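# Hedged sketch (not part of the original file): numerically, the cross-device
# reduction performed by cum_mean is just a batch-size-weighted average of the
# per-device statistics, i.e. sum_i(b_i * m_i) / sum_i(b_i).
def weighted_mean_sketch(batch_sizes, per_device_means):
    per_device_means = [np.asarray(m, dtype=np.float64) for m in per_device_means]
    total = sum(b * m for b, m in zip(batch_sizes, per_device_means))
    return total / float(sum(batch_sizes))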
| 7,692 | 41.977654 | 82 | py |
drn | drn-master/lib/functions/__init__.py | 0 | 0 | 0 | py |
|
drn | drn-master/lib/modules/__init__.py | 0 | 0 | 0 | py |
|
drn | drn-master/lib/modules/batchnormsync.py | from queue import Queue
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from functions.batchnormp import BatchNormPFunction
class BatchNormSync(Module):
sync = True
checking_mode = False
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
device_ids=None):
super(BatchNormSync, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.mean = torch.zeros(num_features)
self.std = torch.ones(num_features)
self.reset_parameters()
self.cum_queue = Queue()
self.broadcast_queue = Queue()
if device_ids is None:
self.device_ids = list(range(torch.cuda.device_count()))
else:
self.device_ids = device_ids
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
self.mean.zero_()
self.std.fill_(1)
if self.affine:
if BatchNormSync.checking_mode:
self.weight.data.fill_(1)
else:
self.weight.data.uniform_()
self.bias.data.zero_()
def forward(self, input):
training = int(self.training)
assert input.size(1) == self.num_features
bn_func = BatchNormPFunction(
self.running_mean, self.running_var, # self.mean, self.std,
training, self.cum_queue, self.broadcast_queue, self.device_ids,
BatchNormSync.sync, self.eps, self.momentum, self.affine)
return bn_func(input, self.weight, self.bias)
def __repr__(self):
return ('{name}({num_features}, eps={eps}, momentum={momentum},'
' affine={affine})'
.format(name=self.__class__.__name__, **self.__dict__)) | 2,268 | 34.453125 | 76 | py |
drn | drn-master/lib/dense/__init__.py | 0 | 0 | 0 | py |
|
drn | drn-master/lib/dense/batch_norm/__init__.py |
from torch.utils.ffi import _wrap_function
from ._batch_norm import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
locals[symbol] = _wrap_function(fn, _ffi)
__all__.append(symbol)
_import_symbols(locals())
| 309 | 22.846154 | 49 | py |
Reweight-CC | Reweight-CC-master/visualization.py | import numpy as np
from PIL import Image, ImageFont, ImageDraw
import keras.backend as K
from utils import color_correction
BACKGROUND_COLOR = (10, 10, 10, 160)
TEXT_COLOR = BOX_COLOR = (230, 230, 230)
FONT = ImageFont.truetype(font='arial', size=24)
EPSILON = 1E-9
def white_balance(input_img,
global_estimate,
color_correction_matrix=None):
img_wb = input_img.copy()
# normalize the gain, otherwise white-balanced image may suffer from overflowing
global_gain = global_estimate / global_estimate.min()
img_wb *= global_gain.reshape((1, 1, 3))
img_wb = img_wb.clip(0., 1.)
if color_correction_matrix is not None:
img_wb = color_correction(img_wb, color_correction_matrix)
img_wb **= 1./2.2
return img_wb
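# Hedged usage sketch (not part of the original module): white_balance expects a
# linear RGB image in [0, 1] and a per-channel gain triplet (as produced by the
# network); the random image and gain values below are arbitrary placeholders.
def white_balance_demo():
    rng = np.random.RandomState(0)
    linear_img = rng.uniform(0., 1., size=(32, 32, 3)).astype('float32')
    gains = np.array([1.8, 1.0, 1.4])  # hypothetical [R, G, B] gains
    return white_balance(linear_img, gains)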
def generate_local_wb_visualization(input_img,
local_estimates,
global_estimate,
boxes,
remained_boxes_indices=None,
confidences=None,
ground_truth=None,
local_angular_errors=None,
global_angular_error=None,
color_correction_matrix=None):
img_height, img_width, _ = input_img.shape
if remained_boxes_indices is not None:
valid_boxes_indices = np.where(remained_boxes_indices < 12)[0]
local_estimates = local_estimates[valid_boxes_indices, :]
boxes = boxes[valid_boxes_indices, :]
if confidences is not None:
confidences = confidences[valid_boxes_indices]
if local_angular_errors is not None:
local_angular_errors = local_angular_errors[valid_boxes_indices]
local_rgb_estimates = 1. / local_estimates
local_rgb_estimates /= local_rgb_estimates.sum(axis=1, keepdims=True)
global_rgb_estimate = 1. / global_estimate
global_rgb_estimate /= global_rgb_estimate.sum()
nb_valid_patch = local_estimates.shape[0]
local_wb_img = input_img.copy()
for i in range(nb_valid_patch):
# normalize the gains, otherwise white-balanced image may suffer from overflowing
local_gains = local_estimates[i, :] / local_estimates[i, :].min()
local_wb_img[boxes[i, 1]:boxes[i, 3], boxes[i, 0]:boxes[i, 2], :] *= local_gains.reshape((1, 1, 3))
local_wb_img = local_wb_img.clip(0., 1.)
if color_correction_matrix is not None:
local_wb_img = color_correction(local_wb_img, color_correction_matrix)
local_wb_img **= 1./2.2
# add some labels
local_wb_img = Image.fromarray((local_wb_img * 255).astype('uint8'))
draw = ImageDraw.Draw(local_wb_img, 'RGBA')
for i in range(nb_valid_patch):
draw.rectangle([(boxes[i, 0], boxes[i, 1]), (boxes[i, 2], boxes[i, 3])], outline=BOX_COLOR)
text = '[{0:.3f}, {1:.3f}, {2:.3f}]'.format(*local_rgb_estimates[i, :])
w0, h0 = draw.textsize(text, FONT)
draw.rectangle((boxes[i, 0], boxes[i, 1], boxes[i, 0] + w0, boxes[i, 1] + h0), fill=BACKGROUND_COLOR)
        draw.text((boxes[i, 0], boxes[i, 1]), text, fill=TEXT_COLOR, font=FONT)
if confidences is not None:
text = 'Confidence {0:.3f}'.format(confidences[i])
w1, h1 = draw.textsize(text, FONT)
draw.rectangle((boxes[i, 0], boxes[i, 1] + h0, boxes[i, 0] + w1, boxes[i, 1] + h0 + h1),
fill=BACKGROUND_COLOR)
draw.text((boxes[i, 0], boxes[i, 1] + h0), text, fill=TEXT_COLOR, font=FONT)
else:
w1 = h1 = 0
if local_angular_errors is not None:
text = 'Error: {0:.2f}°'.format(local_angular_errors[i])
w2, h2 = draw.textsize(text, FONT)
draw.rectangle((boxes[i, 0], boxes[i, 1] + h0 + h1, boxes[i, 0] + w2, boxes[i, 1] + h0 + h1 + h2),
fill=BACKGROUND_COLOR)
draw.text((boxes[i, 0], boxes[i, 1] + h0 + h1), text, fill=TEXT_COLOR, font=FONT)
# write the global information on the right bottom corner
text = 'Global estimate: [{0:.3f}, {1:.3f}, {2:.3f}]'.format(*global_rgb_estimate)
w0, h0 = draw.textsize(text, FONT)
draw.rectangle((img_width - w0, img_height - h0, img_width, img_height), fill=BACKGROUND_COLOR)
    draw.text((img_width - w0, img_height - h0), text, fill=TEXT_COLOR, font=FONT)
if (ground_truth is not None) and (global_angular_error is not None):
text = 'Ground-truth: [{0:.3f}, {1:.3f}, {2:.3f}]'.format(*ground_truth)
w1, h1 = draw.textsize(text, FONT)
draw.rectangle((img_width - w1, img_height - h0 - h1, img_width, img_height - h0), fill=BACKGROUND_COLOR)
draw.text((img_width - w1, img_height - h0 - h1), text, fill=TEXT_COLOR, font=FONT)
text = 'Global error: {0:.2f}°'.format(global_angular_error)
w2, h2 = draw.textsize(text, FONT)
draw.rectangle((img_width - w2, img_height - h0 - h1 - h2, img_width, img_height - h0 - h1), fill=BACKGROUND_COLOR)
draw.text((img_width - w2, img_height - h0 - h1 - h2), text, fill=TEXT_COLOR, font=FONT)
return local_wb_img
def get_intermediate_output(model, input_img, layer_idx):
"""
get the output of an intermediate layer in a keras model
:param model: keras model
:param input_img: input image tensor as Numpy array
:param layer_idx: index of the target layer
:return: output tensor of intermediate layer as Numpy array
"""
if input_img.ndim == 3:
input_img = np.expand_dims(input_img, axis=0) # convert into batch format
f = K.function([model.layers[0].input, K.learning_phase()],
[model.layers[layer_idx].output])
# the second argument denotes the learning phase (0 for test mode and 1 for train mode)
intermediate_output = f([input_img, 0])[0].squeeze()
if intermediate_output.ndim == 2:
intermediate_output = np.expand_dims(intermediate_output, axis=-1).repeat(3, axis=-1)
elif intermediate_output.ndim == 3:
# remain 3 channels with max activations, if the #channels > 3
if intermediate_output.shape[-1] > 3:
max_activation_channels_indices = np.sum(intermediate_output, axis=(0, 1)).argsort()[::-1][:3]
intermediate_output = intermediate_output[:, :, max_activation_channels_indices]
return intermediate_output
def generate_feature_maps_visualization(model,
input_img,
input_feature_maps_names,
reweight_maps_names,
output_feature_maps_names):
"""
generate a mosaic image with intermediate feature maps
:param model: keras model
:param input_img: input image tensor as Numpy array
:param input_feature_maps_names: list of the input feature maps of the ReWU
:param reweight_maps_names: list of the reweight map
:param output_feature_maps_names: list of the output feature maps of the ReWU
:return: the mosaic image as Image object
"""
model_layers_dict = {layer.name: layer for layer in model.layers}
input_feature_maps, reweight_maps, output_feature_maps = dict(), dict(), dict()
for layer_name in model_layers_dict:
layer_idx = list(model_layers_dict.keys()).index(layer_name)
if layer_name in input_feature_maps_names:
input_feature_maps[layer_name] = get_intermediate_output(model, input_img, layer_idx)
elif layer_name in reweight_maps_names:
reweight_maps[layer_name] = get_intermediate_output(model, input_img, layer_idx)
elif layer_name in output_feature_maps_names:
output_feature_maps[layer_name] = get_intermediate_output(model, input_img, layer_idx)
thumbnail_size = list(map(lambda x: x.shape, list(reweight_maps.values())))[-1]
thumbnail_size = (thumbnail_size[1], thumbnail_size[0]) # in [width, height] format
margin = 10
mosaic_img = Image.new('RGB', (3 * thumbnail_size[0] + 4 * margin,
len(output_feature_maps) * (thumbnail_size[1] + margin) + margin))
input_img_thumbnail = Image.fromarray((input_img * 255).astype('uint8'))
input_img_thumbnail.thumbnail(thumbnail_size)
mosaic_img.paste(im=input_img_thumbnail, box=(margin, margin))
h = thumbnail_size[1] + 2 * margin
for layer_name in input_feature_maps:
tmp_map = input_feature_maps[layer_name]
tmp_map = tmp_map.clip(0., None) / tmp_map.clip(0., None).max()
tmp_map = Image.fromarray((tmp_map * 255).astype('uint8'))
tmp_map.thumbnail(thumbnail_size)
mosaic_img.paste(im=tmp_map, box=(0, h))
h += (thumbnail_size[1] + margin)
h = margin
for layer_name in reweight_maps:
tmp_map = reweight_maps[layer_name]
tmp_map = tmp_map.clip(0., None)
tmp_map *= 1 / (np.percentile(tmp_map, 95) + EPSILON)
tmp_map = tmp_map.clip(0., 1.) ** (1 / 2.2) # gamma for better visualization
tmp_map = Image.fromarray((tmp_map * 255).astype('uint8'))
tmp_map.thumbnail(thumbnail_size)
mosaic_img.paste(im=tmp_map, box=(thumbnail_size[0] + 2 * margin, h))
h += (thumbnail_size[1] + margin)
h = margin
for layer_name in output_feature_maps:
tmp_map = output_feature_maps[layer_name]
tmp_map = tmp_map.clip(0., None)
tmp_map *= 1 / (np.percentile(tmp_map, 95) + EPSILON)
tmp_map = tmp_map.clip(0., 1.) ** (1 / 2.2) # gamma for better visualization
tmp_map = Image.fromarray((tmp_map * 255).astype('uint8'))
tmp_map.thumbnail(thumbnail_size)
mosaic_img.paste(im=tmp_map, box=(2 * thumbnail_size[0] + 3 * margin, h))
h += (thumbnail_size[1] + margin)
return mosaic_img
| 9,917 | 50.388601 | 123 | py |
Reweight-CC | Reweight-CC-master/cc.py | # -*- coding: utf-8 -*-
import os
import argparse
parser = argparse.ArgumentParser(description="Read image(s) and perform computational color constancy. "
"See README and paper Color Constancy by Image Feature Maps Reweighting for more details.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("img_path", type=str,
help="dateset direcroty.\n"
"Use wildcard '*' to load all images in the directory (multi-image mode).\n"
"e.g., c:\\foo.jpg or sample_images\\MultiCam\\*")
parser.add_argument("-d", "--dataset", type=str, choices=['MultiCam', 'RECommended'], default='MultiCam',
help="select pre-trained model for MultiCam dataset or ColorChecker RECommended dataset. (default: MultiCam)\n"
"Images in MultiCam dataset are device-independent,\n"
"so models pre-trained on this dataset are also suitable for images from other sources.\n")
parser.add_argument("-l", "--level", type=int, choices=[1, 3], default=3,
help="the number of hierarchical levels. (default: 3)\n")
parser.add_argument("-c", "--confidence",
help="use network with the confidence estimation branch and aggregate local estimates based on their confidence scores.",
action="store_true")
parser.add_argument("-g", "--gamma",
help="apply the inverse gamma correction to the (non-linear) input image(s).\n"
"Turn on this option only if the input image(s) has gone through post-processing (e.g., downloaded from Internet).",
action="store_true")
parser.add_argument("-s", "--save", type=int, choices=[0, 1, 2, 3, 4], default=0,
help="save option. (default: 1)\n"
"-s 0/--save 0: save nothing (only for inference time test).\n"
"-s 1/--save 1: save the corrected image(s) only.\n"
"-s 2/--save 2: save the corrected image(s) as well as the result(s) of the local estimates.\n"
"-s 3/--save 3: save the corrected image(s) as well as the intermediate feature maps (may be slow).\n"
"-s 4/--save 4: save all described above.")
parser.add_argument("-r", "--record",
help="write illuminant estimation results into a text file.",
action="store_true")
parser.add_argument("-b", "--batch", type=int, metavar='N', default=64,
help="-b N/--batch N: batch size (default: 64).\n"
"Decrease it if encounter memory allocations issue.")
args = parser.parse_args()
from timeit import default_timer as timer
import glob
import matplotlib.pyplot as plt
from config import *
from utils import (read_image,
img2batch,
angular_error,
get_ground_truth_dict,
get_masks_dict,
local_estimates_aggregation_naive,
local_estimates_aggregation,
write_records,
                   write_statistics,
                   convert_back)
from visualization import *
from model import model_builder
# load configuration based on the pre-trained dataset
dataset_config = get_dataset_config(dataset=args.dataset)
# network architecture selection
model_config = get_model_config(level=args.level, confidence=args.confidence)
# configurations
##############################
DATASET = dataset_config['dataset']
PATCHES = dataset_config['patches'] # the number of square sub-images
PATCH_SIZE = dataset_config['patch_size'] # the size of square sub-image
CONFIDENCE_THRESHOLD = dataset_config['confidence_threshold']
MODEL_DIR = dataset_config['model_dir'] # pre-trained model directory
INPUT_BITS = dataset_config['input_bits'] # bit length of the input image
VALID_BITS = dataset_config['valid_bits'] # valid bit length of the data
DARKNESS = dataset_config['darkness'] # black level
BRIGHTNESS_SCALE = dataset_config['brightness_scale'] # scale image brightness for better visualization
COLOR_CORRECTION_MATRIX = dataset_config['color_correction_matrix']
NETWORK = model_config['network']
PRETRAINED_MODEL = model_config['pretrained_model']
# feature maps names for visualization
INPUT_FEATURE_MAPS_NAMES = model_config['input_feature_maps_names']
REWEIGHT_MAPS_NAMES = model_config['reweight_maps_names']
OUTPUT_FEATURE_MAPS_NAMES = model_config['output_feature_maps_names']
##############################
if os.path.isdir(args.img_path):
args.img_path = os.path.join(args.img_path, '*')
img_dir = os.path.dirname(args.img_path)
# get all image paths
imgs_path = glob.glob(args.img_path)
imgs_path = [i for i in imgs_path if os.path.splitext(i)[1] in ('.png', '.jpg')]
if len(imgs_path) > 1:
multiple_images_mode = True
else:
multiple_images_mode = False
# inverse gamma correction
if args.gamma:
inverse_gamma_correction_mode = True
GAMMA = 2.2
else:
inverse_gamma_correction_mode = False
GAMMA = None
if args.dataset == 'RECommended':
    # images from the RECommended dataset are linear, so skip the inverse gamma correction
inverse_gamma_correction_mode = False
GAMMA = None
# confidence estimation branch
if args.confidence:
confidence_estimation_mode = True
else:
confidence_estimation_mode = False
# record mode
if args.record:
record_mode = True
record_file_path = os.path.join(img_dir, 'Records_' + NETWORK + '.txt')
else:
record_mode = False
# search ground-truth.txt file which contains the ground-truth illuminant colors of images
ground_truth_path = os.path.join(img_dir, 'ground-truth.txt')
if os.path.exists(ground_truth_path):
ground_truth_mode = True
# ground_truth_dict is a dictionary with image IDs as keys and ground truth colors as values
ground_truth_dict = get_ground_truth_dict(ground_truth_path)
else:
ground_truth_mode = False
ground_truth_dict = None
# search masks.txt file which contains the coordinates to be excluded
masks_path = os.path.join(img_dir, 'masks.txt')
if os.path.exists(masks_path):
# masks_dict is a dictionary with image IDs as keys and coordiantes as values
masks_dict = get_masks_dict(masks_path)
else:
masks_dict = None
# import model and load pre-trained parameters
model = model_builder(level=args.level,
confidence=args.confidence,
input_shape=(*PATCH_SIZE, 3))
network_path = os.path.join(MODEL_DIR, PRETRAINED_MODEL)
model.load_weights(network_path)
print('=' * 110)
print('{network:s} architecture is selected with batch size {batch_size:02d} (pre-trained on {dataset:s} dataset).'.
format(**{'network': NETWORK,
'dataset': DATASET,
'batch_size': args.batch}))
if ground_truth_dict is not None:
print('Ground-truth file found.')
if masks_dict is not None:
print('Masks file found.')
if args.save == 3 or args.save == 4:
    print('Generating intermediate feature maps may take a long time (>5s/image); please be patient.')
# from keras.utils import plot_model # uncomment these 2 lines to plot the model architecture, if needed
# plot_model(model, to_file=os.path.join(model_dir, network+'_architecture.pdf'), show_shapes=True)
# model.summary() # uncomment this line to print model details, if needed
if __name__ == '__main__':
print('Processing started...')
angular_errors_statistics = [] # record angular errors for all test images
inference_times = []
for (counter, img_path) in enumerate(imgs_path):
img_name = os.path.splitext(os.path.basename(img_path))[0] # image name without extension
print(img_name + ':', end=' ', flush=True)
# data generator
batch, boxes, remained_boxes_indices, ground_truth = img2batch(img_path,
patch_size=PATCH_SIZE,
input_bits=INPUT_BITS,
valid_bits=VALID_BITS,
darkness=DARKNESS,
ground_truth_dict=ground_truth_dict,
masks_dict=masks_dict,
gamma=GAMMA)
nb_batch = int(np.ceil(PATCHES / args.batch))
batch_size = int(PATCHES / nb_batch) # actual batch size
local_estimates, confidences = np.empty(shape=(0, 3)), np.empty(shape=(0,))
start_time = timer()
# use batch(es) to feed into the network
for b in range(nb_batch):
batch_start_index, batch_end_index = b * batch_size, (b + 1) * batch_size
batch_tmp = batch[batch_start_index:batch_end_index, ]
if confidence_estimation_mode:
# the model requires 2 inputs when confidence estimation mode is activated
batch_tmp = [batch_tmp, np.zeros((batch_size, 3))]
outputs = model.predict(batch_tmp) # model inference
if confidence_estimation_mode:
# the model produces 6 outputs when confidence estimation mode is on. See model.py for more details
# local_estimates is the gain instead of illuminant color!
local_estimates = np.vstack((local_estimates, outputs[4]))
confidences = np.hstack((confidences, outputs[5].squeeze()))
else:
# local_estimates is the gain instead of illuminant color!
local_estimates = np.vstack((local_estimates, outputs))
confidences = None
if confidence_estimation_mode:
global_estimate = local_estimates_aggregation(local_estimates, confidences)
else:
global_estimate = local_estimates_aggregation_naive(local_estimates)
end_time = timer()
inference_times.append(end_time - start_time)
local_rgb_estimates = 1. / local_estimates # convert gains into rgb triplet
local_rgb_estimates /= local_rgb_estimates.sum(axis=1, keepdims=True)
global_rgb_estimate = 1. / global_estimate # convert gain into rgb triplet
global_rgb_estimate /= global_rgb_estimate.sum()
if ground_truth_mode:
local_angular_errors = angular_error(ground_truth, local_rgb_estimates)
global_angular_error = angular_error(ground_truth, global_rgb_estimate)
angular_errors_statistics.append(global_angular_error)
else:
local_angular_errors = global_angular_error = None
# Save the white balanced image
if args.save in [1, 2, 3, 4]:
img = read_image(img_path=img_path,
input_bits=INPUT_BITS,
valid_bits=VALID_BITS,
darkness=DARKNESS,
gamma=GAMMA)
wb_imgs_path = os.path.join(img_dir, 'white_balanced_images')
if not os.path.exists(wb_imgs_path):
os.mkdir(wb_imgs_path)
wb_img = white_balance(input_img=np.clip(BRIGHTNESS_SCALE*img, 0., 1.),
global_estimate=global_estimate,
color_correction_matrix=COLOR_CORRECTION_MATRIX)
wb_img_path = os.path.join(wb_imgs_path, img_name + '_wb.png')
plt.imsave(wb_img_path, wb_img)
# Save the result of the local estimates
if args.save in [2, 4]:
local_wb_imgs_path = os.path.join(img_dir, 'local_estimates_images')
if not os.path.exists(local_wb_imgs_path):
os.mkdir(local_wb_imgs_path)
local_wb_img = generate_local_wb_visualization(input_img=np.clip(BRIGHTNESS_SCALE*img, 0., 1.),
local_estimates=local_estimates,
global_estimate=global_estimate,
boxes=boxes,
remained_boxes_indices=remained_boxes_indices,
confidences=confidences,
ground_truth=ground_truth,
local_angular_errors=local_angular_errors,
global_angular_error=global_angular_error,
color_correction_matrix=COLOR_CORRECTION_MATRIX)
local_wb_img_path = os.path.join(local_wb_imgs_path, img_name + '_local_estimates.jpg')
local_wb_img.save(local_wb_img_path)
# Save the mosaic image of intermediate feature maps
if args.save in [3, 4]:
mosaic_img_dir = os.path.join(img_dir, 'intermediate_feature_maps')
if not os.path.exists(mosaic_img_dir):
os.mkdir(mosaic_img_dir)
mosaic_img = generate_feature_maps_visualization(model=model,
input_img=img,
input_feature_maps_names=INPUT_FEATURE_MAPS_NAMES,
reweight_maps_names=REWEIGHT_MAPS_NAMES,
output_feature_maps_names=OUTPUT_FEATURE_MAPS_NAMES)
mosaic_img_path = os.path.join(mosaic_img_dir, img_name + '_intermediate_maps.jpg')
mosaic_img.save(mosaic_img_path)
# Record illuminant estimation results into a text file
if args.record:
write_records(record_file_path=record_file_path,
img_path=img_path,
global_estimate=global_estimate,
ground_truth=ground_truth,
global_angular_error=global_angular_error)
print('done. ({0:d}/{1:d})'.format(counter + 1, len(imgs_path)))
# Record overall statistics into a text file
if args.record and ground_truth_mode:
write_statistics(record_file_path, angular_errors_statistics)
if len(inference_times) > 1:
print('Average inference time: {0:.0f}ms/image.'.format(1000 * np.mean(inference_times[1:])))
    if args.record and args.dataset == 'MultiCam' and ground_truth_mode:
print('Converting linear sRGB values back into individual camera color spaces...')
convert_back(record_file_path)
print('Done. Check the file {0:s}'.format(record_file_path.replace('.txt', '_camcolorspace.txt')))
| 14,919 | 51.167832 | 141 | py |
Reweight-CC | Reweight-CC-master/utils.py | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import path
from PIL import Image
from keras.utils import Sequence
from keras.preprocessing.image import img_to_array, load_img
class DataGenerator(Sequence):
"""Generates data for Keras"""
def __init__(self, list_imgs, labels, batch_size=64, dim=(224, 224, 3), shuffle=True):
self.list_imgs = list_imgs
self.labels = labels
self.batch_size = batch_size
self.dim = dim
self.shuffle = shuffle
self.indexes = None
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.list_imgs) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of images
list_imgs_temp = [self.list_imgs[k] for k in indexes]
# Generate data
x, y = self.__data_generation(list_imgs_temp)
return x, y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
self.indexes = np.arange(len(self.list_imgs))
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_imgs_temp):
"""Generates data containing batch_size samples # X : (n_samples, *dim, n_channels)"""
# Initialization
x = np.empty((self.batch_size, *self.dim))
y = np.empty((self.batch_size, 3), dtype='float32')
# Generate data
for i, img_path in enumerate(list_imgs_temp):
# store image
x[i, ] = img_to_array(load_img(img_path))/255.
# store gains
y[i, ] = self.labels[img_path]
return x, y
def read_image(img_path,
input_bits=8,
valid_bits=8,
darkness=0.,
gamma=None):
"""
read image from disk and return a Numpy array
:param img_path: image path
:param input_bits: the bit length of the input image, usually 8 or 16.
:param valid_bits: the actual bit length of the image, e.g., 8, 10, 12, 14,...
:param darkness: the black level of the input image.
:param gamma: apply inverse gamma correction (gamma=gamma) to the input image.
:return: image data as Numpy array
"""
img = plt.imread(img_path)
if img.dtype == 'uint8':
img = (img * (2 ** input_bits - 1.) - darkness) / (2 ** valid_bits - 1.) / 255.
else:
img = (img * (2 ** input_bits - 1.) - darkness) / (2 ** valid_bits - 1.)
img = img.clip(0., 1.)
if gamma is not None:
img **= gamma
return img
def img2batch(img_path,
patch_size=(224, 224),
input_bits=8,
valid_bits=8,
darkness=0,
ground_truth_dict=None,
masks_dict=None,
gamma=None):
"""
Sample sub-images from the input full-resolution image and return a P*H*W*3 tensor,
where P is the number of sub-images, H and W are height and width of each sub-image.
In this script we fixed all these parameters: P = 18, H = W = 224.
:param img_path: the path of the input full-resolution image.
:param patch_size: the target size of the sub-image.
:param input_bits: the bit length of the input image, usually 8 or 16.
:param valid_bits: the actual bit length of the image, e.g., 8, 10, 12, 14,...
:param darkness: the black level of the input image.
:param ground_truth_dict: the dictionary containing the ground truth illuminant colors.
:param masks_dict: the dictionary containing the coordinates that should be removed from the sub-images.
:param gamma: apply inverse gamma correction (gamma=gamma) to the input image.
:return:
patches: P*H*W*3 tensor of the sub-images as Numpy array
boxes: coordinates of sub-images,
remained_boxes_indices: the indices of sub-images to be visualized
ground_truth: ground truth color of the illuminant in the input image (if provided)
Please see README.md for more detailed information.
"""
n_x, n_y = 4, 3
n_patches = n_x * n_y + (n_x-1) * (n_y-1) # number of sub-images
img = read_image(img_path,
input_bits=input_bits,
valid_bits=valid_bits,
darkness=darkness,
gamma=gamma)
h, w, _ = img.shape
if h > w:
n_x, n_y = n_y, n_x # for portrait mode
patch_size_orig = int(min(w/n_x, h/n_y)) # size of sub-image before resizing
assert patch_size_orig >= 224, "The image size is too small to produce patches with lengths greater than 224px."
x1, y1 = np.meshgrid(range(int((w - patch_size_orig*n_x)/2), int(w - int((w - patch_size_orig*n_x)/2) - 1), patch_size_orig),
range(int((h - patch_size_orig*n_y)/2), int(h - int((h - patch_size_orig*n_y)/2) - 1), patch_size_orig))
x2, y2 = np.meshgrid(range(int((w - patch_size_orig * n_x) / 2 + patch_size_orig/2),
int(w - int((w - patch_size_orig * n_x) / 2 + patch_size_orig/2) - patch_size_orig/2 - 1), patch_size_orig),
range(int((h - patch_size_orig * n_y) / 2 + patch_size_orig/2),
int(h - int((h - patch_size_orig * n_y) / 2 + patch_size_orig/2) - patch_size_orig/2 - 1), patch_size_orig))
x = np.hstack([x1.flatten(), x2.flatten()])
y = np.hstack([y1.flatten(), y2.flatten()])
del x1, y1, x2, y2
# remove sub-images that contains the points recorded in the masks_dict
if (masks_dict is not None) and (os.path.basename(img_path) in masks_dict):
mask_coordinates = masks_dict[os.path.basename(img_path)]
remained_boxes_indices = []
idx = 0
for _x, _y in zip(x, y):
vertices = path.Path([(_x, _y),
(_x + patch_size_orig, _y),
(_x + patch_size_orig, _y + patch_size_orig),
(_x, _y + patch_size_orig)])
isinsquare = vertices.contains_points(mask_coordinates)
if np.all(np.logical_not(isinsquare)):
remained_boxes_indices.append(idx)
idx += 1
remained_boxes_indices = np.array(remained_boxes_indices)
n_patches = len(remained_boxes_indices)
# if the number of remained sub-images is smaller than 3, ignore the masks
if n_patches >= 3:
x, y = x[remained_boxes_indices], y[remained_boxes_indices]
else:
n_patches = n_x * n_y + (n_x-1) * (n_y-1)
remained_boxes_indices = np.array(range(n_patches))
else:
remained_boxes_indices = np.array(range(n_patches))
patches = np.empty((n_patches, *patch_size, 3)) # tensor of sub-images
boxes = np.empty((n_patches, 4), dtype='int16') # coordinate of boxes, in [x0, y0, x0+width, y0+height] format
p = 0
for _x, _y in zip(x, y):
patch = img[_y:_y+patch_size_orig, _x:_x+patch_size_orig, :]
patch = Image.fromarray((patch * 255).astype('uint8'), mode='RGB')
patch.thumbnail(patch_size, Image.BICUBIC) # image resizing
patches[p, ] = (np.array(patch).astype('float32') / 255.).clip(0., 1.)
boxes[p, ] = np.array([_x, _y, _x+patch_size_orig, _y+patch_size_orig])
p += 1
if (ground_truth_dict is not None) and (os.path.basename(img_path) in ground_truth_dict):
ground_truth = ground_truth_dict[os.path.basename(img_path)]
else:
ground_truth = None
return patches, boxes, remained_boxes_indices, ground_truth
def angular_error(ground_truth, prediction):
"""
calculate angular error(s) between the ground truth RGB triplet(s) and the predicted one(s)
:param ground_truth: N*3 or 1*3 Numpy array, each row for one ground truth triplet
:param prediction: N*3 Numpy array, each row for one predicted triplet
:return: angular error(s) in degree as Numpy array
"""
ground_truth_norm = ground_truth / np.linalg.norm(ground_truth, ord=2, axis=-1, keepdims=True)
prediction_norm = prediction / np.linalg.norm(prediction, ord=2, axis=-1, keepdims=True)
return 180 * np.arccos(np.sum(ground_truth_norm * prediction_norm, axis=-1).clip(0., 1.)) / np.pi
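# Hedged sketch (not part of the original module): angular_error broadcasts a
# single ground-truth triplet against many predictions; triplets with the same
# chromaticity give 0 degrees regardless of scale. The values are arbitrary.
def angular_error_demo():
    ground_truth = np.array([0.4, 0.4, 0.2])
    predictions = np.array([[0.8, 0.8, 0.4],   # same chromaticity, ~0 degrees
                            [0.2, 0.4, 0.4]])  # a clearly different estimate
    return angular_error(ground_truth, predictions)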
def get_ground_truth_dict(ground_truth_path):
"""
read ground-truth illuminant colors from text file, in which each line is in the 'ID r g b' format
:param ground_truth_path: path of the ground-truth.txt file
:return: a dictionary with image IDs as keys and ground truth rgb colors as values
"""
ground_truth_dict = dict()
with open(ground_truth_path) as f:
for line in f:
img_id = line.split('\t')[0]
illuminant_rgb = line.split('\t')[1] # string type
ground_truth_dict[img_id] = np.array([float(char) for char in illuminant_rgb.split(' ')])
return ground_truth_dict
def get_masks_dict(masks_path):
"""
read coordinates from text file, in which each line is in the 'ID x1 x2 x3... y1 y2 y3...' format
:param masks_path: path of the masks.txt file
:return: a dictionary with image IDs as keys and coordinates as values
"""
masks_dict = dict()
with open(masks_path) as f:
for line in f:
img_id = line.split('\t')[0]
coordinates_x = line.split('\t')[1] # string type
coordinates_y = line.split('\t')[2] # string type
coordinates_x = np.array([float(char) for char in coordinates_x.split(' ')]).reshape((-1, 1))
coordinates_y = np.array([float(char) for char in coordinates_y.split(' ')]).reshape((-1, 1))
masks_dict[img_id] = np.hstack([coordinates_x, coordinates_y])
return masks_dict
def local_estimates_aggregation_naive(local_estimates):
"""
aggregate local illuminant color estimates into a global estimate
:param local_estimates: N*3 numpy array, each row is a local estimate in [R_gain, G_gain, B_gain] format
:return: 1*3 numpy array, the aggregated global estimate
"""
local_estimates_norm = local_estimates / np.expand_dims(local_estimates[:, 1], axis=-1)
global_estimate = np.median(local_estimates_norm, axis=0)
return global_estimate / global_estimate.sum()
def local_estimates_aggregation(local_estimates, confidences):
"""
aggregate local illuminant color estimates into a global estimate
Note: the aggregation method here is kind of different from that described in the paper
:param local_estimates: N*3 numpy array, each row is a local estimate in [R_gain, G_gain, B_gain] format
:param confidences: (N, ) numpy array recording the confidence scores of N local patches
:return: 1*3 numpy array, the aggregated global estimate
"""
reliable_patches_indices = np.where((confidences > np.median(confidences)) & (confidences > 0.5))[0]
if confidences.max() > 0.5 and len(reliable_patches_indices) >= 2:
local_estimates_confident = local_estimates[reliable_patches_indices, :]
local_estimates_confident_norm = local_estimates_confident / np.expand_dims(local_estimates_confident[:, 1], axis=-1)
global_estimate = np.median(local_estimates_confident_norm, axis=0)
return global_estimate / global_estimate.sum()
else:
return local_estimates_aggregation_naive(local_estimates)
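# Hedged sketch (not part of the original module): with toy gains and
# confidence scores, the confidence-aware aggregation above pools only the
# reliable patches and otherwise falls back to the naive median.
def aggregation_demo():
    local_estimates = np.array([[2.0, 1.0, 1.5],
                                [2.2, 1.0, 1.4],
                                [4.0, 1.0, 0.8],
                                [2.1, 1.0, 1.6]])
    confidences = np.array([0.9, 0.8, 0.1, 0.85])
    return (local_estimates_aggregation_naive(local_estimates),
            local_estimates_aggregation(local_estimates, confidences))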
def color_correction(img, color_correction_matrix):
"""
use color correction matrix to correct image
:param img: image to be corrected
:param color_correction_matrix: 3*3 color correction matrix which has been normalized
such that the sum of each row is equal to 1
    :return: the color-corrected image, clipped to [0., 1.]
"""
color_correction_matrix = np.asarray(color_correction_matrix)
h, w, _ = img.shape
img = np.reshape(img, (h * w, 3))
img = np.dot(img, color_correction_matrix.T)
return np.reshape(img, (h, w, 3)).clip(0., 1.)
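def _example_color_correction():
    # Illustrative usage sketch (hypothetical helper, not from the original module):
    # a made-up row-normalized 3*3 matrix applied to a tiny flat gray image in [0, 1];
    # because each row sums to 1, neutral (gray) pixels are left unchanged.
    img = np.full((2, 2, 3), 0.5)
    ccm = [[0.8, 0.1, 0.1],
           [0.1, 0.8, 0.1],
           [0.1, 0.1, 0.8]]
    return color_correction(img, ccm)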
def percentile_mean(x, prctile_lower, prctile_upper):
"""
calculate the mean of elements within a percentile range.
can be used to calculate the 'best x%' or 'worst x%' accuracy of a color constancy algorithm
:param x: input numpy array
:param prctile_lower: the lower limit of the percentile range
:param prctile_upper: the upper limit of the percentile range
:return: the arithmetic mean of elements within the percentile range [prctile_lower, prctile_upper]
"""
if len(x) == 1:
return x[0]
else:
x_sorted = np.sort(x)
element_start_index = int(len(x) * prctile_lower / 100)
element_end_index = int(len(x) * prctile_upper / 100)
return x_sorted[element_start_index:element_end_index].mean()
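def _example_percentile_mean():
    # Illustrative usage sketch (hypothetical helper, not from the original module):
    # the 'best 25%' and 'worst 25%' means of some made-up angular errors in degree.
    errors = np.array([1., 2., 3., 4., 5., 6., 7., 8.])
    best_25 = percentile_mean(errors, 0, 25)     # mean of the two smallest -> 1.5
    worst_25 = percentile_mean(errors, 75, 100)  # mean of the two largest -> 7.5
    return best_25, worst_25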
def write_records(record_file_path,
img_path,
global_estimate,
ground_truth=None,
global_angular_error=None):
"""
    write one illuminant estimation entry into the text file
:param record_file_path: text file path
:param img_path: source image path
:param global_estimate: the estimated illuminant color
:param ground_truth: the ground truth illuminant color
:param global_angular_error: the angular error between ground_truth and global_estimate
:return: None
"""
with open(record_file_path, "a") as f:
f.write("{0}\t".format(os.path.basename(img_path)))
global_rgb_estimate = 1. / global_estimate
global_rgb_estimate /= global_rgb_estimate.sum()
f.write("Global estimate: [{0:.4f}, {1:.4f}, {2:.4f}]\t".format(*global_rgb_estimate))
if (ground_truth is not None) and (global_angular_error is not None):
ground_truth_rgb = ground_truth / ground_truth.sum()
f.write("Ground-truth: [{0:.4f}, {1:.4f}, {2:.4f}]\t".format(*ground_truth_rgb))
f.write("Angular error: {0:.2f}\t".format(global_angular_error))
f.write('\n')
def write_statistics(record_file_path, angular_error_statistics):
"""
    write illuminant estimation results into the text file
:param record_file_path: text file path
:param angular_error_statistics: list of angular errors for all test images
:return: None
"""
angular_error_statistics = np.asarray(angular_error_statistics)
angular_errors_mean = angular_error_statistics.mean()
angular_errors_median = np.median(angular_error_statistics)
angular_errors_trimean = (np.percentile(angular_error_statistics, 25) + 2 * np.median(angular_error_statistics) + np.percentile(angular_error_statistics, 75)) / 4.
angular_errors_best_quarter = percentile_mean(angular_error_statistics, 0, 25)
angular_errors_worst_quarter = percentile_mean(angular_error_statistics, 75, 100)
with open(record_file_path, "a") as f:
f.write("Angular error metrics (in degree):\n"
"mean: {angular_errors_mean:.2f}, "
"median: {angular_errors_median:.2f}, "
"trimean: {angular_errors_trimean:.2f}, "
"best 25%: {angular_errors_best_quarter:.2f}, "
"worst 25%: {angular_errors_worst_quarter:.2f} "
"({nb_imgs:d} images)".format(**{'angular_errors_mean': angular_errors_mean,
'angular_errors_median': angular_errors_median,
'angular_errors_trimean': angular_errors_trimean,
'angular_errors_best_quarter': angular_errors_best_quarter,
'angular_errors_worst_quarter': angular_errors_worst_quarter,
'nb_imgs': len(angular_error_statistics)}))
def convert_back(record_file_path):
"""
convert estimated illuminant colors (and the ground truth, if necessary) back into individual camera color spaces
    so that the angular errors are more comparable with those reported in other works.
THIS FUNCTION WORKS ONLY FOR MULTICAM DATASET!
:param record_file_path: text file path, same as that in write_statistics function
:return: None
"""
record_file_path_cam_colorspace = record_file_path.replace('.txt', '_camcolorspace.txt')
errors_in_cam_colorspace = []
with open(record_file_path, "r") as f:
for line in f:
            line_info = get_line_info(line)
            if line_info is None:
                continue
            img_path, groundtruth, prediction = line_info
            ccm = get_ccm(get_camera_model(img_path))
            # map the recorded triplets back into the camera color space
            groundtruth_in_cam_colorspace = np.dot(groundtruth, np.linalg.inv(ccm))
            prediction_in_cam_colorspace = np.dot(prediction, np.linalg.inv(ccm))
errors_in_cam_colorspace.append(angular_error(groundtruth_in_cam_colorspace, prediction_in_cam_colorspace))
write_records(record_file_path_cam_colorspace,
img_path,
prediction_in_cam_colorspace,
ground_truth=groundtruth_in_cam_colorspace,
global_angular_error=errors_in_cam_colorspace[-1])
write_statistics(record_file_path_cam_colorspace, errors_in_cam_colorspace)
def get_line_info(line):
s = line.split('\t')
    if len(s) < 4:  # lines without a ground-truth entry cannot be converted
        return None
img_path = s[0]
prediction = [float(x) for x in re.split('(\d+\.?\d*)', s[1])[1:-1:2]]
groundtruth = [float(x) for x in re.split('(\d+\.?\d*)', s[2])[1:-1:2]]
return img_path, groundtruth, prediction
def get_camera_model(img_name):
    if '_' in img_name:
        camera_model = img_name.split('_')[0]
        camera_model = 'Canon5D' if camera_model == 'IMG' else camera_model
    elif '8D5U' in img_name:
        camera_model = 'Canon1D'
    else:
        camera_model = 'Canon550D'
    return camera_model
def get_ccm(camera_model):
camera_models = ('Canon5D', 'Canon1D', 'Canon550D', 'Canon1DsMkIII',
'Canon600D', 'FujifilmXM1', 'NikonD5200', 'OlympusEPL6',
'PanasonicGX1', 'SamsungNX2000', 'SonyA57')
if camera_model not in camera_models:
return None
# extracted from dcraw.c
matrices = ((6347,-479,-972,-8297,15954,2480,-1968,2131,7649), # Canon 5D
(4374,3631,-1743,-7520,15212,2472,-2892,3632,8161), # Canon 1Ds
(6941,-1164,-857,-3825,11597,2534,-416,1540,6039), # Canon 550D
(5859,-211,-930,-8255,16017,2353,-1732,1887,7448), # Canon 1Ds Mark III
(6461,-907,-882,-4300,12184,2378,-819,1944,5931), # Canon 600D
(10413,-3996,-993,-3721,11640,2361,-733,1540,6011), # FujifilmXM1
(8322,-3112,-1047,-6367,14342,2179,-988,1638,6394), # Nikon D5200
(8380,-2630,-639,-2887,10725,2496,-627,1427,5438), # Olympus E-PL6
(6763,-1919,-863,-3868,11515,2684,-1216,2387,5879), # Panasonic GX1
(7557,-2522,-739,-4679,12949,1894,-840,1777,5311), # SamsungNX2000
(5991,-1456,-455,-4764,12135,2980,-707,1425,6701)) # Sony SLT-A57
xyz2cam = np.asarray(matrices[camera_models.index(camera_model)])/10000
xyz2cam = xyz2cam.reshape(3, 3).T
    linsRGB2XYZ = np.array(((0.4124564, 0.3575761, 0.1804375),
                            (0.2126729, 0.7151522, 0.0721750),
                            (0.0193339, 0.1191920, 0.9503041)))
    return np.linalg.inv(xyz2cam.dot(linsRGB2XYZ)).T # camera2linsRGB matrix | 19,374 | 46.839506 | 167 | py |
Reweight-CC | Reweight-CC-master/model.py | import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.models import Model
from keras.engine.topology import Layer
from keras.layers.convolutional import Conv2D
from keras.layers.normalization import BatchNormalization
from keras.layers import (
Input,
Activation,
MaxPooling2D,
GlobalAveragePooling2D,
Concatenate,
Dense,
Dropout,
Lambda,
Multiply
)
from keras.initializers import RandomNormal, Constant, Ones
from keras.losses import mse
from normalization import ChannellNormalization
from scipy.stats import norm
from scipy.integrate import quad
EPSILON = 1E-9
# threshold subtraction layer
class ThresholdLayer(Layer):
def __init__(self, filters, **kwargs):
super(ThresholdLayer, self).__init__(**kwargs)
self.filters = filters
self.initializer = Constant(-self._calc_offset(self.filters))
self.trainable = True
def build(self, input_shape):
self.threshold = self.add_weight(name='threshold',
shape=(self.filters,),
initializer=self.initializer,
trainable=self.trainable)
super(ThresholdLayer, self).build(input_shape)
def call(self, x):
return x + K.reshape(self.threshold, (1, 1, 1, self.filters))
@staticmethod
def _calc_offset(kernel_num):
"""calculate the initialized value of T,
assuming the input is Gaussian distributed along channel axis with mean=0 and std=1 (~N(0, 1))
"""
return quad(lambda x: kernel_num * x * norm.pdf(x, 0, 1) * (1 - norm.cdf(x, 0, 1)) ** (kernel_num - 1),
                    -np.inf, np.inf)[0]
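def _example_threshold_offset(kernel_num=16, n_samples=100000):
    # Illustrative sanity check (hypothetical helper, not from the original module):
    # the integral in _calc_offset equals the expected minimum of `kernel_num` i.i.d.
    # N(0, 1) variables (the threshold weight is initialized with its negative), so it
    # should be close to this Monte Carlo estimate.
    samples = np.random.randn(n_samples, kernel_num)
    empirical = samples.min(axis=1).mean()
    analytical = ThresholdLayer._calc_offset(kernel_num)
    return empirical, analytical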
# gain layer
class GainLayer(Layer):
def __init__(self, **kwargs):
super(GainLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.gain = self.add_weight(name='gain',
shape=(1,),
initializer=Ones(),
trainable=True)
super(GainLayer, self).build(input_shape)
def call(self, x, mask=None):
return self.gain * x
def model_builder(level, confidence=False, input_shape=(224, 224, 3)):
if not confidence:
if level == 1:
return hierarchy1(input_shape)
elif level == 2:
return hierarchy2(input_shape)
elif level == 3:
return hierarchy3(input_shape)
else:
if level == 1:
return hierarchy1_confidence(input_shape)
elif level == 2:
return hierarchy2_confidence(input_shape)
elif level == 3:
return hierarchy3_confidence(input_shape)
def _handle_dim_ordering():
"""Keras backend check
"""
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = -1
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _conv_unit(**params):
"""Helper to build a conventional convolution unit
"""
filters = params["filters"]
kernel_size = params.setdefault("kernel_size", (3, 3))
strides = params.setdefault("strides", (1, 1))
kernel_initializer = params.setdefault("kernel_initializer", RandomNormal(mean=0.0, stddev=0.1))
use_bias = params.setdefault("use_bias", True)
bias_initializer = params.setdefault("bias_initializer", "zeros")
padding = params.setdefault("padding", "valid")
pool_bool = params.setdefault("pool_bool", False)
def f(inputs):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
use_bias=use_bias,
bias_initializer=bias_initializer)(inputs)
bnorm = BatchNormalization(axis=-1, epsilon=EPSILON)(conv)
relu = Activation("relu")(bnorm)
if pool_bool:
return MaxPooling2D(pool_size=(2, 2), padding='valid')(relu)
else:
return relu
return f
def _reweight_unit(**params):
"""Helper to build a ReWU
"""
kernel_size = params.setdefault("kernel_size", (1, 1)) # ReWU uses 1*1 convolution kernel
strides = params.setdefault("strides", (1, 1))
kernel_initializer = params.setdefault("kernel_initializer", RandomNormal(mean=0.0, stddev=0.1))
padding = params.setdefault("padding", "valid")
def f(inputs):
input_batch_shape = K.int_shape(inputs)
if 'filters' in params:
filters = params["filters"]
else:
# default: the depth of the output of ReWU is equal to the input
filters = input_batch_shape[CHANNEL_AXIS]
conv = Conv2D(filters=filters, kernel_size=kernel_size, use_bias=False,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer)(inputs)
chnorm = ChannellNormalization(axis=-1)(conv)
threshold = ThresholdLayer(filters=filters)(chnorm)
chmin = Lambda(lambda x: K.min(x, axis=-1, keepdims=True))(threshold)
reweight_map = Activation("relu")(chmin)
reweighted = Multiply()([inputs, reweight_map])
return GainLayer()(reweighted)
return f
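def _example_reweight_unit(input_shape=(224, 224, 3)):
    # Illustrative usage sketch (hypothetical helper, not from the original module):
    # wrap a single ReWU in a tiny Keras model; the unit rescales its input by a
    # one-channel reweighting map, so the output keeps the input spatial size and depth.
    _handle_dim_ordering()
    inputs = Input(shape=input_shape)
    outputs = _reweight_unit()(inputs)
    return Model([inputs], [outputs])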
def _concat_unit():
"""Helper to build a global average pooling and concatenation unit
"""
def f(inputs):
gap_vectors = list()
for block in inputs:
gap_vectors.append(GlobalAveragePooling2D()(block))
return Concatenate()(gap_vectors)
return f
def _fc_unit(**params):
"""Helper to build a fully-connected unit
"""
units = params["units"]
kernel_initializer = params.setdefault("kernel_initializer", RandomNormal(mean=0.0, stddev=0.02))
bias_initializer = params.setdefault("bias_initializer", "zeros")
relu_bool = params.setdefault("relu_bool", True)
bnorm_bool = params.setdefault("bnorm_bool", False)
dropout_bool = params.setdefault("dropout_bool", False)
softmax_bool = params.setdefault("softmax_bool", False)
sigmoid_bool = params.setdefault("sigmoid_bool", False)
# the final activation layer can only be either softmax or sigmoid
assert not (softmax_bool and sigmoid_bool)
def f(inputs):
fc = Dense(units=units,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)(inputs)
if bnorm_bool:
bnorm = BatchNormalization(axis=-1, epsilon=EPSILON)(fc)
else:
bnorm = fc
if relu_bool:
relu = Activation("relu")(bnorm)
else:
relu = bnorm
if dropout_bool:
out = Dropout(rate=0.4)(relu)
else:
out = relu
if softmax_bool:
return Activation('softmax')(out)
elif sigmoid_bool:
return Activation('sigmoid')(out)
else:
return out
return f
"""
Here are some custom layers that are used to build up models with confidence estimation branches.
Building up a multi-input/output model in Keras is not as neat as building up a single input/output model.
We treat the following metrics as the outputs of the model:
1. angular error
2. mean squared error
3. task error
4. regularization error
During training, (task error + lambda * regularization error) is the final loss to be minimized,
by setting the loss weights to [1, lambda, 0, 0] for these four outputs.
Angular error and mean squared error are for performance evaluation only.
"""
def angular_error_layer(args):
y_true, y_pred = args
p = K.sum(K.l2_normalize(y_true, axis=-1) * K.l2_normalize(y_pred, axis=-1), axis=-1)
p = K.clip(p, EPSILON, 1. - EPSILON)
return 180 * tf.acos(p) / np.pi
def mean_squared_error_layer(args):
y_true, y_pred = args
return mse(y_true, y_pred)
def task_error_layer(args):
y_true, y_pred, conf = args
predictions_adjusted = (conf * y_pred) + ((1 - conf) * y_true)
# task_error_part1 is the basic task error introduced in the paper
task_error_part1 = mse(y_true, predictions_adjusted)
# this mse threshold parameter should be determined by evaluating the naive network
    # and finding an appropriate value that corresponds to the max allowable angular error
task_err_threshold = K.variable(0.0006)
    # task_error_part2 imposes stronger penalties on those samples with task error larger than task_err_threshold
task_error_part2 = 10 * K.relu(task_error_part1 - task_err_threshold)
return task_error_part1 + task_error_part2
def regularization_error_layer(conf):
return -K.log(K.clip(conf, EPSILON, 1. - EPSILON))
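def _example_confidence_blending():
    # Illustrative numpy sketch (hypothetical helper, not from the original module) of
    # how the confidence enters the task error: the prediction is blended with the
    # ground truth, so a low-confidence sample is hardly penalized on its prediction
    # but pays the -log(confidence) regularization term instead.
    y_true = np.array([0.40, 0.35, 0.25])
    y_pred = np.array([0.30, 0.40, 0.30])  # made-up estimate
    rows = []
    for conf in (1.0, 0.5, 0.1):
        blended = conf * y_pred + (1. - conf) * y_true
        rows.append((conf,
                     float(np.mean((y_true - blended) ** 2)),  # basic task error
                     float(-np.log(max(conf, EPSILON)))))      # regularization error
    return rows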
# TODO: use an abstracted function to simplify the network architecture constructions
def hierarchy1(input_shape):
"""Builds a custom Hierarchy-1 network.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16, kernel_initializer=RandomNormal(mean=0.0, stddev=0.06))(input_norm)
rewu1 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05))(conv_unit1)
concat = _concat_unit()([rewu0, rewu1])
fc1 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(concat)
fc2 = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(fc1)
fc3 = _fc_unit(units=16, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.03))(fc2)
estimate = _fc_unit(units=3, softmax_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.06))(fc3)
model = Model([input_image], [estimate])
return model
def hierarchy1_confidence(input_shape):
"""Builds a custom Hierarchy-1 network with confidence estimation branch.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
groundtruth = Input(shape=(3,))
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16)(input_norm)
rewu1 = _reweight_unit()(conv_unit1)
concat = _concat_unit()([rewu0, rewu1])
fc1 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True)(concat)
fc2 = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True)(fc1)
fc3 = _fc_unit(units=16, bnorm_bool=True, dropout_bool=True)(fc2)
estimate = _fc_unit(units=3, softmax_bool=True)(fc3)
# confidence estimation branch
fc1_conf = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True)(concat)
fc2_conf = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True)(fc1_conf)
fc3_conf = _fc_unit(units=16, bnorm_bool=True, dropout_bool=True, relu_bool=False)(fc2_conf)
confidence = _fc_unit(units=1, relu_bool=False, sigmoid_bool=True)(fc3_conf)
mean_squared_error = Lambda(mean_squared_error_layer, output_shape=(1,),
name='mean_squared_error')([groundtruth, estimate])
ang_err = Lambda(angular_error_layer, output_shape=(1,),
name='ang_error')([groundtruth, estimate])
task_err = Lambda(task_error_layer, output_shape=(1,),
name='task_error')([groundtruth, estimate, confidence])
regularization_err = Lambda(regularization_error_layer, output_shape=(1,),
name='regularization_error')(confidence)
model = Model(inputs=[input_image, groundtruth],
outputs=[task_err, regularization_err, ang_err, mean_squared_error, estimate, confidence])
return model
def hierarchy2(input_shape):
"""Builds a custom Hierarchy-2 network.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
conv_unit2 = _conv_unit(filters=32, use_bias=False)(conv_unit1)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16, kernel_initializer=RandomNormal(mean=0.0, stddev=0.06))(input_norm)
rewu1 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05))(conv_unit1)
rewu2 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05))(conv_unit2)
concat = _concat_unit()([rewu0, rewu1, rewu2])
fc1 = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))(concat)
fc2 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(fc1)
fc3 = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(fc2)
estimate = _fc_unit(units=3, softmax_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.03))(fc3)
model = Model([input_image], [estimate])
return model
def hierarchy2_confidence(input_shape):
"""Builds a custom Hierarchy-3 network with confidence estimation branch.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
groundtruth = Input(shape=(3,))
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
conv_unit2 = _conv_unit(filters=32, use_bias=False)(conv_unit1)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16)(input_norm)
rewu1 = _reweight_unit()(conv_unit1)
rewu2 = _reweight_unit()(conv_unit2)
concat = _concat_unit()([rewu0, rewu1, rewu2])
fc1 = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True)(concat)
fc2 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True)(fc1)
fc3 = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True)(fc2)
estimate = _fc_unit(units=3, softmax_bool=True)(fc3)
# confidence estimation branch
fc1_conf = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True)(concat)
fc2_conf = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True)(fc1_conf)
fc3_conf = _fc_unit(units=32, bnorm_bool=True, dropout_bool=True, relu_bool=False)(fc2_conf)
confidence = _fc_unit(units=1, relu_bool=False, sigmoid_bool=True)(fc3_conf)
mean_squared_error = Lambda(mean_squared_error_layer, output_shape=(1,),
name='mean_squared_error')([groundtruth, estimate])
ang_err = Lambda(angular_error_layer, output_shape=(1,),
name='ang_error')([groundtruth, estimate])
task_err = Lambda(task_error_layer, output_shape=(1,),
name='task_error')([groundtruth, estimate, confidence])
regularization_err = Lambda(regularization_error_layer, output_shape=(1,),
name='regularization_error')(confidence)
model = Model(inputs=[input_image, groundtruth],
outputs=[task_err, regularization_err, ang_err, mean_squared_error, estimate, confidence])
return model
def hierarchy3(input_shape):
"""Builds a custom Hierarchy-3 network.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
conv_unit2 = _conv_unit(filters=32, use_bias=False)(conv_unit1)
conv_unit3 = _conv_unit(filters=64, pool_bool=True, use_bias=False)(conv_unit2)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16, kernel_initializer=RandomNormal(mean=0.0, stddev=0.06))(input_norm)
rewu1 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05))(conv_unit1)
rewu2 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05))(conv_unit2)
rewu3 = _reweight_unit(kernel_initializer=RandomNormal(mean=0.0, stddev=0.03))(conv_unit3)
concat = _concat_unit()([rewu0, rewu1, rewu2, rewu3])
fc1 = _fc_unit(units=256, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.01))(concat)
fc2 = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(fc1)
fc3 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.02))(fc2)
estimate = _fc_unit(units=3, softmax_bool=True,
kernel_initializer=RandomNormal(mean=0.0, stddev=0.04))(fc3)
model = Model([input_image], [estimate])
return model
def hierarchy3_confidence(input_shape):
"""Builds a custom Hierarchy-3 network with confidence estimation branch.
Args:
input_shape: The input_image shape in the form (nb_rows, nb_cols, nb_channels)
Returns:
The Keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'th':
input_shape = (input_shape[2], input_shape[0], input_shape[1])
groundtruth = Input(shape=(3,))
input_image = Input(shape=input_shape)
conv_unit1 = _conv_unit(filters=32, strides=(2, 2), use_bias=False)(input_image)
conv_unit2 = _conv_unit(filters=32, use_bias=False)(conv_unit1)
conv_unit3 = _conv_unit(filters=64, pool_bool=True, use_bias=False)(conv_unit2)
input_norm = BatchNormalization(axis=-1, epsilon=EPSILON)(input_image)
rewu0 = _reweight_unit(filters=16)(input_norm)
rewu1 = _reweight_unit()(conv_unit1)
rewu2 = _reweight_unit()(conv_unit2)
rewu3 = _reweight_unit()(conv_unit3)
concat = _concat_unit()([rewu0, rewu1, rewu2, rewu3])
fc1 = _fc_unit(units=256, bnorm_bool=True, dropout_bool=True)(concat)
fc2 = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True)(fc1)
fc3 = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True)(fc2)
estimate = _fc_unit(units=3, softmax_bool=True)(fc3)
# confidence estimation branch
fc1_conf = _fc_unit(units=256, bnorm_bool=True, dropout_bool=True)(concat)
fc2_conf = _fc_unit(units=128, bnorm_bool=True, dropout_bool=True)(fc1_conf)
fc3_conf = _fc_unit(units=64, bnorm_bool=True, dropout_bool=True, relu_bool=False)(fc2_conf)
confidence = _fc_unit(units=1, relu_bool=False, sigmoid_bool=True)(fc3_conf)
mean_squared_error = Lambda(mean_squared_error_layer, output_shape=(1,),
name='mean_squared_error')([groundtruth, estimate])
ang_err = Lambda(angular_error_layer, output_shape=(1,),
name='ang_error')([groundtruth, estimate])
task_err = Lambda(task_error_layer, output_shape=(1,),
name='task_error')([groundtruth, estimate, confidence])
regularization_err = Lambda(regularization_error_layer, output_shape=(1,),
name='regularization_error')(confidence)
model = Model(inputs=[input_image, groundtruth],
outputs=[task_err, regularization_err, ang_err, mean_squared_error, estimate, confidence])
return model
| 21,691 | 39.928302 | 112 | py |
Reweight-CC | Reweight-CC-master/config.py | def get_dataset_config(dataset):
db_config = {'patches': 18,
'patch_size': (224, 224),
'confidence_threshold': 0.5}
if dataset == 'RECommended':
db_config['dataset'] = r'ColorChecker RECommended'
db_config['model_dir'] = r'pretrained_models\RECommended'
db_config['input_bits'] = 16
db_config['valid_bits'] = 12
db_config['darkness'] = 129.
db_config['brightness_scale'] = 4.
db_config['color_correction_matrix'] = [[1.7494, -0.8470, 0.0976],
[-0.1565, 1.4380, -0.2815],
[0.0786, -0.5070, 1.4284]]
elif dataset == 'MultiCam':
db_config['dataset'] = r'MultiCam'
db_config['model_dir'] = r'pretrained_models\MultiCam'
db_config['input_bits'] = 8
db_config['valid_bits'] = 8
db_config['darkness'] = 0.
db_config['brightness_scale'] = 1.
db_config['color_correction_matrix'] = None
return db_config
def get_model_config(level, confidence):
model_config = dict()
if level == 1:
model_config['network'] = r'Hierarchy-1'
model_config['input_feature_maps_names'] = ['activation_1']
model_config['reweight_maps_names'] = ['activation_2', 'activation_3']
model_config['output_feature_maps_names'] = ['multiply_1', 'multiply_2']
model_config['LR'] = 5E-5
elif level == 2:
model_config['network'] = r'Hierarchy-2'
model_config['input_feature_maps_names'] = ['activation_1', 'activation_2']
model_config['reweight_maps_names'] = ['activation_3', 'activation_4', 'activation_5']
model_config['output_feature_maps_names'] = ['multiply_1', 'multiply_2', 'multiply_3']
model_config['LR'] = 4E-5
elif level == 3:
model_config['network'] = r'Hierarchy-3'
model_config['input_feature_maps_names'] = ['activation_1', 'activation_2',
'activation_3']
model_config['reweight_maps_names'] = ['activation_4', 'activation_5',
'activation_6', 'activation_7']
model_config['output_feature_maps_names'] = ['multiply_1', 'multiply_2',
'multiply_3', 'multiply_4']
model_config['LR'] = 4E-5
elif level == 5:
model_config['network'] = r'Hierarchy-5'
model_config['input_feature_maps_names'] = ['activation_1', 'activation_2', 'max_pooling2d_1',
'activation_4', 'activation_5']
model_config['reweight_maps_names'] = ['activation_6', 'activation_7', 'activation_8',
'activation_9', 'activation_10', 'activation_11']
model_config['output_feature_maps_names'] = ['multiply_1', 'multiply_2', 'multiply_3',
'multiply_4', 'multiply_5', 'multiply_6']
model_config['LR'] = 3E-5
if confidence:
model_config['network'] += '-Confidence'
model_config['pretrained_model'] = model_config['network'] + '.h5'
return model_config
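def _example_config_usage():
    # Illustrative usage sketch (hypothetical helper, not from the original module):
    # combine both helpers the way the training/inference scripts do, here for the
    # 'RECommended' dataset and a Hierarchy-3 model without the confidence branch.
    db_config = get_dataset_config(dataset='RECommended')
    model_config = get_model_config(level=3, confidence=False)
    # e.g. model_config['pretrained_model'] == 'Hierarchy-3.h5'
    return db_config, model_config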
| 3,245 | 48.938462 | 102 | py |
Reweight-CC | Reweight-CC-master/normalization.py | import keras.backend as K
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers, constraints
from keras.utils.generic_utils import get_custom_objects
class ChannellNormalization(Layer):
"""Channel normalization layer
Normalize the activations of the previous layer at each step,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
# Arguments
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
            set `axis=1` in `ChannellNormalization`.
Setting `axis=None` will normalize all values in each instance of the batch.
Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors.
        epsilon: Small float added to the standard deviation to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self,
axis=None,
epsilon=1e-7,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(ChannellNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
ndim = len(input_shape)
if self.axis == 0:
raise ValueError('Axis cannot be zero')
if (self.axis is not None) and (ndim == 2):
raise ValueError('Cannot specify axis for rank 1 tensor')
self.input_spec = InputSpec(ndim=ndim)
if self.axis is None:
shape = (1,)
else:
shape = (input_shape[self.axis],)
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.built = True
def call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
mean = K.mean(inputs, axis=self.axis, keepdims=True)
stddev = K.std(inputs, axis=self.axis, keepdims=True) + self.epsilon
normed = (inputs - mean) / stddev
broadcast_shape = [1] * len(input_shape)
if self.axis is not None:
broadcast_shape[self.axis] = input_shape[self.axis]
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
normed = normed * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
normed = normed + broadcast_beta
return normed
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(ChannellNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
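def _example_channel_normalization_numpy():
    # Illustrative numpy sketch (hypothetical helper, not from the original module) of
    # what ChannellNormalization(axis=-1) computes with gamma/beta at their initial
    # values (1 and 0): every spatial position of every sample is standardized across
    # the channel axis.
    import numpy as np
    x = np.random.rand(2, 4, 4, 8)  # (batch, height, width, channels)
    mean = x.mean(axis=-1, keepdims=True)
    stddev = x.std(axis=-1, keepdims=True) + 1e-7
    normed = (x - mean) / stddev
    return normed.mean(axis=-1), normed.std(axis=-1)  # ~0 and ~1 everywhere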
get_custom_objects().update({'ChannellNormalization': ChannellNormalization}) | 5,866 | 42.459259 | 88 | py |
Reweight-CC | Reweight-CC-master/train.py | import os
import argparse
parser = argparse.ArgumentParser(description="Training networks on RECommended dataset.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-l", "--level", type=int, choices=[1, 2, 3], default=3,
help="Select how many hierarchical levels to utilize.\n"
"-l 1/--level 1: 1-Hierarchy.\n"
"-l 1/--level 2: 2-Hierarchy.\n"
"-l 3/--level 3: 3-Hierarchy (default).\n")
parser.add_argument("-f", "--fold", type=str, metavar='N', default='123',
help="Perform training-validation on specified folds.\n"
"-f 1/--fold 1: use fold 1 as validation set and folds 2,3 as training sets.\n"
"-f 123/--fold 123: 3-fold cross validation (default).\n")
parser.add_argument("-e", "--epoch", type=int, metavar='N', default=500,
help="-e N/--epoch N: determine how many epochs to train. The default is 500.\n"
"Early-stopping will be used, so feel free to increase it.")
parser.add_argument("-b", "--batch", type=int, metavar='N', default=64,
help="-b N/--batch N: manually set the batch size to N. The default is 64.\n"
"Current training session DOES NOT support batch size smaller than 32.")
args = parser.parse_args()
if args.batch < 32:
raise argparse.ArgumentTypeError("Minimum batch size is 32")
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import tensorflow as tf
import keras.backend as K
from keras import applications
from keras.preprocessing.image import img_to_array, load_img
from keras.optimizers import Nadam
from utils import angular_error, percentile_mean
from config import *
from model import model_builder
# load configuration based on the pre-trained dataset
dataset_config = get_dataset_config(dataset='RECommended')
# network architecture selection
model_config = get_model_config(level=args.level, confidence=False)
# configurations
##############################
DATASET = dataset_config['dataset']
PATCHES = dataset_config['patches'] # the number of square sub-images
PATCH_SIZE = dataset_config['patch_size'] # the size of square sub-image
NETWORK = model_config['network']
LR = model_config['LR']
BATCH_SIZE = args.batch
EPOCHS = args.epoch
FOLDS = [int(f) for f in args.fold]
PATIENCE = 6
MIN_DELTA = 0.01
FACTOR = 0.9
MIN_LR = LR/10.
MAX_LR = LR
EARLY_STOP_PATIENCE = 150
EPSILON = 1E-9
##############################
def get_train_batch():
"""
generate image batch and labels iteratively
Note: we highly recommend using Keras Sequence class to create a data generator, which would be ~1.5x faster than
using this data generation function. However, Keras's 'fit_generator' method does not support callbacks for
validation data, to which end we have to use 'predict_on_batch' method and manually evaluate the accuracies.
:return: image batch as Numpy array, labels as Numpy array, and a bool indicator for continuing generating or stop
"""
global current_train_index
global continue_train
local_index = 0
img_batch = np.zeros(shape=(BATCH_SIZE, *PATCH_SIZE, 3))
label_batch = np.zeros(shape=(BATCH_SIZE, 3))
while local_index < BATCH_SIZE and continue_train:
img_ID = train_img_IDs[current_train_index]
img_batch[local_index] = img_to_array(load_img(img_ID))/255.
label_batch[local_index] = train_labels[img_ID]
local_index += 1
current_train_index += 1
if current_train_index+BATCH_SIZE >= len(train_img_IDs):
continue_train = False
return img_batch, label_batch, continue_train
def get_val_batch():
"""
generate image batch and labels iteratively
Note: the batch size for the validation set is different from BATCH_SIZE in the training phase. We collect all
sub-images from ONE full-resolution image into a batch when evaluating on the validation set. The number of
    sub-images for an arbitrary full-resolution image needs to be determined dynamically.
:return: image batch as Numpy array, labels as Numpy array, and a bool indicator for continuing generating or stop
"""
global current_val_index
global continue_val
local_index = 0
val_batch_size = 1
current_index = current_val_index
    while current_index+1 < len(val_img_IDs)-1 and val_source_img_IDs[current_index+1] == val_source_img_IDs[current_index]:  # check bounds before indexing
val_batch_size += 1
current_index += 1
img_batch = np.zeros(shape=(val_batch_size, *PATCH_SIZE, 3))
label_batch = np.zeros(shape=(val_batch_size, 3))
while local_index < val_batch_size and continue_val:
img_ID = val_img_IDs[current_val_index]
img_batch[local_index] = img_to_array(load_img(img_ID))/255.
label_batch[local_index] = val_labels[val_img_IDs[current_val_index]]
local_index += 1
current_val_index += 1
if current_val_index+val_batch_size >= len(val_img_IDs):
continue_val = False
return img_batch, label_batch, continue_val
# custom angular error metric
def angular_error_metric(y_true, y_pred):
return 180*tf.acos(K.clip(K.sum(K.l2_normalize(y_true, axis=-1) * K.l2_normalize(y_pred, axis=-1), axis=-1),
EPSILON, 1.-EPSILON))/np.pi
if __name__ == '__main__':
imdb_dir = r'train\RECommended\imdb'
model_dir = r'train\RECommended\models'
logs_dir = os.path.join(model_dir, NETWORK)
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
print('{network:s} architecture is selected with batch size {batch_size:02d}, trained on {dataset:s} dataset.'.
format(**{'network': NETWORK,
'dataset': DATASET,
'batch_size': BATCH_SIZE}))
for current_fold in FOLDS:
print('=' * 40)
print('Cross validation: fold {} started.'.format(current_fold), flush=True)
print('=' * 40)
logs_dir_current_fold = os.path.join(logs_dir, 'fold_{}'.format(current_fold))
if not os.path.exists(logs_dir_current_fold):
os.makedirs(logs_dir_current_fold)
# training data preparation
train_img_IDs = []
train_labels = dict()
train_file = os.path.join(imdb_dir, 'fold_{}_train.txt'.format(current_fold))
with open(train_file) as f:
for line in f:
img_ID = line.split('\t')[0]
train_img_IDs.append(img_ID)
gains = line.split('\t')[3] # string type
train_labels[img_ID] = [float(x) for x in gains.split(' ')] # convert to float type
# validation data preparation
val_img_IDs, val_source_img_IDs = [], []
val_labels = dict()
val_file = os.path.join(imdb_dir, 'fold_{}_val.txt'.format(current_fold))
with open(val_file) as f:
for line in f:
bias_angle = float(line.split('\t')[1])
            # for validation, only UNBIASED sub-images will be evaluated
if bias_angle == 0:
img_ID = line.split('\t')[0]
source_img_ID = line.split('\t')[4]
val_img_IDs.append(img_ID)
val_source_img_IDs.append(source_img_ID)
gains = line.split('\t')[3] # string type
val_labels[img_ID] = [float(x) for x in gains.split(' ')] # convert to float type
print('Data is ready. {} sub-images for training, {} for validation.'.format(len(train_img_IDs), len(val_img_IDs)))
if NETWORK == 'Hierarchy-1':
conv_layers_names = ['conv2d_1']
elif NETWORK == 'Hierarchy-2':
conv_layers_names = ['conv2d_1', 'conv2d_2']
elif NETWORK == 'Hierarchy-3':
conv_layers_names = ['conv2d_1', 'conv2d_2', 'conv2d_3']
# load pre-trained weights in Inception-V3
inception_model = applications.InceptionV3()
# a dictionary records the layer name and layer weights in Inception-V3
inception_layers = {layer.name: layer for layer in inception_model.layers}
inception_weights = dict()
for layer_name in conv_layers_names:
inception_weights[layer_name] = inception_layers[layer_name].get_weights()
K.clear_session()
# create a model and initialize with inception_weights
model = model_builder(level=args.level,
input_shape=(*PATCH_SIZE, 3))
model_layers = {layer.name: layer for layer in model.layers}
for layer_name in conv_layers_names:
idx = list(model_layers.keys()).index(layer_name)
model.layers[idx].set_weights(inception_weights[layer_name])
print('Initialize {0} layer with weights in Inception v3.'.format(layer_name))
model.compile(loss='mse',
optimizer=Nadam(lr=LR),
metrics=[angular_error_metric])
model.summary()
# uncomment following lines to plot the model architecture
# from keras.utils import plot_model
# plot_model(model, to_file=os.path.join(logs_dir, 'architecture.pdf'), show_shapes=True)
# figure preparation
fig = plt.figure()
ax_mse = fig.add_subplot(111)
ax_ang = ax_mse.twinx()
eps = []
history_train_mse, history_train_angular_errors = [], []
history_val_mean_angular_errors, history_val_median_angular_errors = [], []
min_median_angular_error = float('inf')
for current_epoch in range(1, EPOCHS + 1):
start_time = timer()
print('=' * 60)
print('Epoch {}/{} started.'.format(current_epoch, EPOCHS))
# learning rate decrease
if len(history_val_median_angular_errors) > PATIENCE:
if np.min(history_val_median_angular_errors[-PATIENCE:]) > history_val_median_angular_errors[-PATIENCE-1] - MIN_DELTA:
old_lr = float(K.get_value(model.optimizer.lr))
new_lr = max(old_lr * FACTOR, MIN_LR)
K.set_value(model.optimizer.lr, new_lr)
# learning rate increase
if len(history_val_median_angular_errors) > PATIENCE * 10:
if np.min(history_val_median_angular_errors[-PATIENCE*10:]) > history_val_median_angular_errors[-PATIENCE*10-1] - MIN_DELTA:
old_lr = float(K.get_value(model.optimizer.lr))
new_lr = min(old_lr * 2, MAX_LR)
K.set_value(model.optimizer.lr, new_lr)
print('Learning rate increased!')
print('Learning rate in current epoch: {0:.2e}'.format(float(K.get_value(model.optimizer.lr))))
train_mse, train_angular_errors = [], []
val_angular_errors = []
current_train_index = 0
current_val_index = 0
continue_train = True
continue_val = True
indices = np.arange(len(train_img_IDs))
np.random.shuffle(indices)
train_img_IDs = [train_img_IDs[i] for i in indices]
# training phase
while continue_train:
b, l, continue_train = get_train_batch()
logs = model.train_on_batch(b, l)
train_mse.append(logs[0])
train_angular_errors.append(logs[1])
# validation phase
while continue_val:
b, l, continue_val = get_val_batch()
if b.shape[0] > 4: # only test on images with more than 4 crops
estimates = model.predict_on_batch(b)
estimates /= estimates[:, 1][:, np.newaxis]
estimates = np.median(estimates, axis=0)
val_angular_errors.append(angular_error(l[0, ], estimates))
else:
pass
mean_val_angular_error_current_epoch = np.mean(val_angular_errors)
median_val_angular_error_current_epoch = np.median(val_angular_errors)
tri_val_angular_error_current_epoch = (np.percentile(val_angular_errors, 25) +
2 * np.median(val_angular_errors) +
np.percentile(val_angular_errors, 75)) / 4.
b25_val_angular_error_current_epoch = percentile_mean(np.array(val_angular_errors), 0, 25)
w25_val_angular_error_current_epoch = percentile_mean(np.array(val_angular_errors), 75, 100)
print('MSE on training set: {0:.5f}(mean), {1:.5f}(median)'.format(np.mean(train_mse),
np.median(train_mse)))
print('Angular error on training set: {0:.3f}(mean), {1:.3f}(median)'.format(np.mean(train_angular_errors),
np.median(train_angular_errors)))
print('Monitored angular error on validation set: {0:.3f}(mean), {1:.3f}(median), {2:.3f}(tri), {3:.3f}(best 25), {4:.3f}(worst 25)'
.format(mean_val_angular_error_current_epoch,
median_val_angular_error_current_epoch,
tri_val_angular_error_current_epoch,
b25_val_angular_error_current_epoch,
w25_val_angular_error_current_epoch))
# historical records
history_train_mse.append(np.mean(train_mse))
history_train_angular_errors.append(np.mean(train_angular_errors))
history_val_mean_angular_errors.append(mean_val_angular_error_current_epoch)
history_val_median_angular_errors.append(median_val_angular_error_current_epoch)
# plot the loss
eps.append(current_epoch)
mse_train_line, = ax_mse.plot(eps, history_train_mse, 'r--')
ax_mse.set_xlabel('Epoch')
ax_mse.set_ylabel('MSE loss', color='r')
ax_mse.tick_params('y', colors='r')
train_angular_error_line, = ax_ang.plot(eps, history_train_angular_errors, 'b--')
val_mean_angular_error_line, = ax_ang.plot(eps, history_val_mean_angular_errors, 'b-')
val_median_angular_error_line, = ax_ang.plot(eps, history_val_median_angular_errors, 'b:')
ax_ang.set_ylabel('Angular loss', color='b')
ax_ang.tick_params('y', colors='b')
plt.legend((mse_train_line,
train_angular_error_line, val_mean_angular_error_line, val_median_angular_error_line),
('MSE training loss', 'Angular training loss',
'Angular validation loss (mean)', 'Angular validation loss (median)'))
plt.savefig(os.path.join(logs_dir_current_fold, "train_history.pdf"))
# save model
if median_val_angular_error_current_epoch < min_median_angular_error:
min_median_angular_error = median_val_angular_error_current_epoch
model.save_weights(os.path.join(logs_dir_current_fold,
'epoch{epoch:02d}_'
'{mean_angular_error:.3f}(mean)_'
'{median_angular_error:.3f}(median)_'
'{tri_angular_error:.3f}(tri)_'
'{b25_angular_error:.3f}(b25)_'
'{w25_angular_error:.3f}(w25).h5').format(
**{'epoch': current_epoch,
'mean_angular_error': mean_val_angular_error_current_epoch,
'median_angular_error': median_val_angular_error_current_epoch,
'tri_angular_error': tri_val_angular_error_current_epoch,
'b25_angular_error': b25_val_angular_error_current_epoch,
'w25_angular_error': w25_val_angular_error_current_epoch}))
end_time = timer()
print('{0:.0f}s elapsed'.format(end_time-start_time))
# early-stopping
if len(history_val_median_angular_errors) > EARLY_STOP_PATIENCE:
if np.min(history_val_median_angular_errors[-EARLY_STOP_PATIENCE:]) > history_val_median_angular_errors[-EARLY_STOP_PATIENCE - 1] - MIN_DELTA:
print('No improvement detected. Stop the training.')
print('=' * 60)
break
K.clear_session()
| 16,729 | 48.643917 | 158 | py |
Reweight-CC | Reweight-CC-master/cvmerge.py | # -*- coding: utf-8 -*-
import os
import re
import argparse
parser = argparse.ArgumentParser(description="Merge cross validation results.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("cv_dir", type=str,
help="The directory containing models for cross validation. "
"e.g., train\\RECommended\\models\\Hierarchy-1")
args = parser.parse_args()
import numpy as np
import glob
from model import model_builder
from config import get_dataset_config
from utils import (get_ground_truth_dict,
get_masks_dict,
img2batch,
angular_error,
local_estimates_aggregation_naive,
local_estimates_aggregation,
percentile_mean)
# load configuration based on the pre-trained dataset
dataset_config = get_dataset_config(dataset='RECommended')
# configurations
##############################
DATASET = dataset_config['dataset']
PATCHES = dataset_config['patches'] # the number of square sub-images
PATCH_SIZE = dataset_config['patch_size'] # the size of square sub-image
CONFIDENCE_THRESHOLD = dataset_config['confidence_threshold']
MODEL_DIR = dataset_config['model_dir'] # pre-trained model directory
INPUT_BITS = dataset_config['input_bits'] # bit length of the input image
VALID_BITS = dataset_config['valid_bits'] # valid bit length of the data
DARKNESS = dataset_config['darkness'] # black level
COLOR_CORRECTION_MATRIX = dataset_config['color_correction_matrix']
GAMMA = None
BATCH_SIZE = 64
NB_FOLDS = 3
##############################
def search_best_epoch(models_dir):
min_median_angular_error = float('inf')
best_model_dir = None
trained_models = glob.glob(models_dir + '/*.h5')
for model_name in trained_models:
current_median_angular_error = float(re.search('\(mean\)_(.*)\(median\)', model_name).group(1))
if current_median_angular_error <= min_median_angular_error:
min_median_angular_error = current_median_angular_error
best_model_dir = model_name
return best_model_dir
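def _example_checkpoint_name():
    # Illustrative sketch (hypothetical helper, not from the original script):
    # search_best_epoch assumes checkpoint names in the format written by train.py,
    # e.g. the made-up name below; the captured group is the median validation
    # angular error (1.527 here), which is used to rank the checkpoints.
    name = 'epoch42_1.834(mean)_1.527(median)_1.601(tri)_0.512(b25)_3.995(w25).h5'
    return float(re.search(r'\(mean\)_(.*)\(median\)', name).group(1))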
def inference(model_level, model_dir, test_img_IDs):
confidence_estimation_mode = False
model = model_builder(level=model_level,
confidence=False,
input_shape=(*PATCH_SIZE, 3))
model.load_weights(model_dir)
ground_truth_dict = get_ground_truth_dict(r'train\RECommended\ground-truth.txt')
masks_dict = get_masks_dict(r'train\RECommended\masks.txt')
angular_errors_statistics = []
for (counter, test_img_ID) in enumerate(test_img_IDs):
print('Processing {}/{} images...'.format(counter + 1, len(test_img_IDs)), end='\r')
# data generator
batch, boxes, remained_boxes_indices, ground_truth = img2batch(test_img_ID,
patch_size=PATCH_SIZE,
input_bits=INPUT_BITS,
valid_bits=VALID_BITS,
darkness=DARKNESS,
ground_truth_dict=ground_truth_dict,
masks_dict=masks_dict,
gamma=GAMMA)
nb_batch = int(np.ceil(PATCHES / BATCH_SIZE))
batch_size = int(PATCHES / nb_batch) # actual batch size
local_estimates, confidences = np.empty(shape=(0, 3)), np.empty(shape=(0,))
# use batch(es) to feed into the network
for b in range(nb_batch):
batch_start_index, batch_end_index = b * batch_size, (b + 1) * batch_size
batch_tmp = batch[batch_start_index:batch_end_index, ]
if confidence_estimation_mode:
# the model requires 2 inputs when confidence estimation mode is activated
batch_tmp = [batch_tmp, np.zeros((batch_size, 3))]
outputs = model.predict(batch_tmp) # model inference
if confidence_estimation_mode:
# the model produces 6 outputs when confidence estimation mode is on. See model.py for more details
# local_estimates is the gain instead of illuminant color!
local_estimates = np.vstack((local_estimates, outputs[4]))
confidences = np.hstack((confidences, outputs[5].squeeze()))
else:
# local_estimates is the gain instead of illuminant color!
local_estimates = np.vstack((local_estimates, outputs))
confidences = None
if confidence_estimation_mode:
global_estimate = local_estimates_aggregation(local_estimates, confidences)
else:
global_estimate = local_estimates_aggregation_naive(local_estimates)
global_rgb_estimate = 1. / global_estimate # convert gain into rgb triplet
global_angular_error = angular_error(ground_truth, global_rgb_estimate)
angular_errors_statistics.append(global_angular_error)
return np.array(angular_errors_statistics)
def cross_validation_collect(cross_validation_dir):
assert 'Hierarchy-' in cross_validation_dir
fold_dirs = glob.glob(cross_validation_dir + '/*')
assert set([os.path.join(cross_validation_dir, 'fold_{}'.format(i)) for i in range(1, NB_FOLDS + 1)]) <= set(fold_dirs)
hierarchical_level = int(re.search('Hierarchy-(\d)', cross_validation_dir).group(1))
cv_statistics = np.empty((NB_FOLDS, 5)) # 5 metrics: mean, med, tri, b25, w25
for current_fold in range(1, NB_FOLDS+1):
print('Fold {}/{} started.'.format(current_fold, NB_FOLDS))
current_fold_dir = os.path.join(cross_validation_dir, 'fold_{}'.format(current_fold))
best_model_dir = search_best_epoch(current_fold_dir)
test_img_list_path = r'train\RECommended\imdb\fold_{}_val.txt'.format(current_fold)
test_img_IDs = []
with open(test_img_list_path) as f:
for line in f:
test_img_IDs.append(line.split('\t')[-1].rstrip())
test_img_IDs = list(set(test_img_IDs))
angular_errors = inference(model_level=hierarchical_level,
model_dir=best_model_dir,
test_img_IDs=test_img_IDs)
current_fold_statistics = [np.mean(angular_errors),
np.median(angular_errors),
(np.percentile(angular_errors, 25) + 2 * np.median(angular_errors) + np.percentile(angular_errors, 75)) / 4.,
percentile_mean(angular_errors, 0, 25),
percentile_mean(angular_errors, 75, 100)]
cv_statistics[current_fold - 1, :] = np.array(current_fold_statistics)
print('Validation results for fold {0}: '
'{1:.3f}(mean), {2:.3f}(median), {3:.3f}(tri), {4:.3f}(best 25), {5:.3f}(worst 25)'.
format(current_fold, *current_fold_statistics))
print('=' * 60)
with open(os.path.join(cross_validation_dir, 'cv_results.txt'), "a") as f:
f.write('Validation results for fold {0}: '
'{1:.3f}(mean), {2:.3f}(median), {3:.3f}(tri), {4:.3f}(best 25), {5:.3f}(worst 25)\n'.
format(current_fold, *current_fold_statistics))
cv_results = np.mean(cv_statistics, axis=0)
with open(os.path.join(cross_validation_dir, 'cv_results.txt'), "a") as f:
f.write('=' * 40 + '\n')
f.write('Cross validation result: '
'{0:.2f}(mean), {1:.2f}(median), {2:.2f}(tri), {3:.2f}(best 25), {4:.2f}(worst 25)\n'.format(*cv_results))
return cv_results
if __name__ == '__main__':
network = os.path.split(args.cv_dir)[1]
print('Merge cross validation statistics for {} model'.format(network))
print('=' * 60)
cv_results = cross_validation_collect(args.cv_dir)
print('\nCross validation result for {0} model: '
'{1:.2f}(mean), {2:.2f}(median), {3:.2f}(tri), {4:.2f}(best 25), {5:.2f}(worst 25)'.format(network,
*cv_results))
| 8,431 | 49.190476 | 144 | py |
nosnoc_py | nosnoc_py-main/setup.py | from distutils.core import setup
setup(name='nosnoc',
version='0.1',
python_requires='>=3.7',
description='Nonsmooth Numerical Optimal Control for Python',
# url='',
author='Jonathan Frey, Armin Nurkanovic',
# use_scm_version={
# "fallback_version": "0.1-local",
# "root": "../..",
# "relative_to": __file__
# },
license='BSD',
# packages = find_packages(),
include_package_data = True,
py_modules=[],
setup_requires=['setuptools_scm'],
install_requires=[
'numpy>=1.20.0,<2.0.0',
'scipy',
'casadi<=3.6',
'matplotlib',
]
)
| 604 | 22.269231 | 64 | py |
nosnoc_py | nosnoc_py-main/examples/cart_pole_with_friction/parametric_cart_pole_with_friction.py | import numpy as np
from casadi import SX, horzcat, vertcat, cos, sin, inv
import matplotlib.pyplot as plt
import nosnoc
def solve_paramteric_example(with_global_var=False):
# opts
opts = nosnoc.NosnocOpts()
opts.irk_scheme = nosnoc.IrkSchemes.RADAU_IIA
opts.n_s = 2
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_DELTA
opts.equidistant_control_grid = True
opts.N_stages = 50 # number of control intervals
    opts.N_finite_elements = 2 # number of finite elements on every control interval
opts.terminal_time = 4.0 # Time horizon
opts.print_level = 1
# faster options
opts.N_stages = 15 # number of control intervals
    ## Model definition
q = SX.sym('q', 2)
v = SX.sym('v', 2)
x = vertcat(q, v)
u = SX.sym('u') # control
## parametric version:
# masses
m1 = SX.sym('m1') # cart
m2 = SX.sym('m2') # link
x_ref = SX.sym('x_ref', 4)
u_ref = SX.sym('u_ref', 1)
x_ref_val = np.array([0, 180 / 180 * np.pi, 0, 0]) # end upwards
u_ref_val = np.array([0.0])
p_time_var = vertcat(x_ref, u_ref)
p_time_var_val = np.tile(np.concatenate((x_ref_val, u_ref_val)), (opts.N_stages, 1))
if with_global_var:
p_global = vertcat(m2)
p_global_val = np.array([0.1])
v_global = m1
lbv_global = np.array([1.0])
ubv_global = np.array([100.0])
v_global_guess = np.array([1.5])
else:
p_global = vertcat(m1, m2)
p_global_val = np.array([1.0, 0.1])
v_global = SX.sym("v_global", 0, 1)
lbv_global = np.array([])
ubv_global = np.array([])
v_global_guess = np.array([])
# actually vary x_ref theta entry over time
# p_ind_theta = 1
# p_time_var_val[:, p_ind_theta] = np.linspace(0.0, np.pi, opts.N_stages)
link_length = 1
g = 9.81
# Inertia matrix
M = vertcat(horzcat(m1 + m2, m2 * link_length * cos(q[1])),
horzcat(m2 * link_length * cos(q[1]), m2 * link_length**2))
# Coriolis force
C = SX.zeros(2, 2)
C[0, 1] = -m2 * link_length * v[1] * sin(q[1])
# all forces = Gravity+Control+Coriolis (+Friction)
f_all = vertcat(u, -m2 * g * link_length * sin(x[1])) - C @ v
# friction between cart and ground
F_friction = 2
# Dynamics with $ v > 0$
f_1 = vertcat(v, inv(M) @ (f_all - vertcat(F_friction, 0)))
# Dynamics with $ v < 0$
f_2 = vertcat(v, inv(M) @ (f_all + vertcat(F_friction, 0)))
F = [horzcat(f_1, f_2)]
# switching function (cart velocity)
c = [v[0]]
# Sign matrix # f_1 for c=v>0, f_2 for c=v<0
S = [np.array([[1], [-1]])]
# specify initial and end state, cost ref and weight matrix
x0 = np.array([1, 0 / 180 * np.pi, 0, 0]) # start downwards
Q = np.diag([1.0, 100.0, 1.0, 1.0])
Q_terminal = np.diag([100.0, 100.0, 10.0, 10.0])
R = 1.0
# bounds
ubx = np.array([5.0, 240 / 180 * np.pi, 20.0, 20.0])
lbx = np.array([-0.0, -240 / 180 * np.pi, -20.0, -20.0])
u_max = 30.0
# Stage cost
f_q = (x - x_ref).T @ Q @ (x - x_ref) + (u - u_ref).T @ R @ (u - u_ref)
# terminal cost
f_terminal = (x - x_ref).T @ Q_terminal @ (x - x_ref)
g_terminal = []
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0, u=u,
p_global=p_global, p_global_val=p_global_val,
p_time_var=p_time_var,
v_global=v_global
)
lbu = -np.array([u_max])
ubu = np.array([u_max])
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal, g_terminal=g_terminal,
lbx=lbx, ubx=ubx, lbv_global=lbv_global, ubv_global=ubv_global, v_global_guess=v_global_guess)
# create solver
solver = nosnoc.NosnocSolver(opts, model, ocp)
# set / update parameters
solver.set('p_time_var', p_time_var_val)
solver.set('p_global', p_global_val)
# solve OCP
results = solver.solve()
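    # Illustrative note (not from the original example): since the masses and the
    # reference trajectory enter the NLP only as parameters, they could be updated and
    # the problem solved again with the same solver object, e.g.
    #   solver.set('p_global', np.array([1.0, 0.2]))  # heavier link
    #   results_heavier_link = solver.solve()
    # (sketch only; assumes the solver object supports repeated solves)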
return results
def main():
results = solve_paramteric_example()
plot_results(results)
def plot_results(results):
nosnoc.latexify_plot()
x_traj = np.array(results["x_traj"])
plt.figure()
# states
plt.subplot(3, 1, 1)
plt.plot(results["t_grid"], x_traj[:, 0], label='$q_1$ - cart')
plt.plot(results["t_grid"], x_traj[:, 1], label='$q_2$ - pole')
plt.legend()
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(results["t_grid"], x_traj[:, 2], label='$v_1$ - cart')
plt.plot(results["t_grid"], x_traj[:, 3], label='$v_2$ - pole')
plt.legend()
plt.grid()
# controls
plt.subplot(3, 1, 3)
plt.step(results["t_grid_u"], [results["u_traj"][0]] + results["u_traj"], label='u')
plt.legend()
plt.grid()
plt.show()
if __name__ == "__main__":
main()
| 4,896 | 29.042945 | 121 | py |
nosnoc_py | nosnoc_py-main/examples/cart_pole_with_friction/cart_pole_with_friction.py | import numpy as np
from casadi import SX, horzcat, vertcat, cos, sin, inv
import matplotlib.pyplot as plt
import nosnoc
def solve_example():
# opts
opts = nosnoc.NosnocOpts()
opts.irk_scheme = nosnoc.IrkSchemes.RADAU_IIA
opts.n_s = 2
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_DELTA
# opts.N_stages = 50 # MATLAB setting
opts.N_stages = 15 # number of control intervals
    opts.N_finite_elements = 2  # number of finite elements on every control interval
opts.terminal_time = 4.0 # Time horizon
opts.print_level = 1
    ## Model definition
q = SX.sym('q', 2)
v = SX.sym('v', 2)
x = vertcat(q, v)
u = SX.sym('u') # control
m1 = 1 # cart
m2 = 0.1 # link
link_length = 1
g = 9.81
# Inertia matrix
M = vertcat(horzcat(m1 + m2, m2 * link_length * cos(q[1])),
horzcat(m2 * link_length * cos(q[1]), m2 * link_length**2))
# Coriolis force
C = SX.zeros(2, 2)
C[0, 1] = -m2 * link_length * v[1] * sin(q[1])
# all forces = Gravity+Control+Coriolis (+Friction)
f_all = vertcat(u, -m2 * g * link_length * sin(x[1])) - C @ v
# friction between cart and ground
F_friction = 2
# Dynamics with $ v > 0$
f_1 = vertcat(v, inv(M) @ (f_all - vertcat(F_friction, 0)))
# Dynamics with $ v < 0$
f_2 = vertcat(v, inv(M) @ (f_all + vertcat(F_friction, 0)))
F = [horzcat(f_1, f_2)]
# switching function (cart velocity)
c = [v[0]]
# Sign matrix # f_1 for c=v>0, f_2 for c=v<0
S = [np.array([[1], [-1]])]
# specify initial and end state, cost ref and weight matrix
x0 = np.array([1, 0 / 180 * np.pi, 0, 0]) # start downwards
x_ref = np.array([0, 180 / 180 * np.pi, 0, 0]) # end upwards
Q = np.diag([1.0, 100.0, 1.0, 1.0])
Q_terminal = np.diag([100.0, 100.0, 10.0, 10.0])
R = 1.0
# bounds
ubx = np.array([5.0, 240 / 180 * np.pi, 20.0, 20.0])
lbx = np.array([-0.0, -240 / 180 * np.pi, -20.0, -20.0])
u_max = 30.0
u_ref = 0.0
# Stage cost
f_q = (x - x_ref).T @ Q @ (x - x_ref) + (u - u_ref).T @ R @ (u - u_ref)
# terminal cost
f_terminal = (x - x_ref).T @ Q_terminal @ (x - x_ref)
g_terminal = []
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0, u=u)
lbu = -np.array([u_max])
ubu = np.array([u_max])
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal, g_terminal=g_terminal,
lbx=lbx, ubx=ubx)
## Solve OCP
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
return results
def main():
results = solve_example()
plot_results(results)
def plot_results(results):
nosnoc.latexify_plot()
x_traj = np.array(results["x_traj"])
plt.figure()
# states
plt.subplot(3, 1, 1)
plt.plot(results["t_grid"], x_traj[:, 0], label='$q_1$ - cart')
plt.plot(results["t_grid"], x_traj[:, 1], label='$q_2$ - pole')
plt.legend()
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(results["t_grid"], x_traj[:, 2], label='$v_1$ - cart')
plt.plot(results["t_grid"], x_traj[:, 3], label='$v_2$ - pole')
plt.legend()
plt.grid()
# controls
plt.subplot(3, 1, 3)
plt.step(results["t_grid_u"], [results["u_traj"][0]] + results["u_traj"], label='u')
plt.legend()
plt.grid()
plt.show()
if __name__ == "__main__":
main()
| 3,487 | 26.904 | 99 | py |
nosnoc_py | nosnoc_py-main/examples/oscillator/integration_order_experiment_oscillator.py | from matplotlib import pyplot as plt
import numpy as np
from oscillator_example import get_oscillator_model, X_SOL, TSIM
import nosnoc
import pickle
import os
import itertools
BENCHMARK_DATA_PATH = 'private_oscillator_benchmark_data'
SCHEMES = [nosnoc.IrkSchemes.GAUSS_LEGENDRE, nosnoc.IrkSchemes.RADAU_IIA]
NS_VALUES = [1, 2, 3, 4]
NFE_VALUES = [2]
NSIM_VALUES = [1, 5, 9, 10, 20, 40, 60, 100]
USE_FESD_VALUES = [True, False]
TOL = 1e-12
def pickle_results(results, filename):
# create directory if it does not exist
if not os.path.exists(BENCHMARK_DATA_PATH):
os.makedirs(BENCHMARK_DATA_PATH)
# save
file = os.path.join(BENCHMARK_DATA_PATH, filename)
with open(file, 'wb') as f:
pickle.dump(results, f)
def unpickle_results(filename):
file = os.path.join(BENCHMARK_DATA_PATH, filename)
with open(file, 'rb') as f:
results = pickle.load(f)
return results
def get_results_filename(opts: nosnoc.NosnocOpts):
filename = 'oscillator_bm_results_'
filename += 'Nfe_' + str(opts.N_finite_elements) + '_'
filename += 'ns' + str(opts.n_s) + '_'
filename += 'tol' + str(opts.comp_tol) + '_'
filename += 'dt' + str(opts.terminal_time) + '_'
filename += 'Tsim' + str(TSIM) + '_'
filename += opts.irk_scheme.name + '_'
filename += opts.pss_mode.name
if not opts.use_fesd:
filename += '_nofesd'
filename += '.pickle'
return filename
def get_opts(Nsim, n_s, N_fe, scheme, use_fesd):
opts = nosnoc.NosnocOpts()
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
opts.comp_tol = TOL
opts.sigma_N = TOL
opts.irk_scheme = scheme
opts.print_level = 0
opts.tol_ipopt = TOL
opts.n_s = n_s
opts.N_finite_elements = N_fe
opts.pss_mode = nosnoc.PssMode.STEP
opts.use_fesd = use_fesd
return opts
def run_benchmark():
for n_s, N_fe, Nsim, scheme, use_fesd in itertools.product(NS_VALUES, NFE_VALUES, NSIM_VALUES, SCHEMES, USE_FESD_VALUES):
model = get_oscillator_model()
opts = get_opts(Nsim, n_s, N_fe, scheme, use_fesd)
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, print_level=1)
looper.run()
results = looper.get_results()
results['opts'] = opts
filename = get_results_filename(opts)
pickle_results(results, filename)
del solver, looper, results, model, opts
def count_failures(results):
status_list: list = results['status']
return len([x for x in status_list if x != nosnoc.Status.SUCCESS])
def order_plot():
nosnoc.latexify_plot()
N_fe = 2
linestyles = ['-', '--', '-.', ':', ':', '-.', '--', '-']
marker_types = ['o', 's', 'v', '^', '>', '<', 'd', 'p']
SCHEME = nosnoc.IrkSchemes.GAUSS_LEGENDRE
# SCHEME = nosnoc.IrkSchemes.RADAU_IIA
ax = plt.figure()
for use_fesd in [True, False]:
for i, n_s in enumerate(NS_VALUES):
errors = []
step_sizes = []
for Nsim in NSIM_VALUES:
opts = get_opts(Nsim, n_s, N_fe, SCHEME, use_fesd)
filename = get_results_filename(opts)
results = unpickle_results(filename)
                print(f"loading file {filename}")
x_end = results['X_sim'][-1]
n_fail = count_failures(results)
error = np.max(np.abs(x_end - X_SOL))
print("opts.n_s: ", opts.n_s, "opts.terminal_time: ", opts.terminal_time, "error: ", error, "n_fail: ", n_fail)
errors.append(error)
step_sizes.append(opts.terminal_time)
label = r'$n_s=' + str(n_s) +'$'
if results['opts'].irk_scheme == nosnoc.IrkSchemes.RADAU_IIA:
if n_s == 1:
label = 'implicit Euler: 1'
else:
label = 'Radau IIA: ' + str(2*n_s-1)
elif results['opts'].irk_scheme == nosnoc.IrkSchemes.GAUSS_LEGENDRE:
label = 'Gauss-Legendre: ' + str(2*n_s)
if use_fesd:
label += ', FESD'
else:
label += ', Standard'
plt.plot(step_sizes, errors, label=label, marker=marker_types[i], linestyle=linestyles[i])
plt.grid()
plt.xlabel('Step size')
plt.ylabel('Error')
plt.yscale('log')
plt.xscale('log')
# plt.legend(loc='center left')
plt.legend()
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0), ncol=2, framealpha=1.0)
fig_filename = f'oscillator_benchmark_{SCHEME.name}.pdf'
plt.savefig(fig_filename, bbox_inches='tight')
print(f"Saved figure to {fig_filename}")
plt.show()
if __name__ == "__main__":
# generate data
# run_benchmark()
    # evaluate
order_plot()
| 4,818 | 30.703947 | 127 | py |
nosnoc_py | nosnoc_py-main/examples/oscillator/oscillator_example.py | import nosnoc
from casadi import SX, vertcat, horzcat
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
OMEGA = 2 * np.pi
A1 = np.array([[1, OMEGA], [-OMEGA, 1]])
A2 = np.array([[1, -OMEGA], [OMEGA, 1]])
R_OSC = 1
TSIM = np.pi / 2
X_SOL = np.array([np.exp(TSIM-1) * np.cos(2*np.pi * (TSIM-1)), -np.exp(TSIM-1) * np.sin(2*np.pi*(TSIM-1))])
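# analytic solution of the switched system at t = TSIM; used as the reference value for
# the simulation error (see solve_oscillator) and for the integration-order benchmark.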
def get_oscillator_model(use_g_Stewart=False):
# Initial Value
x0 = np.array([np.exp([-1])[0], 0])
# Variable defintion
x1 = SX.sym("x1")
x2 = SX.sym("x2")
x = vertcat(x1, x2)
# every constraint function corresponds to a sys (note that the c_i might be vector valued)
c = [x1**2 + x2**2 - R_OSC**2]
# sign matrix for the modes
S = [np.array([[1], [-1]])]
f_11 = A1 @ x
f_12 = A2 @ x
# in matrix form
F = [horzcat(f_11, f_12)]
if use_g_Stewart:
g_Stewart_list = [-S[i] @ c[i] for i in range(1)]
model = nosnoc.NosnocModel(x=x, F=F, g_Stewart=g_Stewart_list, x0=x0)
else:
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0)
return model
def get_default_options():
opts = nosnoc.NosnocOpts()
comp_tol = 1e-8
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1 # decrease rate
opts.N_finite_elements = 2
opts.n_s = 3
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT_COMPLEMENTARITY
opts.print_level = 1
return opts
def solve_oscillator(opts=None, use_g_Stewart=False, do_plot=True):
if opts is None:
opts = get_default_options()
model = get_oscillator_model(use_g_Stewart)
Nsim = 29
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
error = np.max(np.abs(X_SOL - results["X_sim"][-1]))
print(f"error wrt exact solution {error:.2e}")
if do_plot:
plot_oscillator(results["X_sim"], results["t_grid"], switch_times=results["switch_times"])
# nosnoc.plot_timings(results["cpu_nlp"])
# store solution
# import json
# json_file = 'oscillator_results_ref.json'
# with open(json_file, 'w') as f:
# json.dump(results['w_sim'], f, indent=4, sort_keys=True, default=make_object_json_dumpable)
# print(f"saved results in {json_file}")
return results
def main_least_squares():
# load reference solution
# import json
# json_file = 'oscillator_results_ref.json'
# with open(json_file, 'r') as f:
# w_sim_ref = json.load(f)
opts = nosnoc.NosnocOpts()
comp_tol = 1e-7
opts.comp_tol = comp_tol
opts.print_level = 2
# opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER
opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
opts.initialization_strategy = nosnoc.InitializationStrategy.ALL_XCURRENT_W0_START
opts.initialization_strategy = nosnoc.InitializationStrategy.RK4_SMOOTHENED
opts.sigma_0 = 1e0
# opts.gamma_h = np.inf
# opts.nlp_max_iter = 0
# opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.homotopy_update_slope = 0.1
model = get_oscillator_model()
Nsim = 29
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
solver.print_problem()
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
# looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, w_init=w_sim_ref)
looper.run()
results = looper.get_results()
print(f"max cost_val = {max(results['cost_vals']):.2e}")
error = np.max(np.abs(X_SOL - results["X_sim"][-1]))
print(f"error wrt exact solution {error:.2e}")
breakpoint()
plot_oscillator(results["X_sim"], results["t_grid"])
# nosnoc.plot_timings(results["cpu_nlp"])
def main_polishing():
opts = get_default_options()
opts.comp_tol = 1e-4
opts.do_polishing_step = True
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER
opts.print_level = 3
    results = solve_oscillator(opts, do_plot=False)
print(f"max cost_val = {max(results['cost_vals']):.2e}")
nosnoc.plot_timings(results["cpu_nlp"])
def plot_oscillator(X_sim, t_grid, latexify=True, switch_times=None):
if latexify:
nosnoc.latexify_plot()
# trajectory
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(t_grid, X_sim)
plt.ylabel("$x$")
plt.xlabel("$t$")
if switch_times is not None:
for t in switch_times:
plt.axvline(t, linestyle="dashed", color="k")
plt.grid()
# state space plot
ax = plt.subplot(1, 2, 2)
plt.ylabel("$x_2$")
plt.xlabel("$x_1$")
x1 = [x[0] for x in X_sim]
x2 = [x[1] for x in X_sim]
plt.plot(x1, x2)
ax.add_patch(plt.Circle((0, 0), 1.0, color="r", fill=False))
# vector field
width = 2.0
n_grid = 20
x, y = np.meshgrid(np.linspace(-width, width, n_grid), np.linspace(-width, width, n_grid))
indicator = np.sign(x**2 + y**2 - R_OSC**2)
u = (A1[0, 0] * x + A1[0, 1] * y) * 0.5 * (indicator + 1) + (
A2[0, 0] * x + A2[0, 1] * y) * 0.5 * (1 - indicator)
v = (A1[1, 0] * x + A1[1, 1] * y) * 0.5 * (indicator + 1) + (
A2[1, 0] * x + A2[1, 1] * y) * 0.5 * (1 - indicator)
plt.quiver(x, y, u, v)
plt.show()
def make_object_json_dumpable(input):
if isinstance(input, (np.ndarray)):
return input.tolist()
else:
raise TypeError(f"Cannot make input of type {type(input)} dumpable.")
if __name__ == "__main__":
solve_oscillator(use_g_Stewart=False, do_plot=True)
# main_least_squares()
# main_polishing()
| 6,178 | 29.289216 | 107 | py |
nosnoc_py | nosnoc_py-main/examples/friction_block/friction_block_example.py | import nosnoc
from casadi import SX, vertcat, horzcat, cos
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
## Info
# This is an example from
# Stewart, D.E., 1996. A numerical method for friction problems with multiple contacts. The ANZIAM Journal, 37(3), pp.288-308.
# It considers 3 independent switching functions
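# Each block has its own friction switching function (the sign of its velocity), so the
# Filippov system below has 2^3 = 8 mode combinations, passed to nosnoc as three
# two-mode subsystems F = [F1, F2, F3] (one "layer" per switching function).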
def get_blocks_with_friction_model():
## Initial value
x0 = np.array([-1, 1, -1, -1, 1, 1, 0])
# u0 = 0 # guess for control variables
    ## Number of ODE layers
    # n_sys = 3;# number of Cartesian products in the model ("independent switches"), we call this a layer
# # number of modes in every sys
# m_1 = 2
# m_2 = 2
# m_3 = 2
# m_vec = [m_1 m_2 m_3];
## Variable defintion
# differential states
q1 = SX.sym('q1')
q2 = SX.sym('q2')
q3 = SX.sym('q3')
v1 = SX.sym('v1')
v2 = SX.sym('v2')
v3 = SX.sym('v3')
t = SX.sym('t')
q = vertcat(q1, q2, q3)
v = vertcat(v1, v2, v3)
x = vertcat(q, v, t)
## Control
# u = MX.sym('u')
# n_u = 1; # number of parameters, we model it as control variables and merge them with simple equality constraints
#
# # Guess and Bounds
# u0 = 0
# lbu = -20*0
# ubu = 20*0;
## Switching Functions
# every constraint function corresponds to a sys (note that the c_i might be vector valued)
c1 = v1
c2 = v2
c3 = v3
# sign matrix for the modes
S1 = np.array([[1], [-1]])
S2 = np.array([[1], [-1]])
S3 = np.array([[1], [-1]])
    # discriminant functions
S = [S1, S2, S3]
c = [c1, c2, c3]
## Modes of the ODEs layers (for all i = 1,...,n_sys)
    # part independent of the nonsmoothness
F_external = 0
# external force, e.g., control
F_input = 10
    # variable exciting force
f_base = vertcat(v1, v2, v3, (-q1) + (q2 - q1) - v1, (q1 - q2) + (q3 - q2) - v2,
(q2 - q3) - v3 + F_external + F_input * (1 * 0 + 1 * cos(np.pi * t)), 1)
# for c1
f_11 = f_base + vertcat(0, 0, 0, -0.3, 0, 0, 0)
f_12 = f_base + vertcat(0, 0, 0, +0.3, 0, 0, 0)
# for c2
f_21 = vertcat(0, 0, 0, 0, -0.3, 0, 0)
f_22 = vertcat(0, 0, 0, 0, 0.3, 0, 0)
# for c3
f_31 = vertcat(0, 0, 0, 0, 0, -0.3, 0)
f_32 = vertcat(0, 0, 0, 0, 0, 0.3, 0)
# in matrix form
F1 = horzcat(f_11, f_12)
F2 = horzcat(f_21, f_22)
F3 = horzcat(f_31, f_32)
F = [F1, F2, F3]
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0)
return model
def main():
# Simulation setings
opts = nosnoc.NosnocOpts()
opts.n_s = 2
opts.comp_tol = 1e-6
opts.homotopy_update_slope = .1
opts.N_finite_elements = 3
Tsim = 5
Nsim = 120
T_step = Tsim / Nsim
opts.terminal_time = T_step
opts.pss_mode = nosnoc.PssMode.STEWART
opts.irk_representation = nosnoc.IrkRepresentation.DIFFERENTIAL
# opts.initialization_strategy = nosnoc.InitializationStrategy.RK4_SMOOTHENED
# model
model = get_blocks_with_friction_model()
# solver
solver = nosnoc.NosnocSolver(opts, model)
n_exec = 1
for i in range(n_exec):
# simulation loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
if i == 0:
timings = results["cpu_nlp"]
else:
timings = np.minimum(timings, results["cpu_nlp"])
# evaluation
mean_timing = np.mean(np.sum(timings, axis=1))
print(f"mean timing solver call {mean_timing:.5f} s")
# plot timings
plot_title = f"{opts.irk_representation.name.lower()} IRK, init {opts.initialization_strategy.name.lower()}" # {opts.homotopy_update_rule.name}"
nosnoc.plot_timings(timings, title=plot_title)
# plot trajectory
plot_blocks(results["X_sim"], results["t_grid"])
import pdb
pdb.set_trace()
def plot_blocks(X_sim, t_grid, latexify=True):
# latexify plot
if latexify:
params = {
# "backend": "TkAgg",
"text.latex.preamble": r"\usepackage{gensymb} \usepackage{amsmath}",
"axes.labelsize": 10,
"axes.titlesize": 10,
"legend.fontsize": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"text.usetex": True,
"font.family": "serif",
}
matplotlib.rcParams.update(params)
plt.figure()
plt.plot(t_grid, X_sim)
plt.grid()
plt.show()
if __name__ == "__main__":
main()
| 4,517 | 25.892857 | 149 | py |
nosnoc_py | nosnoc_py-main/examples/auto_modeler/auto_model_friction.py | import nosnoc
from casadi import SX, vertcat, horzcat, sign
import numpy as np
import matplotlib.pyplot as plt
# example opts
illustrate_regions = True
TERMINAL_CONSTRAINT = True
LINEAR_CONTROL = True
X0 = np.array([0, 0, 0, 0, 0])
X_TARGET = np.array([0.01, 0, 0.01, 0, 0])
def get_motor_with_friction_ocp_description_with_auto_model():
# Parameters
m1 = 1.03 # slide mass
m2 = 0.56 # load mass
k = 2.4e3 # spring constant N/m
c_damping = 0.00 # damping
u_max = 5 # voltage Back-EMF, U = K_s*v_1
R = 2 # coil resistance ohm
L = 2e-3 # inductivity, henry
K_F = 12 # force constant N/A, F_L = K_F*I # Lorenz force
K_S = 12 # Vs/m (not provided in the paper above)
F_R = 2.1 # guide friction force, N
# model equations
    # Variable definition
x1 = SX.sym("x1")
x2 = SX.sym("x2")
v1 = SX.sym("v1")
v2 = SX.sym("v2")
I = SX.sym("I")
# electric current
x = vertcat(x1, v1, x2, v2, I)
n_x = nosnoc.casadi_length(x)
# control
u = SX.sym("u")
# the motor voltage
# Dynamics
A = np.array([
[0, 1, 0, 0, 0],
[-k / m1, -c_damping / m1, k / m1, c_damping / m1, K_F / m1],
[0, 0, 0, 1, 0],
[k / m2, c_damping / m2, -k / m2, -c_damping / m2, 0],
[0, -K_S / L, 0, 0, -R / L],
])
B = np.zeros((n_x, 1))
B[-1, 0] = 1 / L
C1 = np.array([0, -F_R / m1, 0, 0, 0]) # v1 >0
    # switching dynamics with different friction forces
f_nonsmooth = A @ x + B @ u + C1*sign(v1)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth, x0=X0, u=u)
model = am.reformulate()
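    # NosnocAutoModel takes the sign()-based nonsmooth ODE above and automatically
    # derives an equivalent switched (Filippov) model; compare motor_with_friction_ocp.py,
    # where the switching function c, sign matrix S and modes F are written out by hand.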
# constraints
lbu = -u_max * np.ones((1,))
ubu = u_max * np.ones((1,))
g_terminal = x - X_TARGET
# Stage cost
f_q = u**2
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, g_terminal=g_terminal)
return model, ocp
def get_default_options():
opts = nosnoc.NosnocOpts()
# opts.pss_mode = nosnoc.PssMode.STEP
opts.use_fesd = True
comp_tol = 1e-6
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.L2_RELAXED_SCALED
opts.print_level = 1
opts.N_stages = 30
opts.N_finite_elements = 2
opts.pss_mode = nosnoc.PssMode.STEWART
return opts
def solve_ocp(opts=None):
if opts is None:
opts = get_default_options()
[model, ocp] = get_motor_with_friction_ocp_description_with_auto_model()
opts.terminal_time = 0.08
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
# print(f"{results['u_traj']=}")
# print(f"{results['time_steps']=}")
return results
def example(plot=True):
results = solve_ocp()
if plot:
plot_motor_with_friction(
results["x_traj"],
results["u_traj"],
results["t_grid"],
results["t_grid_u"],
)
plot_time_steps(results["time_steps"])
def plot_motor_with_friction(x_traj, u_traj, t_grid, t_grid_u, latexify=True):
x_traj = np.array(x_traj)
if latexify:
nosnoc.latexify_plot()
plt.figure()
plt.subplot(4, 1, 1)
plt.plot(t_grid, x_traj[:, 0], label="x1")
plt.plot(t_grid, x_traj[:, 2], label="x2")
plt.ylabel("x")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 2)
plt.plot(t_grid, x_traj[:, 1], label="v1")
plt.plot(t_grid, x_traj[:, 3], label="v2")
plt.ylabel("v")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 3)
plt.plot(t_grid, x_traj[:, 4], label="I")
plt.ylabel("I")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 4)
plt.step(t_grid_u, [u_traj[0]] + u_traj, label="u")
plt.ylabel("u")
plt.xlabel("time [s]")
plt.grid()
plt.legend()
plt.show()
def plot_time_steps(t_steps):
n = len(t_steps)
plt.figure()
    plt.step(list(range(n)), t_steps)
plt.grid()
plt.ylabel("time_step [s]")
    plt.xlabel("time_step index")
plt.show()
if __name__ == "__main__":
example()
| 4,151 | 23.714286 | 78 | py |
nosnoc_py | nosnoc_py-main/examples/relay/relay_feedback_system.py | import nosnoc
from casadi import SX, horzcat
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
OMEGA = 25
XI = 0.05
SIGMA = 1
NX = 3
# Initial value
X0 = np.array([0, -0.001, -0.02])
## Info
# Simulation example from
# Piiroinen, Petri T., and Yuri A. Kuznetsov. "An event-driven method to simulate Filippov systems with
# accurate computing of sliding motions." ACM Transactions on Mathematical Software (TOMS) 34.3 (2008): 1-24.
# Equation (32)
# see also:
# M. di Bernardo, K. H. Johansson, and F. Vasca. Self-oscillations and sliding in
# relay feedback systems: Symmetry and bifurcations. International Journal of
# Bifurcations and Chaos, 11(4):1121-1140, 2001
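# The switching function is the first state x1: for x1 < 0 the affine term +b is active
# (f_11), for x1 > 0 it is -b (f_12), i.e. the relay feedback -sign(x1)*b acting on the
# linear dynamics A x.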
def get_relay_feedback_system_model():
# Variables
x = SX.sym("x", 3)
A = np.array([[-(2 * XI * OMEGA + 1), 1, 0], [-(2 * XI * OMEGA + OMEGA**2), 0, 1],
[-OMEGA**2, 0, 0]])
b = np.array([[1], [-2 * SIGMA], [1]])
c = [x[0]]
S = [np.array([[-1], [1]])]
f_11 = A @ x + b
f_12 = A @ x - b
F = [horzcat(f_11, f_12)]
return nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0)
def get_default_options() -> nosnoc.NosnocOpts:
opts = nosnoc.NosnocOpts()
opts.use_fesd = True
opts.pss_mode = nosnoc.PssMode.STEWART
opts.irk_scheme = nosnoc.IrkSchemes.RADAU_IIA
opts.N_finite_elements = 2
opts.n_s = 2
opts.mpcc_mode = nosnoc.MpccMode.SCHOLTES_INEQ
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.SUM_LAMBDAS_COMPLEMENT_WITH_EVERY_THETA
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.comp_tol = 1e-6
opts.print_level = 0
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.LINEAR
# opts.homotopy_update_exponent = 1.4
# opts.initialization_strategy = nosnoc.InitializationStrategy.RK4_SMOOTHENED
opts.irk_representation = nosnoc.IrkRepresentation.INTEGRAL
return opts
def main():
Tsim = 10
Nsim = 200
Tstep = Tsim / Nsim
opts = get_default_options()
opts.terminal_time = Tstep
model = get_relay_feedback_system_model()
solver = nosnoc.NosnocSolver(opts, model)
n_exec = 1
for i in range(n_exec):
# simulation loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
if i == 0:
timings = results["cpu_nlp"]
else:
timings = np.minimum(timings, results["cpu_nlp"])
# for isim in range(0, Nsim, 20):
# w_all_0 = results["w_all"][isim]
# nosnoc.plot_iterates(solver.problem, w_all_0[:2] + [w_all_0[-1]], title_list=['init RK4', '1st iter', f'solution {isim}'], figure_filename=f'relay_RK4_init_{isim}.pdf')
# plot trajectory
X_sim = results["X_sim"]
t_grid = results["t_grid"]
plot_system_trajectory(X_sim, t_grid=t_grid, figure_filename='relay_traj.pdf')
plot_algebraic_variables(results, figure_filename='relay_algebraic_traj.pdf')
# plot_system_3d(results)
# plot timings
filename = ""
filename = f"relay_timings_{datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S.%f')}.pdf"
plot_title = f"{opts.irk_representation.name.lower()} IRK, init {opts.initialization_strategy.name.lower()}" # {opts.homotopy_update_rule.name}"
nosnoc.plot_timings(results["cpu_nlp"], title=plot_title, figure_filename=filename)
plt.show()
def main_least_squares():
Tsim = 10
Nsim = 200
Tstep = Tsim / Nsim
opts = get_default_options()
opts.terminal_time = Tstep
# LSQ
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER_IP_AUG
opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
opts.initialization_strategy = nosnoc.InitializationStrategy.ALL_XCURRENT_W0_START
opts.print_level = 1
opts.sigma_0 = 1e0
opts.comp_tol = 1e-8
model = get_relay_feedback_system_model()
solver = nosnoc.NosnocSolver(opts, model)
n_exec = 1
for i in range(n_exec):
# simulation loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
if i == 0:
timings = results["cpu_nlp"]
else:
timings = np.minimum(timings, results["cpu_nlp"])
print(f"max cost_val = {max(results['cost_vals'])}")
# plot trajectory
X_sim = results["X_sim"]
t_grid = results["t_grid"]
plot_system_trajectory(X_sim, t_grid=t_grid, figure_filename='relay_traj.pdf')
plot_algebraic_variables(results, figure_filename='relay_algebraic_traj.pdf')
# plot_system_3d(results)
# plot timings
filename = ""
filename = f"relay_timings_{datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S.%f')}.pdf"
plot_title = f"{opts.irk_representation.name.lower()} IRK, init {opts.initialization_strategy.name.lower()}" # {opts.homotopy_update_rule.name}"
nosnoc.plot_timings(results["cpu_nlp"], title=plot_title, figure_filename=filename)
plt.show()
def main_rk4_simulation():
"""
    This example uses a smoothened STEP representation of the system and simulates it with an RK4 integrator.
"""
opts = nosnoc.NosnocOpts()
opts.use_fesd = True
opts.pss_mode = nosnoc.PssMode.STEWART
Tsim = 10
Nsim = 200
Nsim = 20000
# Tsim = 1
# Nsim = 20
Tstep = Tsim / Nsim
opts.terminal_time = Tstep
model = get_relay_feedback_system_model()
opts.preprocess()
model.preprocess_model(opts)
model.add_smooth_step_representation(smoothing_parameter=1e-4)
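    # NOTE: a smaller smoothing parameter approximates the nonsmooth dynamics more
    # closely, but makes the smoothed ODE stiffer, hence the comparatively large Nsim above.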
# smooth dynamics based on STEP
X_sim, t_grid = nosnoc.rk4(model.f_x_smooth_fun, model.x0, Tsim, Nsim)
#
plot_system_trajectory(X_sim, t_grid)
plt.show()
def plot_system_3d(results):
nosnoc.latexify_plot()
X_sim = results["X_sim"]
x1 = [x[0] for x in X_sim]
x2 = [x[1] for x in X_sim]
x3 = [x[2] for x in X_sim]
# plot 3d curve
plt.figure()
ax = plt.axes(projection="3d")
ax.plot3D(x1, x2, x3)
ax.set_xlabel("$x_1$")
ax.set_ylabel("$x_2$")
ax.set_zlabel("$x_3$")
ax.grid()
plt.show()
def plot_system_trajectory(X_sim, t_grid, figure_filename=''):
nosnoc.latexify_plot()
# state trajectory plot
plt.figure()
for i in range(NX):
plt.subplot(1, NX, i + 1)
plt.plot(t_grid, [x[i] for x in X_sim])
plt.grid()
plt.xlabel("$t$")
plt.ylabel(f"$x_{i+1}(t)$")
if figure_filename != '':
plt.savefig(figure_filename)
print(f'stored figure as {figure_filename}')
def plot_algebraic_variables(results, figure_filename=''):
nosnoc.latexify_plot()
# algebraic variables
plt.figure()
plt.subplot(2, 1, 1)
thetas = nosnoc.flatten_layer(results['theta_sim'], 0)
thetas = [thetas[0]] + thetas
lambdas = nosnoc.flatten_layer(results['lambda_sim'], 0)
lambdas = [lambdas[0]] + lambdas
n_lam = len(lambdas[0])
for i in range(n_lam):
plt.plot(results["t_grid"], [x[i] for x in lambdas], label=f'$\lambda_{i+1}$')
plt.grid()
plt.legend()
plt.subplot(2, 1, 2)
for i in range(n_lam):
plt.plot(results["t_grid"], [x[i] for x in thetas], label=r'$\theta_' + f'{i+1}$')
plt.grid()
plt.legend()
if figure_filename != '':
plt.savefig(figure_filename)
print(f'stored figure as {figure_filename}')
if __name__ == "__main__":
main()
# main_least_squares()
# main_rk4_simulation()
| 7,661 | 28.13308 | 182 | py |
nosnoc_py | nosnoc_py-main/examples/motor_with_friction/motor_with_friction_ocp.py | import nosnoc
from casadi import SX, vertcat, horzcat
import numpy as np
import matplotlib.pyplot as plt
# example opts
illustrate_regions = True
TERMINAL_CONSTRAINT = True
LINEAR_CONTROL = True
X0 = np.array([0, 0, 0, 0, 0])
X_TARGET = np.array([0.01, 0, 0.01, 0, 0])
def get_motor_with_friction_ocp_description():
# Parameters
m1 = 1.03 # slide mass
m2 = 0.56 # load mass
k = 2.4e3 # spring constant N/m
c_damping = 0.00 # damping
u_max = 5 # voltage Back-EMF, U = K_s*v_1
R = 2 # coil resistance ohm
L = 2e-3 # inductivity, henry
K_F = 12 # force constant N/A, F_L = K_F*I # Lorenz force
K_S = 12 # Vs/m (not provided in the paper above)
F_R = 2.1 # guide friction force, N
# model equations
    # Variable definition
x1 = SX.sym("x1")
x2 = SX.sym("x2")
v1 = SX.sym("v1")
v2 = SX.sym("v2")
I = SX.sym("I")
# electric current
x = vertcat(x1, v1, x2, v2, I)
n_x = nosnoc.casadi_length(x)
# control
u = SX.sym("u")
# the motor voltage
# Dynamics
A = np.array([
[0, 1, 0, 0, 0],
[-k / m1, -c_damping / m1, k / m1, c_damping / m1, K_F / m1],
[0, 0, 0, 1, 0],
[k / m2, c_damping / m2, -k / m2, -c_damping / m2, 0],
[0, -K_S / L, 0, 0, -R / L],
])
B = np.zeros((n_x, 1))
B[-1, 0] = 1 / L
C1 = np.array([0, -F_R / m1, 0, 0, 0]) # v1 >0
C2 = -C1 # v1<0
    # switching dynamics with different friction forces
f_1 = A @ x + B @ u + C1
# v1>0
f_2 = A @ x + B @ u + C2
# v1<0
# All modes
F = [horzcat(f_1, f_2)]
# Switching function
c = [v1]
S = [np.array([[1], [-1]])]
# constraints
lbu = -u_max * np.ones((1,))
ubu = u_max * np.ones((1,))
g_terminal = x - X_TARGET
# Stage cost
f_q = u**2
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, u=u)
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, g_terminal=g_terminal)
return model, ocp
def get_default_options():
opts = nosnoc.NosnocOpts()
# opts.pss_mode = nosnoc.PssMode.STEP
opts.use_fesd = True
comp_tol = 1e-6
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.L2_RELAXED_SCALED
opts.print_level = 1
opts.N_stages = 30
opts.N_finite_elements = 2
return opts
def solve_ocp(opts=None):
if opts is None:
opts = get_default_options()
[model, ocp] = get_motor_with_friction_ocp_description()
opts.terminal_time = 0.08
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
# print(f"{results['u_traj']=}")
# print(f"{results['time_steps']=}")
return results
def example(plot=True):
results = solve_ocp()
if plot:
plot_motor_with_friction(
results["x_traj"],
results["u_traj"],
results["t_grid"],
results["t_grid_u"],
)
plot_time_steps(results["time_steps"])
def plot_motor_with_friction(x_traj, u_traj, t_grid, t_grid_u, latexify=True):
x_traj = np.array(x_traj)
if latexify:
nosnoc.latexify_plot()
plt.figure()
plt.subplot(4, 1, 1)
plt.plot(t_grid, x_traj[:, 0], label="x1")
plt.plot(t_grid, x_traj[:, 2], label="x2")
plt.ylabel("x")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 2)
plt.plot(t_grid, x_traj[:, 1], label="v1")
plt.plot(t_grid, x_traj[:, 3], label="v2")
plt.ylabel("v")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 3)
plt.plot(t_grid, x_traj[:, 4], label="I")
plt.ylabel("I")
plt.xlabel("time [s]")
plt.legend()
plt.grid()
plt.subplot(4, 1, 4)
plt.step(t_grid_u, [u_traj[0]] + u_traj, label="u")
plt.ylabel("u")
plt.xlabel("time [s]")
plt.grid()
plt.legend()
plt.show()
def plot_time_steps(t_steps):
n = len(t_steps)
plt.figure()
    plt.step(list(range(n)), t_steps)
plt.grid()
plt.ylabel("time_step [s]")
    plt.xlabel("time_step index")
plt.show()
if __name__ == "__main__":
example()
| 4,192 | 22.823864 | 78 | py |
nosnoc_py | nosnoc_py-main/examples/periodic_stick_slip/periodic_stick_slip.py | import nosnoc
from casadi import SX, horzcat, vertcat
import numpy as np
import matplotlib.pyplot as plt
def get_periodic_slip_stick_model_codim1():
# Initial value
x0 = np.array([0.04, -0.01])
# Variables
x = SX.sym("x", 2)
c = [x[1] - 0.2]
S = [np.array([[-1], [1]])]
f_11 = vertcat(x[1], -x[0] + 1 / (1.2 - x[1]))
f_12 = vertcat(x[1], -x[0] - 1 / (0.8 + x[1]))
F = [horzcat(f_11, f_12)]
return nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0)
def get_periodic_slip_stick_model_codim2():
# Initial value
x0 = np.array([0.04, -0.01, -0.02])
# Variables
x = SX.sym("x", 3)
c = [vertcat(x[1] - 0.2, x[2] - 0.4)]
S = [SX(np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]]))]
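    # two switching functions -> 2^2 = 4 sign combinations; one row of S (and one column
    # of F below) per region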
f_11 = vertcat((x[1] + x[2]) / 2, -x[0] + 1 / (1.2 - x[1]), -x[0] + 1 / (1.4 - x[2]))
f_12 = vertcat((x[1] + x[2]) / 2, -x[0] + 1 / (1.2 - x[1]), -x[0] + 1 / (0.6 + x[2]))
f_13 = vertcat((x[1] + x[2]) / 2, -x[0] - 1 / (0.8 + x[1]), -x[0] + 1 / (1.4 - x[2]))
f_14 = vertcat((x[1] + x[2]) / 2 + x[0] * (x[1] + 0.8) * (x[2] + 0.6), -x[0] - 1 / (0.8 + x[1]),
-x[0] - 1 / (0.6 + x[2]))
F = [horzcat(f_11, f_12, f_13, f_14)]
return nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=x0)
def main_codim1():
opts = nosnoc.NosnocOpts()
opts.use_fesd = True
opts.pss_mode = nosnoc.PssMode.STEWART
opts.irk_scheme = nosnoc.IrkSchemes.RADAU_IIA
opts.N_finite_elements = 2
opts.n_s = 2
opts.mpcc_mode = nosnoc.MpccMode.SCHOLTES_INEQ
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.SUM_LAMBDAS_COMPLEMENT_WITH_EVERY_THETA
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.comp_tol = 1e-6
opts.equidistant_control_grid = False
opts.print_level = 1
Tsim = 40
Nsim = 100
Tstep = Tsim / Nsim
opts.terminal_time = Tstep
model = get_periodic_slip_stick_model_codim1()
solver = nosnoc.NosnocSolver(opts, model)
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
plot_system_codim1(results["X_sim"], results["t_grid"])
nosnoc.plot_timings(results["cpu_nlp"])
def main_codim2():
opts = nosnoc.NosnocOpts()
opts.use_fesd = True
opts.pss_mode = nosnoc.PssMode.STEWART
opts.irk_scheme = nosnoc.IrkSchemes.RADAU_IIA
opts.N_finite_elements = 3
opts.n_s = 4
opts.mpcc_mode = nosnoc.MpccMode.SCHOLTES_INEQ
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.SUM_LAMBDAS_COMPLEMENT_WITH_EVERY_THETA
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.comp_tol = 1e-9
opts.equidistant_control_grid = False
opts.print_level = 1
Tsim = 20
Nsim = 100
Tstep = Tsim / Nsim
opts.terminal_time = Tstep
model = get_periodic_slip_stick_model_codim2()
solver = nosnoc.NosnocSolver(opts, model)
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
plot_system_codim2(results["X_sim"], results["t_grid"])
nosnoc.plot_timings(results["cpu_nlp"])
def plot_system_codim1(X_sim, t_grid):
x1 = [x[0] for x in X_sim]
x2 = [x[1] for x in X_sim]
# state trajectory plot
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(t_grid, x1)
plt.xlabel("$t$")
plt.ylabel("$x_1(t)$")
plt.grid()
plt.subplot(1, 2, 2)
plt.plot(t_grid, x2)
plt.xlabel("$t$")
plt.ylabel("$x_2(t)$")
plt.grid()
# TODO figure for theta/alpha
plt.figure()
plt.plot(x1, x2)
plt.xlabel("$x_1(t)$")
plt.ylabel("$x_2(t)$")
plt.grid()
plt.show()
def plot_system_codim2(X_sim, t_grid):
x1 = [x[0] for x in X_sim]
x2 = [x[1] for x in X_sim]
x3 = [x[2] for x in X_sim]
# state trajectory plot
plt.figure()
plt.subplot(1, 3, 1)
plt.plot(t_grid, x1)
plt.xlabel("$t$")
plt.ylabel("$x_1(t)$")
plt.grid()
plt.subplot(1, 3, 2)
plt.plot(t_grid, x2)
plt.xlabel("$t$")
plt.ylabel("$x_2(t)$")
plt.grid()
plt.subplot(1, 3, 3)
plt.plot(t_grid, x3)
plt.xlabel("$t$")
plt.ylabel("$x_3(t)$")
plt.grid()
# TODO figure for theta/alpha
plt.figure()
plt.plot(x1, x3)
plt.xlabel("$x_1(t)$")
plt.ylabel("$x_3(t)$")
plt.grid()
plt.show()
if __name__ == "__main__":
#main_codim1()
main_codim2()
| 4,410 | 24.947059 | 100 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/hysteresis_car_control_time_optimal.py | """
Gearbox example with two modes.
This is the original gearbox example as described in the matlab implementation
and described in the paper:
Continuous Optimization for Control of Hybrid Systems with Hysteresis via Time-Freezing
A. Nurkanović, M. Diehl
IEEE Control Systems Letters (2022)
It is extended to follow a trajectory in addition to going to one end-position.
"""
import nosnoc
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
# Hysteresis parameters
v1 = 10
v2 = 15
# Model parameters
q_goal = 150
v_goal = 0
v_max = 30
u_max = 5
# fuel costs of turbo and nominal
Pn = 1
Pt = 2.5
def create_options():
"""Create nosnoc options."""
opts = nosnoc.NosnocOpts()
opts.print_level = 2
# Degree of interpolating polynomial
opts.n_s = 3
# === MPCC settings ===
# upper bound for elastic variables
opts.s_elastic_max = 1e1
# in penalty methods 1: J = J+(1/p)*J_comp (direct) , 0 : J = p*J+J_comp (inverse)
opts.objective_scaling_direct = 0
    # === Penalty/Relaxation parameter ===
# initial smoothing parameter
opts.sigma_0 = 1e1
# end smoothing parameter
opts.sigma_N = 1e-3 # 1e-10
# decrease rate
opts.homotopy_update_slope = 0.1
    # complementarity tolerance
opts.comp_tol = 1e-14
# IPOPT Settings
opts.nlp_max_iter = 500
# New setting: time freezing settings
opts.initial_theta = 0.5
opts.time_freezing = False
opts.pss_mode = nosnoc.PssMode.STEWART
opts.mpcc_mode = nosnoc.MpccMode.ELASTIC_TWO_SIDED
return opts
def create_gearbox_voronoi(u=None, q_goal=None, traject=None, use_traject=False):
"""Create a gearbox."""
if not use_traject and q_goal is None:
raise Exception("You should provide a traject or a q_goal")
# State variables:
q = ca.SX.sym("q") # position
v = ca.SX.sym("v") # velocity
L = ca.SX.sym("L") # Fuel usage
    w = ca.SX.sym('w')  # Auxiliary variable
t = ca.SX.sym('t') # Time variable
X = ca.vertcat(q, v, L, w, t)
X0 = np.array([0, 0, 0, 0, 0]).T
lbx = np.array([-ca.inf, 0, -ca.inf, -1, 0]).T
ubx = np.array([ca.inf, v_max, ca.inf, 2, ca.inf]).T
if use_traject:
p_traj = ca.SX.sym('traject')
else:
p_traj = ca.SX.sym('dummy', 0, 1)
# Controls
if u is None:
u = ca.SX.sym('u') # drive
s = ca.SX.sym('s') # Length of time
U = ca.vertcat(u, s)
lbu = np.array([-u_max, 0.5])
ubu = np.array([u_max, 20])
else:
s = 1
lbu = u
ubu = u
U = [u, s]
# Tracking gearbox:
psi = (v-v1)/(v2-v1)
z = ca.vertcat(psi, w)
Z = [
np.array([1 / 4, -1 / 4]),
np.array([1 / 4, 1 / 4]),
np.array([3 / 4, 3 / 4]),
np.array([3 / 4, 5 / 4])
]
g_ind = [ca.vertcat(*[
ca.norm_2(z - zi)**2 for zi in Z
])]
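    # Stewart indicator functions: g_i is the squared distance of z = (psi, w) to the
    # i-th Voronoi point, so the active mode is the one whose point is closest to the
    # current (psi, w) (Stewart's formulation selects the argmin of g).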
# Traject
f_q = 0
if use_traject:
print("use trajectory as cost")
f_q = 0.001 * (p_traj - q)**2
g_terminal = ca.vertcat(q-p_traj, v-v_goal)
else:
g_terminal = ca.vertcat(q-q_goal, v-v_goal)
f_terminal = t
# System dynamics
f_A = ca.vertcat(
v, u, Pn, 0, 1
)
f_B = ca.vertcat(
v, 3*u, Pt, 0, 1
)
a_push = 2
push_down_eq = -a_push * (psi - 1) ** 2 / (1 + (psi - 1)**2)
f_push_down = ca.vertcat(0, 0, 0, push_down_eq, 0)
push_up_eq = a_push * (psi)**2 / (1 + (psi)**2)
f_push_up = ca.vertcat(0, 0, 0, push_up_eq, 0)
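    # The push terms only act in the two middle Voronoi regions: roughly speaking, once
    # psi < 0 (v below v1) the auxiliary variable w is pushed back down towards 0, and
    # once psi > 1 (v above v2) it is pushed up towards 1, which realizes the hysteresis
    # loop between the two gear modes f_A and f_B.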
f_11 = s * (2 * f_A - f_push_down)
f_12 = s * (f_push_down)
f_13 = s * (f_push_up)
f_14 = s * (2 * f_B - f_push_up)
F = [ca.horzcat(f_11, f_12, f_13, f_14)]
if isinstance(U, ca.SX):
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, u=U, t_var=t,
p_time_var=p_traj,
p_time_var_val=traject,
name="gearbox"
)
else:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, t_var=t,
name="gearbox"
)
return model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_terminal
def plot(x_list, t_grid, u_list, t_grid_u):
"""Plot."""
q = [x[0] for x in x_list]
v = [x[1] for x in x_list]
aux = [x[-2] for x in x_list]
t = [x[-1] for x in x_list]
plt.figure()
plt.subplot(1, 3, 1)
    plt.plot(t_grid, x_list, label=[
        "$q$ (position)", "$v$ (speed)", "$L$ (cost)",
        "$w$ (auxiliary variable)", "$t$ (time)"
    ])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
if u_list is not None:
plt.subplot(1, 3, 2)
plt.plot(t_grid_u[:-1], u_list, label=["u", "s"])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(t, q, label="Position vs actual time")
plt.xlabel("Actual Time [$s$]")
plt.figure()
plt.plot([-2, 1], [0, 0], 'k')
plt.plot([0, 2], [1, 1], 'k')
plt.plot([-1, 0, 1, 2], [1.5, 1, 0, -.5], 'k')
psi = [(vi - v1) / (v2 - v1) for vi in v]
im = plt.scatter(psi, aux, c=t_grid, cmap=plt.hot())
im.set_label('Time')
plt.colorbar(im)
plt.xlabel("$\\psi(x)$")
plt.ylabel("$w$")
plt.show()
def simulation(u=25, Tsim=3, Nsim=30, with_plot=True):
    """Simulate the gearbox system with a fixed input."""
opts = create_options()
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_terminal = create_gearbox_voronoi(u=u, q_goal=q_goal)
Tstep = Tsim / Nsim
opts.N_finite_elements = 2
opts.N_stages = 1
opts.terminal_time = Tstep
opts.sigma_N = 1e-2
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
plot(results["X_sim"], results["t_grid"], None, None)
def control():
"""Execute one Control step."""
N = 3
traject = np.array([[q_goal * (i + 1) / N for i in range(N)]]).T
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_terminal = create_gearbox_voronoi(
q_goal=q_goal, traject=traject, use_traject=True
)
opts = create_options()
opts.N_finite_elements = 6
opts.n_s = 3
opts.N_stages = N
opts.terminal_time = 5
opts.time_freezing = False
opts.time_freezing_tolerance = 0.1
opts.nlp_max_iter = 10000
ocp = nosnoc.NosnocOcp(
lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal,
g_terminal=g_terminal,
lbx=lbx, ubx=ubx
)
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
plot(
results["x_traj"], results["t_grid"],
results["u_list"], results["t_grid_u"]
)
if __name__ == "__main__":
control()
| 6,720 | 26.100806 | 103 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/hysteresis_car_control_time_optimal_2d.py | """
Gearbox example with multiple modes.
Extension of the original model with two modes to three modes. The modes are still
given using one auxillary variable and one switching function. The voronoi regions
are thus given in a 2D space. It can easily be extended to N modes but the
hysteresis curves may not overlap.
"""
import nosnoc
from nosnoc.plot_utils import plot_voronoi_2d
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
from enum import Enum
import pickle
# Hysteresis parameters
v1 = 5
v2 = 10
# Model parameters
q_goal = 300
v_goal = 0
v_max = 30
u_max = 3
# fuel costs:
C = [1, 1.8, 2.5, 3.2]
# ratios
n = [1, 2, 3, 4]
class Stages(Enum):
"""Z Mode."""
TYPE_1_0 = 1
TYPE_2_0 = 2
TYPE_PAPER = 3
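    # TYPE_1_0:   one hysteresis loop  (2 gears,  4 Voronoi points)
    # TYPE_2_0:   two stacked loops    (3 gears,  8 Voronoi points)
    # TYPE_PAPER: three stacked loops  (4 gears, 12 Voronoi points)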
def create_options():
"""Create nosnoc options."""
opts = nosnoc.NosnocOpts()
opts.print_level = 2
# Degree of interpolating polynomial
opts.n_s = 2
# === MPCC settings ===
# upper bound for elastic variables
opts.s_elastic_max = 1e1
# in penalty methods 1: J = J+(1/p)*J_comp (direct) , 0 : J = p*J+J_comp (inverse)
opts.objective_scaling_direct = 0
    # === Penalty/Relaxation parameter ===
# initial smoothing parameter
opts.sigma_0 = 1e0
# end smoothing parameter
opts.sigma_N = 1e-6 # 1e-10
# decrease rate
opts.homotopy_update_slope = 0.1
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.comp_tol = 1e-6
# IPOPT Settings
opts.nlp_max_iter = 1500
opts.initial_theta = 0.5
opts.time_freezing = False
opts.pss_mode = nosnoc.PssMode.STEWART
opts.mpcc_mode = nosnoc.MpccMode.ELASTIC_TWO_SIDED
return opts
def push_equation(a_push, psi, zero_point):
"""Eval push equation."""
return a_push * (psi - zero_point) ** 2 / (1 + (psi - zero_point)**2)
def gamma_eq(a_push, x):
"""Gamma equation."""
return a_push * x**2 / (1 + x**2)
def create_gearbox_voronoi(u=None, q_goal=None, mode=Stages.TYPE_PAPER,
psi_shift_2=1.5):
"""Create a gearbox."""
# State variables:
q = ca.SX.sym("q") # position
v = ca.SX.sym("v") # velocity
L = ca.SX.sym("L") # Fuel usage
    w = ca.SX.sym('w')  # Auxiliary variable
t = ca.SX.sym('t') # Time variable
X = ca.vertcat(q, v, L, w, t)
X0 = np.array([0, 0, 0, 0, 0]).T
lbx = np.array([-ca.inf, -v_max, -ca.inf, -1, 0]).T
ubx = np.array([ca.inf, v_max, ca.inf, 2, ca.inf]).T
# Controls
if u is None:
u = ca.SX.sym('u') # drive
s = ca.SX.sym('s') # Length of time
U = ca.vertcat(u)
lbu = np.array([-u_max])
ubu = np.array([u_max])
else:
s = 1
lbu = u
ubu = u
U = [u, s]
# Tracking gearbox:
if mode == Stages.TYPE_1_0:
Z = [
np.array([1 / 4, -1 / 4]),
np.array([1 / 4, 1 / 4]),
np.array([3 / 4, 3 / 4]),
np.array([3 / 4, 5 / 4])
]
psi = (v-v1)/(v2-v1)
elif mode == Stages.TYPE_2_0:
if psi_shift_2 <= 1.0:
print("Due to overlapping hysteresis curves, "
"this method might give a wrong result!")
if psi_shift_2 <= 0.51:
raise Exception("Regions overlap and this method will fail")
Z = [
np.array([1/4, -1/4]), # Original mode 1
np.array([1/4, 1/4]), # Original mode 2
np.array([3/4, 3/4]), # Original mode 3
np.array([3/4, 5/4]), # Original mode 4
np.array([psi_shift_2 + 1/4, 1 + -1/4]), # Similar to mode 1
np.array([psi_shift_2 + 1/4, 1 + 1/4]), # Similar to mode 2
np.array([psi_shift_2 + 3/4, 1 + 3/4]), # Similar to mode 3
np.array([psi_shift_2 + 3/4, 1 + 5/4]), # Similar to mode 4
]
psi = (v-v1)/(v2-v1)
elif mode == Stages.TYPE_PAPER:
Z = [
np.array([1/4, -1/4]), # Original mode 1
np.array([1/4, 1/4]), # Original mode 2
np.array([3/4, 3/4]), # Original mode 3
np.array([3/4, 5/4]), # Original mode 4
np.array([psi_shift_2 + 1/4, 1 + -1/4]), # Similar to mode 1
np.array([psi_shift_2 + 1/4, 1 + 1/4]), # Similar to mode 2
np.array([psi_shift_2 + 3/4, 1 + 3/4]), # Similar to mode 3
np.array([psi_shift_2 + 3/4, 1 + 5/4]), # Similar to mode 4
np.array([2 * psi_shift_2 + 1/4, 2 + -1/4]), # Similar to mode 1
np.array([2 * psi_shift_2 + 1/4, 2 + 1/4]), # Similar to mode 2
np.array([2 * psi_shift_2 + 3/4, 2 + 3/4]), # Similar to mode 3
np.array([2 * psi_shift_2 + 3/4, 2 + 5/4]), # Similar to mode 4
]
psi = (v-v1)/(v2-v1)
z = ca.vertcat(psi, w)
g_ind = [ca.vertcat(*[
ca.norm_2(z - zi)**2 for zi in Z
])]
# Traject
f_q = 0
g_path = 0
g_terminal = ca.vertcat(q-q_goal, v-v_goal)
f_terminal = t
# System dynamics
f_A = ca.vertcat(
v, n[0]*u, C[0], 0, 1
)
f_B = ca.vertcat(
v, n[1]*u, C[1], 0, 1
)
f_C = ca.vertcat(
v, n[2]*u, C[2], 0, 1
)
f_D = ca.vertcat(
v, n[3]*u, C[3], 0, 1
)
a_push = 4
push_down_eq = push_equation(-a_push, psi, 1)
push_up_eq = push_equation(a_push, psi, 0)
f_push_down = ca.vertcat(0, 0, 0, push_down_eq, 0)
f_push_up = ca.vertcat(0, 0, 0, push_up_eq, 0)
if mode == Stages.TYPE_1_0:
f_1 = [
s * (2 * f_A - f_push_down),
s * (f_push_down),
s * (f_push_up),
s * (2 * f_B - f_push_up)
]
elif mode == Stages.TYPE_2_0 or mode == Stages.TYPE_PAPER:
push_down_eq = push_equation(-a_push, psi, 1+psi_shift_2)
push_up_eq = push_equation(a_push, psi, psi_shift_2)
f_push_up_1 = ca.vertcat(0, 0, 0, push_up_eq, 0)
f_push_down_1 = ca.vertcat(0, 0, 0, push_down_eq, 0)
f_1 = [
s * (2 * f_A - f_push_down),
s * (f_push_down),
s * (f_push_up),
s * (2 * f_B - f_push_up),
s * (2 * f_B - f_push_down_1),
s * (f_push_down_1),
s * (f_push_up_1),
s * (2 * f_C - f_push_up_1),
]
if mode == Stages.TYPE_PAPER:
push_down_eq = push_equation(-a_push, psi, 1+2*psi_shift_2)
push_up_eq = push_equation(a_push, psi, 2*2*psi_shift_2)
f_push_up_2 = ca.vertcat(0, 0, 0, push_up_eq, 0)
f_push_down_2 = ca.vertcat(0, 0, 0, push_down_eq, 0)
f_1.extend([
s * (2 * f_C - f_push_down_2),
s * (f_push_down_2),
s * (f_push_up_2),
s * (2 * f_D - f_push_up_2),
])
F = [ca.horzcat(*f_1)]
if isinstance(U, ca.SX):
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, u=U, t_var=t,
v_global=s,
name="gearbox"
)
else:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, t_var=t,
name="gearbox"
)
return model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z
def plot(x_list, t_grid, u_list, t_grid_u, Z):
"""Plot."""
q = [x[0] for x in x_list]
v = [x[1] for x in x_list]
aux = [x[-2] for x in x_list]
t = [x[-1] for x in x_list]
plt.figure()
plt.subplot(1, 3, 1)
    plt.plot(t_grid, x_list, label=[
        "$q$ (position)", "$v$ (speed)", "$L$ (cost)",
        "$w$ (auxiliary variable)", "$t$ (time)"
    ])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
if u_list is not None:
plt.subplot(1, 3, 2)
plt.plot(t_grid_u[:-1], u_list, label=["u", "s"])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(t, q, label="Position vs actual time")
plt.xlabel("Actual Time [$s$]")
ax = plot_voronoi_2d(Z, show=False, annotate=True)
psi = [(vi - v1) / (v2 - v1) for vi in v]
im = ax.scatter(psi, aux, c=t_grid, cmap=plt.hot())
im.set_label('Time')
plt.colorbar(im, ax=ax)
ax.set_xlabel("$\\psi(x)$")
ax.set_ylabel("$w$")
plt.show()
def simulation(u=25, Tsim=6, Nsim=30, with_plot=True):
    """Simulate the gearbox system with a fixed input."""
opts = create_options()
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
u=u, q_goal=q_goal
)
Tstep = Tsim / Nsim
opts.N_finite_elements = 2
opts.N_stages = 1
opts.terminal_time = Tstep
opts.sigma_N = 1e-2
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
print(f"Ends in zone: {np.argmax(results['theta_sim'][-1][-1])}")
print(results['theta_sim'][-1][-1])
plot(results["X_sim"], results["t_grid"], None, None, Z)
def control():
"""Execute one Control step."""
N = 15
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
q_goal=q_goal,
)
opts = create_options()
opts.N_finite_elements = 6
opts.N_stages = N
opts.terminal_time = 10
opts.nlp_max_iter = 3000
ocp = nosnoc.NosnocOcp(
lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal,
lbv_global=np.array([0.1]), ubv_global=np.array([1e3]),
g_terminal=g_terminal,
lbx=lbx, ubx=ubx
)
solver = nosnoc.NosnocSolver(opts, model, ocp)
solver.set('v_global', np.array([1]))
opts.initialization_strategy = nosnoc.InitializationStrategy.EXTERNAL
solver.set('x', np.vstack((
np.linspace(0, q_goal, N),
np.linspace(0, v_goal, N),
np.zeros((2, N)),
np.ones((1, N))
)).T)
solver.set('u', np.vstack((
np.zeros((1, N)),
)).T)
results = solver.solve()
plot(
results["x_traj"], results["t_grid"],
results["u_list"], results["t_grid_u"], Z
)
with open("data_2d.pickle", "wb") as f:
pickle.dump(results, f)
if __name__ == "__main__":
control()
| 10,197 | 28.994118 | 95 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/general_plot.py | from sys import argv
import matplotlib.pyplot as plt
import numpy as np
import pickle
from nosnoc.plot_utils import plot_colored_line_3d
def interp0(x, xp, yp):
"""Zeroth order hold interpolation w/ same
(base) signature as numpy.interp."""
def func(x0):
if x0 <= xp[0]:
return yp[0]
if x0 >= xp[-1]:
return yp[-1]
k = 0
while k < len(xp) and x0 > xp[k]:
k += 1
return yp[k-1]
if isinstance(x,float):
return func(x)
elif isinstance(x, list):
return [func(x) for x in x]
elif isinstance(x, np.ndarray):
return np.asarray([func(x) for x in x])
else:
raise TypeError('argument must be float, list, or ndarray')
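    # example: interp0(0.7, [0.0, 0.5, 1.0], [1, 2, 3]) returns 2, i.e. the value at the
    # last grid point not larger than x0 is held.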
def filter_t(x, t):
"""Filter based on dt."""
dt = [t2 - t1 for t1, t2 in zip(t, t[1:])]
out = []
for xi, dti in zip(x, dt):
if dti > 1e-3:
out.append(xi)
out.append(x[-1])
return out
def plot(x_list, t_grid, u_list, t_grid_u):
"""Plot."""
q = [x[0] for x in x_list]
v = [x[1] for x in x_list]
w1 = [x[-2] for x in x_list]
w2 = [x[-3] for x in x_list]
aux = [x[-2] + x[-3] for x in x_list]
t = [x[-1] for x in x_list]
u = [u[0] for u in u_list]
print("Error")
    print(np.sqrt((300 - q[-1])**2 + v[-1]**2))
try:
s = [u[1] for u in u_list]
print(s)
except Exception:
pass
plt.figure()
plt.plot(t, q)
    v_aux = [max((q[i+1] - q[i]) / max(1e-9, t[i+1] - t[i]), 0) for i in range(len(t)-1)]
ax = plot_colored_line_3d(v, w1, w2, t)
ax.set_ylim(-1, 2)
ax.set_zlim(0, 2)
ax.set_xlabel("$v(t)$")
ax.set_ylabel("$w_1(t)$")
ax.set_zlabel("$w_2(t)$")
plt.figure()
plt.plot(t[:-1], v_aux)
plt.plot(t, v)
plt.figure()
plt.subplot(2, 2, 1)
plt.plot(filter_t(t, t), filter_t(v, t))
plt.xlabel("$t$")
plt.ylabel("$v(t)$")
plt.subplot(2, 2, 2)
plt.plot(filter_t(t, t), filter_t(interp0(t_grid, t_grid_u, u), t))
plt.xlabel("$t$")
plt.ylabel("$u(t)$")
plt.subplot(2, 2, 3)
plt.plot(t, aux)
plt.xlabel("$t$")
plt.ylabel("$w_1(t) + w_2(t)$")
plt.subplot(2, 2, 4)
plt.plot(v, aux)
plt.xlabel("$v$")
plt.ylabel("$w_1(t) + w_2(t)$")
plt.show()
if len(argv) <= 1:
file = "data_3d.pickle"
else:
file = argv[1]
with open(file, "rb") as f:
results = pickle.load(f)
print(f"{results['v_global']=}")
plot(
results["x_traj"], results["t_grid"],
results["u_list"], results["t_grid_u"]
)
| 2,563 | 22.962617 | 87 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/hysteresis_car_control_time_optimal_3d.py | """
Gearbox example with multiple modes.
Extension of the original model with two modes to three modes. The modes are
given by two auxiliary variables and one switching function. The Voronoi regions
are thus given in a 3D space. In this formulation the hysteresis curves may
overlap, and the problem is solved faster than with the 2D version.
"""
import pickle
import nosnoc
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
from enum import Enum
from nosnoc.plot_utils import plot_voronoi_2d, plot_colored_line_3d
# Hysteresis parameters
v1 = 5
v2 = 10
# Model parameters
q_goal = 300
v_goal = 0
v_max = 30
u_max = 3
# fuel costs:
C = [1, 1.8, 2.5, 3.2]
# ratios
n = [1, 2, 3, 4]
def calc_dist(a, b):
"""Calculate distance."""
    print(np.linalg.norm(a - b))
class Stages(Enum):
"""Z Mode."""
TYPE_1_0 = 1
TYPE_2_0 = 2
TYPE_PAPER = 3
def create_options():
"""Create nosnoc options."""
opts = nosnoc.NosnocOpts()
opts.print_level = 2
# Degree of interpolating polynomial
opts.n_s = 2
# === MPCC settings ===
# upper bound for elastic variables
opts.s_elastic_max = 1e1
# in penalty methods 1: J = J+(1/p)*J_comp (direct) , 0 : J = p*J+J_comp (inverse)
opts.objective_scaling_direct = 0
    # === Penalty/Relaxation parameter ===
# initial smoothing parameter
opts.sigma_0 = 1e0
# end smoothing parameter
opts.sigma_N = 1e-6 # 1e-10
# decrease rate
opts.homotopy_update_slope = 0.1
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.SUPERLINEAR
opts.comp_tol = 1e-6
# IPOPT Settings
opts.nlp_max_iter = 1500
opts.initial_theta = 0.5
opts.time_freezing = False
opts.pss_mode = nosnoc.PssMode.STEWART
opts.mpcc_mode = nosnoc.MpccMode.ELASTIC_TWO_SIDED
return opts
def push_equation(a_push, psi, zero_point):
"""Eval push equation."""
multip = 1
return a_push * multip * (psi - zero_point) ** 2 / (1 + multip * (psi - zero_point)**2)
def create_gearbox_voronoi(use_simulation=False, q_goal=None, traject=None,
use_traject=False, use_traject_constraint=True,
shift=0.5, mode=Stages.TYPE_PAPER):
"""Create a gearbox."""
if not use_traject and q_goal is None:
raise Exception("You should provide a traject or a q_goal")
# State variables:
q = ca.SX.sym("q") # position
v = ca.SX.sym("v") # velocity
L = ca.SX.sym("L") # Fuel usage
    w1 = ca.SX.sym('w1')  # Auxiliary variable
    w2 = ca.SX.sym('w2')  # Auxiliary variable
t = ca.SX.sym('t') # Time variable
X = ca.vertcat(q, v, L, w1, w2, t)
X0 = np.array([0, 0, 0, 0, 0, 0]).T
lbx = np.array([0, -v_max, -ca.inf, 0, 0, 0]).T
ubx = np.array([ca.inf, v_max, ca.inf, 2, 2, ca.inf]).T
if use_traject:
p_traj = ca.SX.sym('traject')
else:
p_traj = ca.SX.sym('dummy', 0, 1)
# Controls
if not use_simulation:
u = ca.SX.sym('u') # drive
s = ca.SX.sym('s') # Length of time
U = ca.vertcat(u)
lbu = np.array([-u_max])
ubu = np.array([u_max])
else:
u = ca.SX.sym('u') # drive
s = 1
lbu = u
ubu = u
U = [u]
# Tracking gearbox:
psi = (v-v1)/(v2-v1)
z = ca.vertcat(psi, w1, w2)
a = 1/4
b = 1/4
if mode == Stages.TYPE_1_0:
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0])
]
elif mode == Stages.TYPE_2_0:
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0]),
np.array([b + shift, 1, -a]),
np.array([b + shift, 1, a]),
np.array([1-b + shift, 1, 1-a]),
np.array([1-b + shift, 1, 1+a])
]
elif mode == Stages.TYPE_PAPER:
# Shift can be 0.5 or 2
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0]),
np.array([b + shift, 1, -a]),
np.array([b + shift, 1, a]),
np.array([1 - b + shift, 1, 1-a]),
np.array([1 - b + shift, 1, 1+a]),
np.array([b + 2 * shift, 1-a, 1]),
np.array([b + 2 * shift, 1+a, 1]),
np.array([1 - b + 2 * shift, 2-a, 1]),
np.array([1 - b + 2 * shift, 2+a, 1])
]
print(f"{mode=} {shift=}")
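    # Lifting the hysteresis into (psi, w1, w2): consecutive loops are shifted along
    # different auxiliary axes (w1 for the 1st and 3rd loop, w2 for the 2nd), so their
    # Voronoi cells stay separated even when the psi-ranges overlap, which is what
    # limits the 2D formulation.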
g_ind = [ca.vertcat(*[
ca.norm_2(z - zi)**2 for zi in Z
])]
# Traject
f_q = 0
g_path = 0
if use_traject:
if use_traject_constraint:
print("use trajectory as constraint")
g_path = p_traj - q
else:
print("use trajectory as cost")
f_q = 0.001 * (p_traj - q)**2
g_terminal = ca.vertcat(q-p_traj, v-v_goal)
else:
g_terminal = ca.vertcat(q-q_goal, v-v_goal)
f_terminal = t
# System dynamics
f_A = ca.vertcat(
v, n[0]*u, C[0], 0, 0, 1
)
f_B = ca.vertcat(
v, n[1]*u, C[1], 0, 0, 1
)
f_C = ca.vertcat(
v, n[2]*u, C[2], 0, 0, 1
)
f_D = ca.vertcat(
v, n[3]*u, C[3], 0, 0, 1
)
a_push = 2
push_down_eq = push_equation(-a_push, psi, 1)
push_up_eq = push_equation(a_push, psi, 0)
f_push_down_w1 = ca.vertcat(0, 0, 0, push_down_eq, 0, 0)
f_push_up_w1 = ca.vertcat(0, 0, 0, push_up_eq, 0, 0)
if mode == Stages.TYPE_1_0:
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1)
]
elif mode == Stages.TYPE_2_0:
push_down_eq = push_equation(-a_push, psi, 1 + shift)
push_up_eq = push_equation(a_push, psi, 0 + shift)
f_push_down_w2 = ca.vertcat(0, 0, 0, 0, push_down_eq, 0)
f_push_up_w2 = ca.vertcat(0, 0, 0, 0, push_up_eq, 0)
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1),
s * (2 * f_B - f_push_down_w2),
s * (f_push_down_w2),
s * (f_push_up_w2),
s * (2 * f_C - f_push_up_w2),
]
elif mode == Stages.TYPE_PAPER:
push_down_eq = push_equation(-a_push, psi, 1 + shift)
push_up_eq = push_equation(a_push, psi, 0 + shift)
f_push_down_w2 = ca.vertcat(0, 0, 0, 0, push_down_eq, 0)
f_push_up_w2 = ca.vertcat(0, 0, 0, 0, push_up_eq, 0)
push_down_eq = push_equation(-a_push, psi, 1 + 2 * shift)
push_up_eq = push_equation(a_push, psi, 0 + 2 * shift)
f_push_down_w3 = ca.vertcat(0, 0, 0, push_down_eq, 0, 0)
f_push_up_w3 = ca.vertcat(0, 0, 0, push_up_eq, 0, 0)
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1),
s * (2 * f_B - f_push_down_w2),
s * (f_push_down_w2),
s * (f_push_up_w2),
s * (2 * f_C - f_push_up_w2),
s * (2 * f_C - f_push_down_w3),
s * (f_push_down_w3),
s * (f_push_up_w3),
s * (2 * f_D - f_push_up_w3),
]
F = [ca.horzcat(*f_1)]
if not use_simulation:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, u=U, t_var=t,
p_time_var=p_traj,
p_time_var_val=traject, v_global=s,
name="gearbox"
)
else:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, t_var=t,
p_global=u, p_global_val=np.array([0]),
name="gearbox"
)
return model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z
def plot(x_list, t_grid, u_list, t_grid_u, Z):
"""Plot."""
q = [x[0] for x in x_list]
v = [x[1] for x in x_list]
w1 = [x[-3] for x in x_list]
w2 = [x[-2] for x in x_list]
aux = [x[-2] + x[-3] for x in x_list]
t = [x[-1] for x in x_list]
plt.figure()
plt.subplot(1, 3, 1)
plt.plot(t_grid, x_list, label=[
"$q$ (position)", "$v$ (speed)", "$L$ (cost)",
"$w$ (auxillary variable)", "$w_2$", "$t$ (time)"
])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
if u_list is not None:
plt.subplot(1, 3, 2)
plt.plot(t_grid_u[:-1], u_list, label=["u", "s"])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(t, q, label="Position vs actual time")
plt.xlabel("Actual Time [$s$]")
Z_2d = [[z[0], z[1] + z[2]] for z in Z]
psi = [(vi - v1) / (v2 - v1) for vi in v]
ax = plot_voronoi_2d(Z_2d, show=False)
    im = ax.scatter(psi, aux, c=t_grid, cmap='hot')
im.set_label('Time')
plt.colorbar(im, ax=ax)
plt.xlabel("$\\psi(x)$")
plt.ylabel("$w$")
plot_colored_line_3d(psi, w1, w2, t)
plt.show()
def simulation(Tsim=6, Nsim=30, with_plot=True, shift=0.5, mode=Stages.TYPE_PAPER):
"""Simulate the temperature control system with a fixed input."""
opts = create_options()
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
use_simulation=True, q_goal=q_goal, mode=mode, shift=shift
)
Tstep = Tsim / Nsim
opts.N_finite_elements = 2
opts.N_stages = 1
opts.terminal_time = Tstep
opts.sigma_N = 1e-2
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, p_values=np.array([[
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
10, 10, 10, 10, 10, 10, -10, -10, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
]]).T)
looper.run()
results = looper.get_results()
print(f"Ends in zone: {np.argmax(results['theta_sim'][-1][-1])}")
print(results['theta_sim'][-1][-1])
plot(results["X_sim"], results["t_grid"], None, None, Z=Z)
def custom_callback(prob, w_opt, lambda0, x0, iteration):
"""Make feasible."""
if iteration < 3:
return w_opt
ind_x_all = nosnoc.utils.flatten_outer_layers(prob.ind_x, 2)
x_sol = w_opt[ind_x_all]
w1 = x_sol[:, -3]
w2 = x_sol[:, -2]
w = w1 + w2
# Bring w to the plane:
w = np.clip(w, 0, 6)
w1 = w // 2 + np.clip(w % 2, 0, 1)
w2 = w // 2 + np.clip(w % 2 - 1, 0, 1)
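    # Redistribute the clipped sum w = w1 + w2 back onto the staircase: both
    # components get the common integer part w // 2, and the remainder is assigned
    # to w1 first (up to one unit) and then to w2, so w1 + w2 is preserved.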
    x_sol[:, -3] = w1
    x_sol[:, -2] = w2
w_opt[ind_x_all] = x_sol
return w_opt
def control(shift=1.5, mode=Stages.TYPE_PAPER):
"""Execute one Control step."""
N = 15
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
q_goal=q_goal, shift=shift, mode=mode
)
opts = create_options()
opts.N_finite_elements = 6
opts.N_stages = N
opts.terminal_time = 10
opts.initialization_strategy = nosnoc.InitializationStrategy.EXTERNAL
ocp = nosnoc.NosnocOcp(
lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal,
g_terminal=g_terminal,
lbv_global=np.array([0.1]), ubv_global=np.array([1e3]),
lbx=lbx, ubx=ubx
)
solver = nosnoc.NosnocSolver(opts, model, ocp)
# solver.callback = custom_callback
solver.set('v_global', np.array([1]))
solver.set('x', np.vstack((
np.linspace(0, q_goal, N),
np.linspace(0, v_goal, N),
np.zeros((3, N)),
np.ones((1, N))
)).T)
solver.set('u', np.vstack((
np.zeros((1, N)),
)).T)
results = solver.solve()
solver.model.w0 = results['w_sol']
plot(
results["x_traj"], results["t_grid"],
results["u_list"], results["t_grid_u"], Z=Z
)
with open("data_3d.pickle", "wb") as f:
pickle.dump(results, f)
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
print("TYPE 2")
control(shift=float(argv[1]), mode=Stages.TYPE_2_0)
elif len(argv) > 2:
print("TYPE 3")
control(shift=float(argv[1]), mode=Stages.TYPE_PAPER)
else:
control()
| 12,179 | 28.42029 | 95 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/hysteresis_car_control_time_optimal_3d_best_now_2stage.py | """
Gearbox example with multiple modes.
Extension of the original two-mode model to three modes. The modes are
given by two auxiliary variables and one switching function. The Voronoi regions
are thus given in a 3D space. The hysteresis curves can overlap in this 3D space,
and the problem is solved faster than in the 2D version.
"""
import pickle
import nosnoc
import casadi as ca
import numpy as np
from math import ceil, log
import matplotlib.pyplot as plt
from enum import Enum
from nosnoc.plot_utils import plot_voronoi_2d, plot_colored_line_3d
# Hysteresis parameters
v1 = 10
v2 = 14
# Model parameters
q_goal = 150
v_goal = 0
v_max = 25
u_max = 5
# fuel costs:
C = [1, 1.8, 2.5, 3.2]
# ratios
n = [1, 2, 3, 4]
def calc_dist(a, b):
"""Calculate distance."""
    print(np.linalg.norm(a - b))
class ZMode(Enum):
"""Z Mode."""
TYPE_1_0 = 1
TYPE_2_0 = 2
PAPER_TYPE_2 = 3
def create_options():
"""Create nosnoc options."""
opts = nosnoc.NosnocOpts()
opts.print_level = 2
# Degree of interpolating polynomial
opts.n_s = 3
# === MPCC settings ===
# upper bound for elastic variables
opts.s_elastic_max = 1e1
# in penalty methods 1: J = J+(1/p)*J_comp (direct) , 0 : J = p*J+J_comp (inverse)
opts.objective_scaling_direct = 0
    # === Penalty/Relaxation parameter ===
# initial smoothing parameter
opts.sigma_0 = 1.0
# end smoothing parameter
opts.sigma_N = 1e-2 # 1e-10
# decrease rate
opts.homotopy_update_slope = 0.1
# number of steps
opts.N_homotopy = ceil(abs(
log(opts.sigma_N / opts.sigma_0) / log(opts.homotopy_update_slope))) + 1
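    # i.e. the number of homotopy iterations needed to go from sigma_0 to sigma_N
    # when sigma is multiplied by homotopy_update_slope each iteration; with the
    # values above (1.0 -> 1e-2, slope 0.1) this evaluates to 3.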
opts.comp_tol = 1e-14
# IPOPT Settings
opts.nlp_max_iter = 5000
# New setting: time freezing settings
opts.pss_mode = nosnoc.PssMode.STEWART
opts.mpcc_mode = nosnoc.MpccMode.SCHOLTES_INEQ
return opts
def push_equation(a_push, psi, zero_point):
"""Eval push equation."""
multip = 1
return a_push * multip * (psi - zero_point) ** 2 / (1 + multip * (psi - zero_point)**2) + 1e-6
def create_gearbox_voronoi(use_simulation=False, q_goal=None, traject=None,
use_traject=False, use_traject_constraint=True,
shift=0.5, mode=ZMode.PAPER_TYPE_2):
"""Create a gearbox."""
if not use_traject and q_goal is None:
raise Exception("You should provide a traject or a q_goal")
# State variables:
q = ca.SX.sym("q") # position
v = ca.SX.sym("v") # velocity
L = ca.SX.sym("L") # Fuel usage
    w1 = ca.SX.sym('w1')  # Auxiliary variable
    w2 = ca.SX.sym('w2')  # Auxiliary variable
t = ca.SX.sym('t') # Time variable
X = ca.vertcat(q, v, L, w1, w2, t)
X0 = np.array([0, 0, 0, 0, 0, 0]).T
lbx = np.array([0, 0, -ca.inf, 0, 0, 0]).T
ubx = np.array([ca.inf, v_max, ca.inf, 2, 2, ca.inf]).T
if use_traject:
p_traj = ca.SX.sym('traject')
else:
p_traj = ca.SX.sym('dummy', 0, 1)
# Controls
if not use_simulation:
u = ca.SX.sym('u') # drive
s = ca.SX.sym('s') # Length of time
U = ca.vertcat(u)
lbu = np.array([-u_max])
ubu = np.array([u_max])
else:
u = ca.SX.sym('u') # drive
s = 1
lbu = u
ubu = u
U = [u]
# Tracking gearbox:
psi = (v-v1)/(v2-v1)
z = ca.vertcat(psi, w1, w2)
a = 1/4
b = 1/4
if mode == ZMode.TYPE_1_0:
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0])
]
elif mode == ZMode.TYPE_2_0:
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0]),
np.array([b + shift, 1, -a]),
np.array([b + shift, 1, a]),
np.array([1-b + shift, 1, 1-a]),
np.array([1-b + shift, 1, 1+a])
]
elif mode == ZMode.PAPER_TYPE_2:
# Shift can be 0.5 or 2
Z = [
np.array([b, -a, 0]),
np.array([b, a, 0]),
np.array([1-b, 1-a, 0]),
np.array([1-b, 1+a, 0]),
np.array([b + shift, 1, -a]),
np.array([b + shift, 1, a]),
np.array([1 - b + shift, 1, 1-a]),
np.array([1 - b + shift, 1, 1+a]),
np.array([b + 2 * shift, 1-a, 1]),
np.array([b + 2 * shift, 1+a, 1]),
np.array([1 - b + 2 * shift, 2-a, 1]),
np.array([1 - b + 2 * shift, 2+a, 1])
]
g_ind = [ca.vertcat(*[
ca.norm_2(z - zi)**2 for zi in Z
])]
# Traject
f_q = 0
g_path = 0
if use_traject:
if use_traject_constraint:
print("use trajectory as constraint")
g_path = p_traj - q
else:
print("use trajectory as cost")
f_q = 0.001 * (p_traj - q)**2
g_terminal = ca.vertcat(q-p_traj, v-v_goal)
else:
g_terminal = ca.vertcat(q-q_goal, v-v_goal)
f_terminal = t
# System dynamics
f_A = ca.vertcat(
v, n[0]*u, C[0], 0, 0, 1
)
f_B = ca.vertcat(
v, n[1]*u, C[1], 0, 0, 1
)
f_C = ca.vertcat(
v, n[2]*u, C[2], 0, 0, 1
)
f_D = ca.vertcat(
v, n[3]*u, C[3], 0, 0, 1
)
a_push = 2
push_down_eq = push_equation(-a_push, psi, 1)
push_up_eq = push_equation(a_push, psi, 0)
f_push_down_w1 = ca.vertcat(0, 0, 0, push_down_eq, 0, 0)
f_push_up_w1 = ca.vertcat(0, 0, 0, push_up_eq, 0, 0)
if mode == ZMode.TYPE_1_0:
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1)
]
elif mode == ZMode.TYPE_2_0:
push_down_eq = push_equation(-a_push, psi, 1 + shift)
push_up_eq = push_equation(a_push, psi, 0 + shift)
f_push_down_w2 = ca.vertcat(0, 0, 0, 0, push_down_eq, 0)
f_push_up_w2 = ca.vertcat(0, 0, 0, 0, push_up_eq, 0)
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1),
s * (2 * f_B - f_push_down_w2),
s * (f_push_down_w2),
s * (f_push_up_w2),
s * (2 * f_C - f_push_up_w2),
]
elif mode == ZMode.PAPER_TYPE_2:
push_down_eq = push_equation(-a_push, psi, 1 + shift)
push_up_eq = push_equation(a_push, psi, 0 + shift)
f_push_down_w2 = ca.vertcat(0, 0, 0, 0, push_down_eq, 0)
f_push_up_w2 = ca.vertcat(0, 0, 0, 0, push_up_eq, 0)
push_down_eq = push_equation(-a_push, psi, 1 + 2 * shift)
push_up_eq = push_equation(a_push, psi, 0 + 2 * shift)
f_push_down_w3 = ca.vertcat(0, 0, 0, push_down_eq, 0, 0)
f_push_up_w3 = ca.vertcat(0, 0, 0, push_up_eq, 0, 0)
f_1 = [
s * (2 * f_A - f_push_down_w1),
s * (f_push_down_w1),
s * (f_push_up_w1),
s * (2 * f_B - f_push_up_w1),
s * (2 * f_B - f_push_down_w2),
s * (f_push_down_w2),
s * (f_push_up_w2),
s * (2 * f_C - f_push_up_w2),
s * (2 * f_C - f_push_down_w3),
s * (f_push_down_w3),
s * (f_push_up_w3),
s * (2 * f_D - f_push_up_w3),
]
F = [ca.horzcat(*f_1)]
if not use_simulation:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, u=U, t_var=t,
p_time_var=p_traj,
p_time_var_val=traject, v_global=s,
name="gearbox"
)
else:
model = nosnoc.NosnocModel(
x=X, F=F, g_Stewart=g_ind, x0=X0, t_var=t,
p_global=u, p_global_val=np.array([0]),
name="gearbox"
)
return model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z
def plot(x_list, t_grid, u_list, t_grid_u, Z):
"""Plot."""
q = [x[0] for x in x_list]
v = [x[1] for x in x_list]
w1 = [x[-3] for x in x_list]
w2 = [x[-2] for x in x_list]
aux = [x[-2] + x[-3] for x in x_list]
t = [x[-1] for x in x_list]
plt.figure()
plt.subplot(1, 3, 1)
plt.plot(t_grid, x_list, label=[
"$q$ (position)", "$v$ (speed)", "$L$ (cost)",
"$w$ (auxillary variable)", "$w_2$", "$t$ (time)"
])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
if u_list is not None:
plt.subplot(1, 3, 2)
plt.plot(t_grid_u[:-1], u_list, label=["u", "s"])
plt.xlabel("Simulation Time [$s$]")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(t, q, label="Position vs actual time")
plt.xlabel("Actual Time [$s$]")
Z_2d = [[z[0], z[1] + z[2]] for z in Z]
psi = [(vi - v1) / (v2 - v1) for vi in v]
ax = plot_voronoi_2d(Z_2d, show=False)
    im = ax.scatter(psi, aux, c=t_grid, cmap='hot')
im.set_label('Time')
plt.colorbar(im, ax=ax)
plt.xlabel("$\\psi(x)$")
plt.ylabel("$w$")
plot_colored_line_3d(psi, w1, w2, t)
plt.show()
def simulation(Tsim=6, Nsim=30, with_plot=True, shift=0.5, mode=ZMode.PAPER_TYPE_2):
"""Simulate the temperature control system with a fixed input."""
opts = create_options()
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
use_simulation=True, q_goal=q_goal, mode=mode, shift=shift
)
Tstep = Tsim / Nsim
opts.N_finite_elements = 2
opts.N_stages = 1
opts.terminal_time = Tstep
opts.sigma_N = 1e-2
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, p_values=np.array([[
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
10, 10, 10, 10, 10, 10, -10, -10, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
]]).T)
looper.run()
results = looper.get_results()
print(f"Ends in zone: {np.argmax(results['theta_sim'][-1][-1])}")
print(results['theta_sim'][-1][-1])
plot(results["X_sim"], results["t_grid"], None, None, Z=Z)
def custom_callback(prob, w_opt, lambda0, x0, iteration):
"""Make feasible."""
if iteration < 1:
return w_opt
ind_x_all = nosnoc.utils.flatten_outer_layers(prob.ind_x, 2)
x_sol = w_opt[ind_x_all]
w1 = x_sol[:, -3]
w2 = x_sol[:, -2]
w = w1 + w2
# Bring w to the plane:
w = np.clip(w, 0, 6)
w1 = w // 2 + np.clip(w % 2, 0, 1)
w2 = w // 2 + np.clip(w % 2 - 1, 0, 1)
    x_sol[:, -3] = w1
    x_sol[:, -2] = w2
w_opt[ind_x_all] = x_sol
return w_opt
def control(shift=0.5, mode=ZMode.PAPER_TYPE_2):
"""Execute one Control step."""
N = 10
# traject = np.array([[q_goal * (i + 1) / N for i in range(N)]]).T
model, lbx, ubx, lbu, ubu, f_q, f_terminal, g_path, g_terminal, Z = create_gearbox_voronoi(
q_goal=q_goal, shift=shift, mode=mode
)
opts = create_options()
opts.N_finite_elements = 3
opts.n_s = 2
opts.N_stages = N
opts.terminal_time = 10
opts.initialization_strategy = nosnoc.InitializationStrategy.EXTERNAL
ocp = nosnoc.NosnocOcp(
lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal,
g_terminal=g_terminal,
lbv_global=np.array([0.1]), ubv_global=np.array([1e3]),
lbx=lbx, ubx=ubx
)
solver = nosnoc.NosnocSolver(opts, model, ocp)
# solver.callback = custom_callback
solver.set('v_global', np.array([1]))
solver.set('x', np.vstack((
np.linspace(0, q_goal, N),
np.linspace(0, v_goal, N),
np.zeros((3, N)),
np.ones((1, N))
)).T)
solver.set('u', np.vstack((
np.zeros((1, N)),
)).T)
results = solver.solve()
solver.model.w0 = results['w_sol']
plot(
results["x_traj"], results["t_grid"],
results["u_list"], results["t_grid_u"], Z=Z
)
with open("data_3d.pickle", "wb") as f:
pickle.dump(results, f)
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
print("TYPE 2")
control(shift=float(argv[1]), mode=ZMode.TYPE_2_0)
elif len(argv) > 2:
print("TYPE 3")
control(shift=float(argv[1]), mode=ZMode.PAPER_TYPE_2)
else:
control()
| 12,312 | 28.527578 | 98 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/minlp/general_plot.py | """General plotting."""
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
import pickle
def interp0(x, xp, yp):
"""Create zeroth order hold interpolation w/ same base signature as numpy.interp."""
def func(x0):
if x0 <= xp[0]:
return yp[0]
if x0 >= xp[-1]:
return yp[-1]
k = 0
while k < len(xp) and k < len(yp) and x0 > xp[k]:
k += 1
return yp[k-1]
if isinstance(x, float):
return func(x)
elif isinstance(x, list):
return [func(x) for x in x]
elif isinstance(x, np.ndarray):
return np.asarray([func(x) for x in x])
else:
raise TypeError('argument must be float, list, or ndarray')
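# Hold behaviour of interp0, worked out by hand from the logic above (not executed):
#   interp0(0.7, [0.0, 1.0, 2.0], [10.0, 20.0, 30.0])  -> 10.0
#   interp0(1.5, [0.0, 1.0, 2.0], [10.0, 20.0, 30.0])  -> 20.0
#   interp0(2.5, [0.0, 1.0, 2.0], [10.0, 20.0, 30.0])  -> 30.0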
def plot(x_list, y_list, T_final, u):
"""Plot."""
q = x_list[:, 0]
v = x_list[:, 1]
print("error")
print(np.sqrt(300-q[-1])**2 + v[-1]**2)
# L = x_list[:,2]
aux = np.zeros((y_list.shape[0],))
for i in range(y_list.shape[1]):
aux += i * y_list[:, i]
N = x_list.shape[0]
t = [T_final / (N - 1) * i for i in range(N)]
N_fe = int((len(t) - 1) / (len(aux) - 1))
t_grid_aux = [t[i] for i in range(0, len(t), N_fe)][:-1]
aux = aux[1:]
plt.figure()
plt.plot(t, q)
v_aux = [max((q[i+1] - q[i]) / max(1e-9, t[i+1] - t[i]), 0)
for i in range(len(t)-1)]
plt.figure()
plt.plot(t[:-1], v_aux, label="V recalc")
plt.plot(t, v, label="V actual")
plt.legend()
plt.figure()
plt.subplot(2, 2, 1)
plt.plot(t, v)
plt.xlabel("$t$")
plt.ylabel("$v(t)$")
plt.subplot(2, 2, 2)
plt.plot(t, interp0(t, t_grid_aux, u))
plt.xlabel("$t$")
plt.ylabel("$u(t)$")
plt.subplot(2, 2, 3)
plt.scatter(t_grid_aux, aux)
plt.plot(t_grid_aux, aux)
plt.xlabel("$t$")
plt.ylabel("$w_1(t) + w_2(t)$")
plt.subplot(2, 2, 4)
plt.scatter([v[i] for i in range(0, len(t)-1, N_fe)], aux)
plt.plot([v[i] for i in range(0, len(t)-1, N_fe)], aux)
plt.xlabel("$v$")
plt.ylabel("$w_1(t) + w_2(t)$")
plt.show()
if len(argv) <= 1:
file = "data_minlp.pickle"
else:
file = argv[1]
with open(file, "rb") as f:
results = pickle.load(f)
try:
    print(f"runtime: {results['runtime']}")
except KeyError:
    pass
# breakpoint()
# results['slack']
plt.subplot(6, 1, 1)
plt.plot(np.array(results["Xk"])[:, 1])
for i, name in enumerate(["LknUp", "LkUp", "LknDown", "LkDown", "Yk"]):
plt.subplot(6, 1, i+2)
var = np.array(results[name])
plt.plot(
var, label=[f"{name}{j}" for j in range(var.shape[1])]
)
plt.legend()
plt.show()
print(results['T_final'])
plot(
np.array(results["Xk"]),
np.array(results["Yk"]),
results['T_final'],
np.array(results["U"]),
)
| 2,757 | 23.40708 | 88 | py |
nosnoc_py | nosnoc_py-main/examples/hysteresis_car_control/minlp/car_example_dsc.py | """Create MINLP for the hysteresis car problem."""
from typing import Union, List, Optional, Dict
from nosnoc.rk_utils import generate_butcher_tableu_integral, IrkSchemes
import casadi as ca
from casadi import MX, SX, vertcat, inf
import numpy as np
import pickle
from sys import argv
from time import perf_counter
def tic():
"""Tic."""
global perf_ti
perf_ti = perf_counter()
def toc():
"""Toc."""
global perf_ti
tim = perf_counter()
dt = tim - perf_ti
print(" Elapsed time: %s s." % (dt))
perf_ti = tim
return dt
def make_list(value, nr=1):
"""Make list."""
if not isinstance(value, list):
return [value] * nr
else:
return value
class Description:
"""Description for Casadi."""
def __init__(self, variable_type=SX):
"""Create description."""
self.g = []
self.ubg = []
self.lbg = []
self.w = []
self.w0 = []
self.indices = {} # Names with indices
self.p = []
self.p0 = []
self.indices_p = {} # Names with indices
self.lbw = []
self.ubw = []
self.discrete = []
self.f = 0
self.solver = None
self.solution = None
self.variable_type = variable_type
# For big M reformulations:
self.M = 1e4
self.eps = 1e-9
def add_g(self, mini: float, equation: Union[SX, MX], maxi: float) -> int:
"""
Add to g.
:param mini: minimum
:param equation: constraint equation
:param maxi: maximum
:return: index of constraint
"""
nr = equation.shape[0] * equation.shape[1]
self.lbg += make_list(mini, nr)
self.g += make_list(equation)
self.ubg += make_list(maxi, nr)
return len(self.ubg) - 1
def leq(self, op1, op2) -> int:
"""Lower or equal."""
if isinstance(op1, (float, int, list)):
op1 = make_list(op1)
nr = len(op1)
return self.add_g(op1, op2, [inf] * nr)
elif isinstance(op2, (float, int, list)):
op2 = make_list(op2)
nr = len(op2)
return self.add_g([-inf] * nr, op1, op2)
else:
diff = op1 - op2
assert (diff.shape[1] == 1)
nr = diff.shape[0]
return self.add_g([-inf] * nr, diff, [0] * nr)
def eq(self, op1, op2) -> int:
"""Equal."""
diff = op1 - op2
assert (diff.shape[1] == 1)
nr = diff.shape[0]
return self.add_g([0] * nr, op1 - op2, [0] * nr)
def equal_if_on(self, trigger, equality):
"""Big M formulation."""
diff = (1 - trigger)
self.leq(-self.M * diff, equality)
self.leq(equality, self.M * diff)
def higher_if_on(self, trigger, equation):
"""Trigger = 1 if equation > -eps else lower."""
self.leq(-(1-trigger) * self.M, equation - self.eps)
self.leq(equation + self.eps, self.M * trigger)
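    # Both helpers are standard big-M indicator encodings: `equal_if_on` makes the
    # equality binding only when trigger = 1 (otherwise it is relaxed by +-M), and
    # `higher_if_on` couples trigger = 1 to the equation being nonnegative, with
    # `eps` as a small margin that separates the two cases.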
def sym_bool(
self, name: str, nr: int = 1,
) -> Union[MX, SX]:
"""Create a symbolic boolean."""
return self.sym(name, nr, 0, 1, x0=1, discrete=True)
def sym(
self,
name: str,
nr: int,
lb: Union[float, List[float]],
ub: Union[float, List[float]],
x0: Optional[Union[float, List[float]]] = None,
discrete: bool = False,
) -> Union[MX, SX]:
"""Create a symbolic variable."""
# Gather Data
if name not in self.indices:
self.indices[name] = []
idx_list = self.indices[name]
# Create
x = self.variable_type.sym("%s[%d]" % (name, len(idx_list)), nr)
lb = make_list(lb, nr)
ub = make_list(ub, nr)
if x0 is None:
x0 = lb
x0 = make_list(x0, nr)
if len(lb) != nr:
raise Exception("Lower bound error!")
if len(ub) != nr:
breakpoint()
raise Exception("Upper bound error!")
if len(x0) != nr:
raise Exception("Estimation length error (x0 %d vs nr %d)!"
% (len(x0), nr))
# Collect
out = self.add_w(lb, x, ub, x0, discrete, name=name)
idx_list.append(out)
return x
def add_w_discrete(
self,
lb: Union[float, List[float]],
w: Union[MX, SX],
ub: Union[float, List[float]],
name=None
) -> List:
"""Add a discrete, existing symbolic variable."""
return self.add_w(lb, w, ub, True, name=name)
def add_w(
self,
lb: Union[float, List[float]],
w: Union[MX, SX],
ub: Union[float, List[float]],
x0: Optional[Union[float, List[float]]],
discrete: bool = False,
name=None
):
"""Add an existing symbolic variable."""
idx = [i + len(self.lbw) for i in range(len(lb))]
self.w += [w]
self.lbw += lb
self.ubw += ub
self.w0 += x0
self.discrete += [1 if discrete else 0] * len(lb)
return idx
def add_parameters(self, name, nr, values=0):
"""Add some parameters."""
# Gather Data
if name not in self.indices_p:
self.indices_p[name] = []
idx_list = self.indices_p[name]
# Create
p = self.variable_type.sym("%s[%d]" % (name, len(idx_list)), nr)
values = make_list(values, nr)
if len(values) != nr:
raise Exception("Values error!")
# Create & Collect
new_idx = [i + len(self.p0) for i in range(nr)]
self.p += [p]
self.p0 += values
self.indices_p[name].extend(new_idx)
return p
def set_parameters(self, name, values):
"""Set parameters."""
idx = self.indices_p[name]
values = make_list(values, len(idx))
if len(idx) != len(values):
raise Exception(
"Idx (%d) and values (%d) should be equally long for %s!" %
(len(idx), len(values), name)
)
for i, v in zip(idx, values):
self.p0[i] = v
def get_nlp(self) -> Dict:
"""Get nlp description."""
res = {'x': vertcat(*(self.w)),
'f': self.f,
'g': vertcat(*(self.g))}
if len(self.p0) > 0:
res['p'] = vertcat(*(self.p))
return res
def get_options(self, is_discrete=True, **kwargs):
"""Get options."""
if is_discrete and 1 in self.discrete:
out = {'discrete': self.discrete}
out.update(kwargs)
return out
return kwargs
def set_solver(self, solver, name: str = None, options: Dict = None,
is_discrete=True, **kwargs):
"""
Set the solver.
        Set the related solver using a line similar to this:
nlpsol('solver', 'ipopt', dsc.get_nlp(), dsc.get_options())
:param solver: solver type (for example nlpsol from casadi)
:param name: Name of the actual solver
:param options: Dictionary of extra options
"""
if options is None:
options = {}
if name is None:
self.solver = solver
else:
self.solver = solver(
'solver', name, self.get_nlp(**kwargs),
self.get_options(is_discrete=is_discrete, **options)
)
def get_solver(self):
"""Return solver."""
return self.solver
def get_indices(self, name: str):
"""
Get indices of a certain variable.
:param name: name
"""
return self.indices[name]
def solve(self, auto_update=False, **kwargs):
"""Solve the problem."""
params = {
'lbx': self.lbw,
'ubx': self.ubw,
'lbg': self.lbg,
'ubg': self.ubg,
'x0': self.w0
}
params.update(kwargs)
if len(self.p0) > 0 and "p0" not in params:
params["p"] = self.p0
self.solution = self.solver(**params)
self.stats = self.solver.stats()
if auto_update:
self.w0 = self.solution['x']
return self.solution
def is_solved(self):
"""Check if it is solved."""
return self.stats['success']
def get(self, name: str):
"""Get solution for a name."""
if name not in self.indices:
raise Exception(
f"{name} not found, options {self.indices.keys()}"
)
if self.solution is not None:
out = []
for el in self.indices[name]:
if isinstance(el, list):
out.append([float(self.solution['x'][i]) for i in el])
else:
out.append(float(self.solution['x'][el]))
while isinstance(out, list) and len(out) == 1:
out = out[0]
return out
def create_problem(time_as_parameter=False, use_big_M=False, more_stages=True, problem1=True):
"""Create problen."""
# Parameters
if more_stages:
N_stages = 15 * 2
N_finite_elements = 3
else:
N_stages = 15
N_finite_elements = 3 * 2
N_control_intervals = 1
n_s = 2
use_collocation = True
B_irk, C_irk, D_irk, tau = generate_butcher_tableu_integral(
n_s, IrkSchemes.RADAU_IIA
)
    # Hysteresis parameters
if problem1:
psi_on = [10, 17.5, 25]
psi_off = [5, 12.5, 20]
else:
psi_on = [10, 12.5, 15]
psi_off = [5, 7.5, 10]
# Model parameters
q_goal = 300
v_goal = 0
v_max = 30
u_max = 3
# fuel costs:
C = [1, 1.8, 2.5, 3.2]
# ratios
n = [1, 2, 3, 4]
# State variables:
q = ca.SX.sym("q") # position
v = ca.SX.sym("v") # velocity
L = ca.SX.sym("L") # Fuel usage
X = ca.vertcat(q, v, L)
X0 = [0, 0, 0]
lbx = [0, 0, -ca.inf]
ubx = [ca.inf, v_max, ca.inf]
n_x = 3
# Binaries to represent the problem:
n_y = len(n)
Y0 = np.zeros(n_y)
Y0[0] = 1
u = ca.SX.sym('u') # drive
n_u = 1
U0 = np.zeros(n_u)
lbu = np.array([-u_max])
ubu = np.array([u_max])
# x = q v L
X = ca.vertcat(q, v, L)
F_dyn = [
ca.Function(f'f_dyn_{i}', [X, u], [ca.vertcat(
v, n[i]*u, C[i]
)]) for i in range(len(n))
]
psi = v
psi_fun = ca.Function('psi', [X], [psi])
# Create problem:
opti = Description()
# Time optimal control
if not time_as_parameter:
T_final = opti.sym("T_final", 1, lb=5, ub=1e2, x0=20)
# Cost: time only
J = T_final
eps = 0.01
else:
T_final = opti.add_parameters("T_final", 1, values=15)
J = 0
eps = 0
h = T_final/(N_stages*N_control_intervals*N_finite_elements)
Xk = opti.sym("Xk", n_x, lb=X0, ub=X0, x0=X0)
Yk = opti.sym_bool("Yk", n_y)
for i in range(n_y):
opti.eq(Yk[i], Y0[i])
for _ in range(N_stages):
Ykp = Yk
Yk = opti.sym_bool("Yk", n_y)
opti.add_g(1-eps, ca.sum1(Yk), 1+eps) # SOS1
# Transition condition
LknUp = opti.sym_bool("LknUp", n_y-1)
LknDown = opti.sym_bool("LknDown", n_y-1)
# Transition
LkUp = opti.sym_bool("LkUp", n_y-1)
LkDown = opti.sym_bool("LkDown", n_y-1)
psi = psi_fun(Xk)
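        # Hysteresis automaton of the gearbox, encoded with binaries: LknUp/LknDown
        # flag whether psi has crossed the on/off threshold of a gear (big-M
        # triggers below), LkUp/LkDown are the actual up/down transitions, allowed
        # only if the trigger fired and the source gear was active, and forced when
        # both hold. Yk is then Ykp shifted by the transitions, and the SOS1
        # constraint above keeps exactly one gear active.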
for i in range(n_y-1):
# Trigger
# If psi > psi_on -> psi - psi_on >= 0 -> LknUp = 1
opti.higher_if_on(LknUp[i], psi - psi_on[i])
opti.higher_if_on(LknDown[i], psi_off[i] - psi)
# Only if trigger is ok, go up
opti.leq(LkUp[i], LknUp[i])
opti.leq(LkDown[i], LknDown[i])
            # # Only go up if active - already constrained using the
# # Yk[i] = Ykprev + ...
opti.leq(LkUp[i], Ykp[i])
opti.leq(LkDown[i], Ykp[i+1])
# Force going up if trigger = 1 and in right state!
opti.leq(LknUp[i] + Ykp[i] - 1, LkUp[i])
opti.leq(LknDown[i] + Ykp[i + 1] - 1, LkDown[i])
# # Also force jump of two:
# if i > 0:
# opti.leq(LknUp[i] + LknUp[i-1] + Ykp[i-1] - 2, LkUp[i])
# if i < n_y - 2:
# opti.leq(LknDown[i] + LknDown[i+1] + Ykp[i+2] - 2, LkDown[i])
for i in range(n_y):
prev = Ykp[i]
if i > 0:
prev += LkUp[i-1] - LkDown[i-1]
if i < n_y - 1:
prev += LkDown[i] - LkUp[i]
opti.eq(prev, Yk[i])
for i in range(n_y):
# # Add bounds to avoid late switch! (tolerance of 1)
eps_bnd = 1
if i > 0:
opti.leq(Yk[i] - 1, (psi + eps_bnd - psi_on[i-1]) / v_max)
for _ in range(N_control_intervals):
Uk = opti.sym("U", n_u, lb=lbu, ub=ubu, x0=U0)
for j in range(N_finite_elements):
if not use_collocation:
# NEWTON FWD
Xkp = Xk
Xk = opti.sym("Xk", n_x, lb=lbx, ub=ubx, x0=X0)
if use_big_M:
for ii in range(n_y):
eq = Xk - (Xkp + h * F_dyn[ii](Xk, Uk))
for iii in range(n_x):
opti.equal_if_on(Yk[ii], eq[iii])
else:
eq = 0
for ii in range(n_y):
eq += Yk[ii] * F_dyn[ii](Xk, Uk)
opti.eq(Xk - Xkp, h * eq)
else:
print("Collocation")
Xk_end = 0
X_fe = [
opti.sym("Xc", n_x, lb=lbx, ub=ubx, x0=X0)
for _ in range(n_s)
]
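                    # Collocation on this finite element: xj combines Xk and the
                    # stage states X_fe through the tableau coefficients C_irk, and
                    # Xk_end accumulates the end-of-element state via D_irk. The
                    # stage conditions h*f(., Uk) = xj are imposed mode-wise below,
                    # either per gear with big-M or as a Yk-weighted sum.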
for j in range(n_s):
xj = C_irk[0, j + 1] * Xk
for r in range(n_s):
xj += C_irk[r + 1, j + 1] * X_fe[r]
Xk_end += D_irk[j + 1] * X_fe[j]
if use_big_M:
print("with big M")
for iy in range(n_y):
eq = h * F_dyn[iy](X_fe[j], Uk) - xj
for iii in range(n_x):
opti.equal_if_on(Yk[iy], eq[iii])
else:
eq = 0
for iy in range(n_y):
eq += Yk[iy] * F_dyn[iy](xj, Uk)
opti.add_g(-1e-7, h * eq - xj, 1e-7)
# J = J + L*B_irk*h;
Xk = opti.sym("Xk", n_x, lb=lbx, ub=ubx, x0=X0)
opti.eq(Xk, Xk_end)
Ykp = Yk
# Terminal constraints:
slack1 = opti.sym('slack', 1, lb=0, ub=1)
slack2 = opti.sym('slack', 1, lb=0, ub=1)
opti.leq(Xk[0] - q_goal, slack1)
opti.leq(q_goal - Xk[0], slack1)
opti.leq(Xk[1] - v_goal, slack2)
opti.leq(v_goal - Xk[1], slack2)
opti.eq(Yk[0], 1)
# J: value function = time
opti.f = J + slack1 + slack2
return opti
def run_bonmin(problem1=True):
"""Run bonmin."""
opti = create_problem(use_big_M=True, more_stages=False, problem1=problem1)
opti.set_solver(ca.nlpsol, 'bonmin', is_discrete=True,
options={"bonmin.time_limit": 7200})
tic()
opti.solve()
opti.runtime = toc()
return opti, opti.get("T_final")
def run_ipopt():
"""Run ipopt."""
opti = create_problem()
opti.set_solver(ca.nlpsol, 'ipopt', is_discrete=False)
tic()
opti.solve()
opti.runtime = toc()
return opti, opti.get("T_final")
def run_gurobi(problem1=True):
"""Run gurobi."""
opti = create_problem(time_as_parameter=True, use_big_M=True, more_stages=True, problem1=problem1)
opti.set_solver(
ca.qpsol, "gurobi", is_discrete=True,
options={
"error_on_fail": False,
# "gurobi": {
# "Threads": 1,
# }
}
)
T_max = 40
T_min = 1
tolerance = 1e-5
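    # Time-optimal T_final by bisection: for a fixed T_final the problem is a
    # feasibility MIQP that Gurobi can solve, so the interval [T_min, T_max] is
    # halved depending on whether the fixed-time problem is feasible, until its
    # width drops below `tolerance`.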
lb_k = [T_min]
ub_k = [T_max]
solution = None
T_opt = None
itr = 0
tic()
while ub_k[-1] - lb_k[-1] > tolerance:
itr += 1
T_new = (ub_k[-1] + lb_k[-1]) / 2
opti.set_parameters("T_final", T_new)
opti.solve()
if opti.is_solved():
print(f"SUCCES {T_new=}")
# Success
ub_k.append(T_new)
T_opt = T_new
solution = opti.solution
else:
print(f"INF {T_new=}")
lb_k.append(T_new)
runtime = toc()
print(f"TOLERANCE {ub_k[-1] - lb_k[-1]} - {itr=}")
opti.solution = solution
opti.runtime = runtime
return opti, T_opt
if __name__ == "__main__":
    if len(argv) < 4:
        print("Usage: car_example_dsc.py <gurobi|bonmin> <1|2> <outputfile>")
exit(1)
gurobi = "gur" in argv[1]
problem1 = "1" in argv[2]
outputfile = argv[3]
print(f"{gurobi=} {problem1=} {outputfile=}")
if gurobi:
opti, T_final = run_gurobi(problem1=problem1)
else:
opti, T_final = run_bonmin(problem1=problem1)
print(T_final)
Xk = np.array(opti.get("Xk"))
# plt.plot(Xk)
# plt.show()
print(opti.get("Yk"))
data = {
key: opti.get(key)
for key in opti.indices.keys()
}
data["T_final"] = T_final
data["runtime"] = opti.runtime
with open(outputfile, "wb") as f:
pickle.dump(data, f)
| 17,657 | 28.09061 | 102 | py |
nosnoc_py | nosnoc_py-main/examples/simplest/simplest_example.py | import numpy as np
from casadi import SX, horzcat
import matplotlib.pyplot as plt
import nosnoc
TOL = 1e-9
# Analytic solution
EXACT_SWITCH_TIME = 1 / 3
TSIM = np.pi / 4
# Initial Value
X0 = np.array([-1.0])
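# With x0 = -1 the active mode has f = 3 (x < 0), so x(t) = -1 + 3t reaches the
# switching surface x = 0 at t = 1/3, the EXACT_SWITCH_TIME above. The "sliding"
# variant then stays on x = 0, while the "switch" variant continues with f = 1.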
def get_default_options():
opts = nosnoc.NosnocOpts()
opts.comp_tol = TOL
opts.N_finite_elements = 2
opts.n_s = 2
return opts
def get_simplest_model_sliding():
    # Variable definition
x1 = SX.sym("x1")
x = x1
# every constraint function corresponds to a sys (note that the c_i might be vector valued)
c = [x1]
# sign matrix for the modes
S = [np.array([[-1], [1]])]
f_11 = 3
f_12 = -1
# in matrix form
F = [horzcat(f_11, f_12)]
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, name='simplest_sliding')
return model
def get_simplest_model_switch():
    # Variable definition
x1 = SX.sym("x1")
x = x1
# every constraint function corresponds to a sys (note that the c_i might be vector valued)
c = [x1]
# sign matrix for the modes
S = [np.array([[-1], [1]])]
f_11 = 3
f_12 = 1
# in matrix form
F = [horzcat(f_11, f_12)]
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, name='simplest_switch')
return model
def solve_simplest_example(opts=None, model=None):
if opts is None:
opts = get_default_options()
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.pss_mode = nosnoc.PssMode.STEWART
if model is None:
model = get_simplest_model_sliding()
Nsim = 1
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, X0, Nsim)
looper.run()
results = looper.get_results()
# solver.print_problem()
# plot_results(results)
return results
def plot_results(results):
nosnoc.latexify_plot()
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(results["t_grid"], results["X_sim"], label='x', marker='o')
plt.legend()
plt.grid()
# algebraic variables
thetas = nosnoc.flatten_layer(results['theta_sim'], 0)
thetas = [thetas[0]] + thetas
lambdas = nosnoc.flatten_layer(results['lambda_sim'], 0)
lambdas = [lambdas[0]] + lambdas
n_lam = len(lambdas[0])
plt.subplot(3, 1, 2)
n_lam = len(lambdas[0])
for i in range(n_lam):
plt.plot(results["t_grid"], [x[i] for x in lambdas], label=f'lambda_{i}')
plt.grid()
plt.legend()
plt.subplot(3, 1, 3)
for i in range(n_lam):
plt.plot(results["t_grid"], [x[i] for x in thetas], label=f'theta_{i}')
plt.grid()
plt.vlines(results["t_grid"], ymin=0.0, ymax=1.0, linestyles='dotted')
plt.legend()
plt.show()
# EXAMPLE
def example():
model = get_simplest_model_sliding()
model = get_simplest_model_switch()
opts = get_default_options()
opts.print_level = 1
results = solve_simplest_example(opts=opts, model=model)
plot_results(results)
if __name__ == "__main__":
example()
| 3,065 | 22.052632 | 95 | py |
nosnoc_py | nosnoc_py-main/examples/simplest/parametric_example.py | import numpy as np
from casadi import SX, horzcat
import matplotlib.pyplot as plt
from simplest_example import get_default_options, plot_results
import nosnoc
TOL = 1e-9
# Analytic solution
EXACT_SWITCH_TIME = 1 / 3
TSIM = np.pi / 4
# Initial Value
X0 = np.array([-1.0])
def get_simplest_parametric_model_switch():
    # Variable definition
x1 = SX.sym("x1")
x = x1
# every constraint function corresponds to a sys (note that the c_i might be vector valued)
c = [x1]
# sign matrix for the modes
S = [np.array([[-1], [1]])]
p = SX.sym('p')
p_val = np.array([-1.0])
f_11 = 3
f_12 = p
# in matrix form
F = [horzcat(f_11, f_12)]
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, name='simplest_switch', p=p, p_val = p_val)
return model
# EXAMPLE
def example():
model = get_simplest_parametric_model_switch()
opts = get_default_options()
opts.print_level = 1
Nsim = 1
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, X0, Nsim)
looper.run()
results = looper.get_results()
plot_results(results)
if __name__ == "__main__":
example()
| 1,242 | 18.730159 | 101 | py |
nosnoc_py | nosnoc_py-main/examples/temperature_control/time_freezing_hysteresis_temperature_control.py | """
Time-freezing example with a hysteresis curve.
In this example the temperature is controlled using a radiator.
The desired temperature lies between 17 and 21 degrees, with an optimum of
19 degrees.
"""
import nosnoc
from casadi import SX, vertcat, inf, norm_2, horzcat
from math import ceil, log
import numpy as np
import matplotlib.pyplot as plt
# jump points in x in the hysteresis function
y1 = 17
y2 = 21
def create_options():
"""Create nosnoc options."""
opts = nosnoc.NosnocOpts()
opts.print_level = 2
# Degree of interpolating polynomial
opts.n_s = 3
# === MPCC settings ===
# upper bound for elastic variables
opts.s_elastic_max = 1e1
# in penalty methods 1: J = J+(1/p)*J_comp (direct) , 0 : J = p*J+J_comp (inverse)
opts.objective_scaling_direct = 0
    # === Penalty/Relaxation parameter ===
# initial smoothing parameter
opts.sigma_0 = 1e1
# end smoothing parameter
opts.sigma_N = 1e-3 # 1e-10
# decrease rate
opts.homotopy_update_slope = 0.1
# number of steps
opts.N_homotopy = ceil(abs(
log(opts.sigma_N / opts.sigma_0) / log(opts.homotopy_update_slope))) + 1
opts.comp_tol = 1e-14
# IPOPT Settings
opts.nlp_max_iter = 500
# New setting: time freezing settings
opts.initial_theta = 0.5
opts.time_freezing = False
opts.pss_mode = nosnoc.PssMode.STEWART
return opts
def create_temp_control_model_voronoi(u=None):
"""
Create temperature control model.
    :param u: input of the radiator; if this is given, a simulation
        model is generated.
"""
global y1, y2
# Discretization parameters
# N_stages = 2
# N_finite_elements = 1
    # T = 0.1 # (here determined later, depending on omega)
# h = T/N_stages
    # initial value
t0 = 0
w0 = 0
y0 = 20
cost0 = 0
lambda_cool_down = -0.2 # cool down time constant of lin dynamics
# Points:
z1 = np.array([1 / 4, -1 / 4])
z2 = np.array([1 / 4, 1 / 4])
z3 = np.array([3 / 4, 3 / 4])
z4 = np.array([3 / 4, 5 / 4])
    # Define model dimensions, equations, constraint functions, regions and so on.
    # number of Cartesian products in the model ("independent switches"); we call this a layer
    # Variable definition
y = SX.sym('y')
    w = SX.sym('w')  # Auxiliary variable
t = SX.sym('t') # Time variable
cost = SX.sym('cost')
x = vertcat(y, w, t, cost)
    # Initial Value
X0 = np.array([y0, w0, t0, cost0]).T
# Range
lbx = np.array([y1-1, 0, 0, 0])
ubx = np.array([inf, 1, inf, inf])
# linear transformation for rescaling of the switching function.
psi = (y - y1) / (y2 - y1)
z = vertcat(psi, w)
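    # psi maps the temperature band [y1, y2] = [17, 21] onto [0, 1], so the
    # hysteresis loop lives in the (psi, w) plane: z1, z2 straddle the branch w = 0
    # and z3, z4 straddle the branch w = 1 of that loop.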
# control
if not u:
u = SX.sym('u')
s = SX.sym('s') # Length of time
u_comb = vertcat(u, s)
else:
u_comb = None
s = 1
lbu = np.array([0.1, 0.5])
ubu = np.array([100, 20])
# discriminant functions via voronoi
g_11 = norm_2(z - z1)**2
g_12 = norm_2(z - z2)**2
g_13 = norm_2(z - z3)**2
g_14 = norm_2(z - z4)**2
g_ind = [vertcat(g_11, g_12, g_13, g_14)]
# System dynamics:
# Heating:
# y_des = 19
f_A = vertcat(lambda_cool_down * y + u, 0, 1, u)
f_B = vertcat(lambda_cool_down * y, 0, 1, 0)
a_push = 5
f_push_down = vertcat(0, -a_push * (psi - 1)**2 / (1 + (psi - 1)**2), 0, 0)
f_push_up = vertcat(0, a_push * (psi)**2 / (1 + (psi)**2), 0, 0)
f_11 = s * (2 * f_A - f_push_down)
f_12 = s * (f_push_down)
f_13 = s * (f_push_up)
f_14 = s * (2 * f_B - f_push_up)
F = [horzcat(f_11, f_12, f_13, f_14)]
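    # Modes 2 and 3 contain only the push terms: their y-, t- and cost-components
    # are zero, so the physical state and the clock are frozen while w is pushed
    # down / up across the hysteresis jump (time-freezing). Modes 1 and 4 use
    # 2*f_A - f_push_down and 2*f_B - f_push_up, so an equal Filippov combination
    # with the neighbouring push mode recovers f_A (heater on) on the w = 0 branch
    # and f_B (heater off) on the w = 1 branch.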
# Desired temperature is 19 degrees
f_q = 0
f_terminal = cost
if u_comb is not None:
model = nosnoc.NosnocModel(x=x,
F=F,
g_Stewart=g_ind,
x0=X0,
u=u_comb,
t_var=t,
name='simplest_sliding')
return model, lbx, ubx, lbu, ubu, f_q, f_terminal, X0
else:
model = nosnoc.NosnocModel(
x=x, F=F, g_Stewart=g_ind, x0=X0, name='simplest_sliding')
return model
def plot(model, X, t_grid, U=None, t_grid_u=None):
"""Plot the results."""
temperature = [x[0] for x in X]
aux = [x[1] for x in X]
time = [x[2] for x in X]
plt.figure()
plt.subplot(2, 2, 1)
plt.plot(t_grid, [y1 for _ in t_grid], 'k--')
plt.plot(t_grid, [y2 for _ in t_grid], 'k--')
plt.plot(t_grid, temperature, 'k', label="Temperature",)
    plt.plot(t_grid, aux, label="Auxiliary variable")
plt.plot(t_grid, time, label="Time")
plt.ylabel("$x$")
plt.xlabel("$t$")
plt.legend()
plt.grid()
if U is not None:
plt.subplot(2, 2, 2)
plt.plot(t_grid_u, [u[0] for u in U], label="Control")
plt.plot(t_grid_u, [u[1] for u in U], label="Time scaling")
plt.ylabel("$u$")
plt.xlabel("$t$")
plt.legend()
plt.grid()
plt.subplot(2, 2, 3)
plt.ylabel("Temperature")
plt.xlabel("Real time")
plt.plot(time, temperature, label="Temperature in real time")
plt.subplot(2, 2, 4)
plt.ylabel("G")
plt.xlabel("Time")
g = horzcat(*[model.g_Stewart_fun(x, 0) for x in X]).T
plt.plot(t_grid, g, label=[f"mode {i}" for i in range(g.shape[1])])
plt.legend()
plt.figure()
plt.plot([-2, 1], [0, 0], 'k')
plt.plot([0, 2], [1, 1], 'k')
plt.plot([-1, 0, 1, 2], [1.5, 1, 0, -.5], 'k')
psi = [(x[0] - y1) / (y2 - y1) for x in X]
    im = plt.scatter(psi, aux, c=t_grid, cmap='hot')
im.set_label('Time')
plt.colorbar(im)
plt.xlabel("$\\psi(x)$")
plt.ylabel("$w$")
plt.show()
def simulation(u=20, Tsim=3, Nsim=30, with_plot=True):
"""Simulate the temperature control system with a fixed input."""
opts = create_options()
model = create_temp_control_model_voronoi(u=u)
Tstep = Tsim / Nsim
opts.N_finite_elements = 2
opts.N_stages = 1
opts.terminal_time = Tstep
opts.sigma_N = 1e-2
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
if with_plot:
plot(model, results["X_sim"], results["t_grid"])
return results["X_sim"], results["t_grid"]
def control(with_plot=True):
"""Control the system."""
t_end = 5
opts = create_options()
model, lbx, ubx, lbu, ubu, f_q, f_terminal, X0 = create_temp_control_model_voronoi()
opts.N_finite_elements = 3
opts.n_s = 3
opts.N_stages = 10
opts.terminal_time = t_end
opts.time_freezing = True
opts.time_freezing_tolerance = 0.1
ocp = nosnoc.NosnocOcp(
lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_terminal,
lbx=lbx, ubx=ubx
)
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
print("Dominant modes:")
print([np.argmax(i) for i in results["theta_list"]])
if with_plot:
plot(model, results["x_list"], results["t_grid"][1:], results["u_list"],
results["t_grid_u"][:-1])
return model, opts, solver, results
if __name__ == "__main__":
simulation()
control()
| 7,328 | 27.406977 | 92 | py |
nosnoc_py | nosnoc_py-main/examples/Acary2014/irma_integration_order_experiment.py | from matplotlib import pyplot as plt
import numpy as np
from irma import get_irma_model, get_default_options, SWITCH_ON, LIFTING, X0
import nosnoc
import pickle
import os
import itertools
BENCHMARK_DATA_PATH = 'private_irma_benchmark_data'
REF_RESULTS_FILENAME = 'irma_benchmark_results.pickle'
SCHEMES = [nosnoc.IrkSchemes.GAUSS_LEGENDRE, nosnoc.IrkSchemes.RADAU_IIA]
NS_VALUES = [1, 2, 3, 4]
NFE_VALUES = [3]
# NSIM_VALUES = [1, 3, 10, 20, 50, 100, 300] # convergence issues for Legendre
NSIM_VALUES = [1, 3, 9, 18, 50, 100, 300]
USE_FESD_VALUES = [True, False]
# USE_FESD_VALUES = [False]
# # NOTE: this has convergence issues
# NS_VALUES = [2]
# NSIM_VALUES = [10]
# SCHEME = nosnoc.IrkSchemes.GAUSS_LEGENDRE
TOL = 1e-12
TSIM = 100
def pickle_results(results, filename):
# create directory if it does not exist
if not os.path.exists(BENCHMARK_DATA_PATH):
os.makedirs(BENCHMARK_DATA_PATH)
# save
file = os.path.join(BENCHMARK_DATA_PATH, filename)
with open(file, 'wb') as f:
pickle.dump(results, f)
def unpickle_results(filename):
file = os.path.join(BENCHMARK_DATA_PATH, filename)
with open(file, 'rb') as f:
results = pickle.load(f)
return results
def generate_reference_solution():
opts = get_default_options()
opts.n_s = 5
opts.N_finite_elements = 3
Nsim = 500
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
opts.comp_tol = TOL * 1e-2
opts.sigma_N = TOL * 1e-2
opts.do_polishing_step = False
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
model = get_irma_model(SWITCH_ON, LIFTING)
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, print_level=1)
looper.run()
results = looper.get_results()
results['opts'] = opts
pickle_results(results, REF_RESULTS_FILENAME)
def get_results_filename(opts):
filename = 'irma_bm_results_'
filename += 'Nfe_' + str(opts.N_finite_elements) + '_'
filename += 'ns' + str(opts.n_s) + '_'
filename += 'tol' + str(opts.comp_tol) + '_'
filename += 'dt' + str(opts.terminal_time) + '_'
filename += 'Tsim' + str(TSIM) + '_'
filename += opts.irk_scheme.name
if not opts.use_fesd:
filename += '_nofesd'
filename += '.pickle'
return filename
def get_opts(Nsim, n_s, N_fe, scheme, use_fesd):
opts = get_default_options()
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
opts.comp_tol = TOL
opts.sigma_N = TOL
opts.irk_scheme = scheme
opts.print_level = 1
opts.use_fesd = use_fesd
opts.n_s = n_s
opts.N_finite_elements = N_fe
# opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
# opts.irk_representation = nosnoc.IrkRepresentation.DIFFERENTIAL
return opts
def run_benchmark():
for n_s, N_fe, Nsim, scheme, use_fesd in itertools.product(NS_VALUES, NFE_VALUES, NSIM_VALUES, SCHEMES, USE_FESD_VALUES):
model = get_irma_model(SWITCH_ON, LIFTING)
opts = get_opts(Nsim, n_s, N_fe, scheme, use_fesd)
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim, print_level=1)
looper.run()
results = looper.get_results()
results['opts'] = opts
filename = get_results_filename(opts)
pickle_results(results, filename)
del solver, looper, results, model, opts
def get_reference_solution():
return unpickle_results(REF_RESULTS_FILENAME)
def count_failures(results):
status_list: list = results['status']
return len([x for x in status_list if x != nosnoc.Status.SUCCESS])
def evaluate_reference_solution():
results = get_reference_solution()
n_fail = count_failures(results)
print(f"Reference solution got {n_fail} failing subproblems")
def order_plot():
nosnoc.latexify_plot()
N_fe = 3
ref_results = get_reference_solution()
x_end_ref = ref_results['X_sim'][-1]
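    # For each (scheme, n_s, use_fesd) combination the terminal error w.r.t. the
    # reference solution is plotted over the step size on a log-log scale; the
    # slope of each curve is the empirically observed integration order.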
linestyles = ['-', '--', '-.', ':', ':', '-.', '--', '-']
marker_types = ['o', 's', 'v', '^', '>', '<', 'd', 'p']
# SCHEME = nosnoc.IrkSchemes.RADAU_IIA
SCHEME = nosnoc.IrkSchemes.RADAU_IIA
ax = plt.figure()
for use_fesd in [True, False]:
for i, n_s in enumerate(NS_VALUES):
errors = []
step_sizes = []
for Nsim in NSIM_VALUES:
opts = get_opts(Nsim, n_s, N_fe, SCHEME, use_fesd)
filename = get_results_filename(opts)
results = unpickle_results(filename)
print(f"loading filde {filename}")
x_end = results['X_sim'][-1]
n_fail = count_failures(results)
error = np.max(np.abs(x_end - x_end_ref))
print("opts.n_s: ", opts.n_s, "opts.terminal_time: ", opts.terminal_time, "error: ", error, "n_fail: ", n_fail)
errors.append(error)
step_sizes.append(opts.terminal_time)
label = r'$n_s=' + str(n_s) +'$'
if results['opts'].irk_scheme == nosnoc.IrkSchemes.RADAU_IIA:
if n_s == 1:
label = 'implicit Euler: 1'
else:
label = 'Radau IIA: ' + str(2*n_s-1)
elif results['opts'].irk_scheme == nosnoc.IrkSchemes.GAUSS_LEGENDRE:
label = 'Gauss-Legendre: ' + str(2*n_s)
if use_fesd:
label += ', FESD'
else:
label += ', Standard'
plt.plot(step_sizes, errors, label=label, marker=marker_types[i], linestyle=linestyles[i])
plt.grid()
plt.xlabel('Step size')
plt.ylabel('Error')
plt.yscale('log')
plt.xscale('log')
# plt.legend(loc='center left')
ax.legend(loc='upper left', bbox_to_anchor=(0.05, .97), ncol=2, framealpha=1.0)
fig_filename = f'irma_benchmark_{SCHEME.name}.pdf'
plt.savefig(fig_filename, bbox_inches='tight')
print(f"Saved figure to {fig_filename}")
plt.show()
if __name__ == "__main__":
# generate data
run_benchmark()
generate_reference_solution()
    # evaluate
evaluate_reference_solution()
order_plot()
| 6,190 | 30.426396 | 127 | py |
nosnoc_py | nosnoc_py-main/examples/Acary2014/two_gene.py | import numpy as np
from casadi import SX, horzcat, vertcat
import matplotlib.pyplot as plt
import nosnoc
# Example gene network from:
# Numerical simulation of piecewise-linear models of gene regulatory networks using complementarity systems
# V. Acary, H. De Jong, B. Brogliato
TOL = 1e-9
TSIM = 1
# Thresholds
thresholds_1 = np.array([4, 8])
thresholds_2 = np.array([4, 8])
# Synthesis
kappa = np.array([40, 40])
# Degradation
gamma = np.array([4.5, 1.5])
X0 = [3, 3]
LIFTING = True
def get_default_options():
opts = nosnoc.NosnocOpts()
opts.comp_tol = TOL
opts.N_finite_elements = 2
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.pss_mode = nosnoc.PssMode.STEP
return opts
def get_two_gene_model(x0, lifting):
    # Variable definition
x = SX.sym("x", 2)
# alphas for general inclusions
alpha = SX.sym('alpha', 4)
# Switching function
c = [vertcat(x[0]-thresholds_1, x[1]-thresholds_2)]
# Switching multipliers
s = vertcat((1-alpha[1])*alpha[2], alpha[0]*(1-alpha[3]))
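    # Roughly, alpha[k] in [0, 1] is the step multiplier of the k-th entry of c,
    # i.e. alpha[k] ~ 1 when that switching function is positive and ~ 0 when it is
    # negative. Gene 1 is then synthesised when x[0] < 8 and x[1] > 4 (the term
    # (1 - alpha[1]) * alpha[2]) and gene 2 when x[0] > 4 and x[1] < 8, with
    # production rates kappa and linear degradation gamma.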
if lifting:
beta = SX.sym('beta', 2)
g_z = beta - s
f_x = [-gamma*x + kappa*beta]
model = nosnoc.NosnocModel(x=x, f_x=f_x, z=beta, g_z=g_z, alpha=[alpha], c=c, x0=x0, name='two_gene')
else:
f_x = [-gamma*x + kappa*s]
model = nosnoc.NosnocModel(x=x, f_x=f_x, alpha=[alpha], c=c, x0=x0, name='two_gene')
return model
def solve_two_gene(opts=None, model=None):
if opts is None:
opts = get_default_options()
if model is None:
model = get_two_gene_model(X0, False)
Nsim = 20
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
return results
def plot_results(results):
nosnoc.latexify_plot()
plt.figure()
for result in results:
plt.plot(result["X_sim"][:, 0], result["X_sim"][:, 1])
plt.quiver(result["X_sim"][:-1, 0],
result["X_sim"][:-1, 1],
np.diff(result["X_sim"][:, 0]),
np.diff(result["X_sim"][:, 1]),
scale=100,
width=0.01)
plt.vlines(thresholds_1, ymin=-15.0, ymax=15.0, linestyles='dotted')
plt.hlines(thresholds_2, xmin=-15.0, xmax=15.0, linestyles='dotted')
plt.ylim(0, 13)
plt.xlim(0, 13)
plt.xlabel('x_1')
plt.ylabel('x_2')
plt.show()
# EXAMPLE
def example():
opts = get_default_options()
opts.print_level = 0
results = []
for x1 in [3, 6, 9, 12]:
for x2 in [3, 6, 9, 12]:
model = get_two_gene_model([x1, x2], LIFTING)
results.append(solve_two_gene(opts=opts, model=model))
plot_results(results)
if __name__ == "__main__":
example()
| 2,907 | 24.068966 | 109 | py |
nosnoc_py | nosnoc_py-main/examples/Acary2014/irma.py | import numpy as np
from casadi import SX, horzcat, sum2, vertcat
import matplotlib.pyplot as plt
import nosnoc
# Example synthetic benchmark from:
# Numerical simulation of piecewise-linear models of gene regulatory networks using complementarity systems
# V. Acary, H. De Jong, B. Brogliato
TOL = 1e-5
SWITCH_ON = 1
TSIM = 1000
# Thresholds
thresholds = [np.array([0.01]), np.array([0.01, 0.06, 0.08]).T, np.array([0.035]), np.array([0.04]), np.array([0.01])]
# Synthesis
kappa = np.array([[1.1e-4, 9e-4],
[3e-4, 0.15],
[6e-4, 0.018],
[5e-4, 0.03],
[7.5e-4, 0.015]])
# Degradation
gamma = np.array([0.05, 0.04, 0.05, 0.02, 0.6])
X0 = [0.011, 0.09, 0.04, 0.05, 0.015]
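# Each state follows x_i' = -gamma[i] * x_i + kappa[i, 0] * s[i, 0] + kappa[i, 1] * s[i, 1],
# where the entries of s are products of step functions (alpha) of the thresholds
# above; the first column is (mostly) the basal synthesis term, see f_x below.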
LIFTING = True
def get_default_options():
opts = nosnoc.NosnocOpts()
opts.comp_tol = TOL
opts.N_finite_elements = 3
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.pss_mode = nosnoc.PssMode.STEP
opts.print_level = 0
opts.homotopy_update_rule = nosnoc.HomotopyUpdateRule.LINEAR
return opts
def get_irma_model(switch_on, lifting):
    # Variable definition
x = SX.sym("x", 5)
# alphas for general inclusions
alpha = SX.sym('alpha', 7)
# Switching function
c = [nosnoc.casadi_vertcat_list([x[i]-thresholds[i] for i in range(len(X0))])]
if lifting:
if switch_on:
beta = SX.sym('beta', 1)
g_z = beta - alpha[1]*(1-alpha[4])
s = horzcat(nosnoc.casadi_vertcat_list([1, 1, 1, alpha[1], 1]),
nosnoc.casadi_vertcat_list([alpha[5], alpha[0], alpha[2], beta, alpha[3]]))
else:
beta = SX.sym('beta', 2)
g_z = beta - vertcat(alpha[0]*(1-alpha[6]), alpha[1]*(1-alpha[4]))
s = horzcat(nosnoc.casadi_vertcat_list([1, 1, 1, alpha[1], 1]),
nosnoc.casadi_vertcat_list([alpha[5], beta[0], alpha[2], beta[1], alpha[3]]))
f_x = [-gamma*x + sum2(kappa*s)]
model = nosnoc.NosnocModel(x=x, f_x=f_x, g_z=g_z, z=beta, alpha=[alpha], c=c, x0=X0, name='irma')
else:
# Switching multipliers
s = horzcat(nosnoc.casadi_vertcat_list([1, 1, 1, alpha[1], 1]),
nosnoc.casadi_vertcat_list([alpha[5], alpha[0]*(1-(1-switch_on)*(alpha[6])), alpha[2], alpha[1]*(1-alpha[4]), alpha[3]]))
f_x = [-gamma*x + sum2(kappa*s)]
model = nosnoc.NosnocModel(x=x, f_x=f_x, alpha=[alpha], c=c, x0=X0, name='irma')
return model
def solve_irma(opts=None, model=None):
if opts is None:
opts = get_default_options()
if model is None:
model = get_irma_model(SWITCH_ON, LIFTING)
Nsim = 500
Tstep = TSIM / Nsim
opts.terminal_time = Tstep
solver = nosnoc.NosnocSolver(opts, model)
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
return results
def plot_trajectory(results, figure_filename=None):
nosnoc.latexify_plot()
n_subplot = len(X0)
fig, axs = plt.subplots(n_subplot, 1)
print(results["switch_times"])
xnames = ['Gal4', 'Swi5', 'Ash1', 'Cbf1', 'Gal80']
for i in range(n_subplot):
axs[i].plot(results["t_grid"], results["X_sim"][:, i], linewidth=2)
axs[i].hlines(thresholds[i], xmin=0, xmax=TSIM, linestyles='dotted', linewidth=1)
axs[i].set_xlim(0, TSIM)
axs[i].set_ylim(0, 1.1*max(results["X_sim"][:, i]))
axs[i].set_ylabel(xnames[i])
axs[i].grid()
for t in results['switch_times']:
axs[i].axvline(t, linestyle="dashed", color="r", linewidth=0.5)
if i == n_subplot - 1:
plt.xlabel('$t$ [min]')
else:
axs[i].xaxis.set_ticklabels([])
if figure_filename is not None:
plt.savefig(figure_filename)
print(f'stored figure as {figure_filename}')
plt.show()
def plot_algebraic_traj(results, figure_filename=None):
nosnoc.latexify_plot()
alpha_sim = np.array([results['alpha_sim'][0][0]] + nosnoc.flatten_layer(results['alpha_sim']))
n_subplot = len(alpha_sim[0])
fig, axs = plt.subplots(n_subplot, 1)
for i in range(n_subplot):
axs[i].plot(results["t_grid"], alpha_sim[:,i])
# axs[i].hlines(thresholds[i], xmin=0, xmax=TSIM, linestyles='dotted')
axs[i].set_xlim(0, TSIM)
axs[i].set_ylim(0, 1.1*max(alpha_sim[:, i]))
axs[i].set_ylabel(r'$\alpha_' + f'{i+1}$')
# axs[i].set_xlabel('$t$ [min]')
axs[i].grid()
if i == n_subplot - 1:
axs[i].set_xlabel('$t$ [min]')
else:
axs[i].xaxis.set_ticklabels([])
if figure_filename is not None:
plt.savefig(figure_filename)
print(f'stored figure as {figure_filename}')
plt.show()
# EXAMPLE
def example():
opts = get_default_options()
opts.print_level = 1
model = get_irma_model(SWITCH_ON, LIFTING)
results = solve_irma(opts=opts, model=model)
plot_algebraic_traj(results)
plot_trajectory(results)
# plot_algebraic_traj(results, figure_filename='irma_algebraic_traj.pdf')
# plot_trajectory(results, figure_filename='irma_traj.pdf')
if __name__ == "__main__":
example()
| 5,276 | 30.981818 | 141 | py |
nosnoc_py | nosnoc_py-main/examples/hopper_robot/hopper_ocp.py | # Hopper OCP
# example inspired by https://github.com/KY-Lin22/NIPOCPEC and https://github.com/thowell/motion_planning/blob/main/models/hopper.jl
# The methods and time-freezing reformulation are detailed in https://arxiv.org/abs/2111.06759
import nosnoc as ns
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from matplotlib.animation import FuncAnimation
import matplotlib.patches as patches
from functools import partial
LONG = False
DENSE = True
HEIGHT = 1.0
def get_hopper_ocp_description(opts, x_goal, dense, multijump=False):
# hopper model
# model vars
q = ca.SX.sym('q', 4)
v = ca.SX.sym('v', 4)
t = ca.SX.sym('t')
x = ca.vertcat(q, v)
u = ca.SX.sym('u', 3)
sot = ca.SX.sym('sot')
theta = ca.SX.sym('theta', 8)
# dims
n_q = 4
n_v = 4
n_x = n_q + n_v
# state equations
mb = 1 # body
ml = 0.1 # link
Ib = 0.25 # body
Il = 0.025 # link
    mu = 0.45  # friction coefficient
g = 9.81
# inertia matrix
M = np.diag([mb + ml, mb + ml, Ib + Il, ml])
# coriolis and gravity
C = np.array([0, (mb + ml)*g, 0, 0]).T
# Control input matrix
B = ca.vertcat(ca.horzcat(0, -np.sin(q[2])),
ca.horzcat(0, np.cos(q[2])),
ca.horzcat(1, 0),
ca.horzcat(0, 1))
f_c_normal = ca.vertcat(0, 1, q[3]*ca.sin(q[2]), -ca.cos(q[2]))
f_c_tangent = ca.vertcat(1, 0, q[3]*ca.cos(q[2]), ca.sin(q[2]))
v_normal = f_c_normal.T@v
v_tangent = f_c_tangent.T@v
f_v = (-C + B@u[0:2])
f_c = q[1] - q[3]*ca.cos(q[2])
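    # f_c is the gap between the foot and the ground: foot height q[1] - q[3]*cos(q[2])
    # with body height q[1], leg angle q[2] and leg length q[3]. f_c_normal and
    # f_c_tangent are the corresponding contact Jacobians, so v_normal and v_tangent
    # are the normal and tangential velocities of the contact point used as
    # switching functions below.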
if LONG:
x_goal = 1.3
x0 = np.array([0.1, 0.5, 0, 0.5, 0, 0, 0, 0])
x_mid1 = np.array([0.4, 0.65, 0, 0.2, 0, 0, 0, 0])
x_mid2 = np.array([0.6, 0.5, 0, 0.5, 0, 0, 0, 0])
x_mid3 = np.array([0.9, 0.65, 0, 0.2, 0, 0, 0, 0])
x_end = np.array([x_goal, 0.5, 0, 0.5, 0, 0, 0, 0])
interpolator1 = CubicSpline([0, 0.5, 1], [x0, x_mid1, x_mid2])
interpolator2 = CubicSpline([0, 0.5, 1], [x_mid2, x_mid3, x_end])
x_ref1 = interpolator1(np.linspace(0, 1, int(np.floor(opts.N_stages/2))))
x_ref2 = interpolator2(np.linspace(0, 1, int(np.floor(opts.N_stages/2))))
x_ref = np.concatenate([x_ref1, x_ref2])
else:
if multijump:
n_jumps = int(np.round(x_goal-0.1))
x_step = (x_goal-0.1)/n_jumps
x0 = np.array([0.1, 0.5, 0, 0.5, 0, 0, 0, 0])
x_ref = np.empty((0, n_x))
x_start = x0
# TODO: if N_stages not divisible by n_jumps then this doesn't work... oh well
for ii in range(n_jumps):
# Parameters
x_mid = np.array([x_start[0] + x_step/2, HEIGHT, 0, 0.1, 0, 0, 0, 0])
x_end = np.array([x_start[0] + x_step, 0.5, 0, 0.5, 0, 0, 0, 0])
interpolator = CubicSpline([0, 0.5, 1], [x_start, x_mid, x_end])
t_pts = np.linspace(0, 1, int(np.floor(opts.N_stages/n_jumps))+1)
x_ref = np.concatenate((x_ref, interpolator(t_pts[:-1])))
x_start = x_end
x_ref = np.concatenate((x_ref, np.expand_dims(x_end, axis=0)))
else:
x0 = np.array([0.1, 0.5, 0, 0.5, 0, 0, 0, 0])
x_mid = np.array([(x_goal-0.1)/2+0.1, HEIGHT, 0, 0.1, 0, 0, 0, 0])
x_end = np.array([x_goal, 0.5, 0, 0.5, 0, 0, 0, 0])
interpolator = CubicSpline([0, 0.5, 1], [x0, x_mid, x_end])
x_ref = interpolator(np.linspace(0, 1, opts.N_stages+1))
    # The control u[2] is a slack variable used to model the no-slip constraint.
ubu = np.array([50, 50, 100, 20])
lbu = np.array([-50, -50, 0, 0.1])
u_guess = np.array([0, 0, 0, 1])
ubx = np.array([x_goal+0.1, 1.5, np.pi, 0.50, 10, 10, 5, 5, np.inf])
lbx = np.array([0, 0, -np.pi, 0.1, -10, -10, -5, -5, -np.inf])
Q = np.diag([100, 100, 20, 50, 0.1, 0.1, 0.1, 0.1])
Q_terminal = np.diag([300, 300, 300, 300, 0.1, 0.1, 0.1, 0.1])
R = np.diag([0.01, 0.01, 1e-5])
# path comp to avoid slipping
g_comp_path = ca.horzcat(ca.vertcat(v_tangent, -v_tangent), ca.vertcat(theta[-1]+theta[-2], theta[-1]+theta[-2]))
    # Hand-crafted least-squares tracking cost
p_x_ref = ca.SX.sym('x_ref', n_x)
f_q = sot*(ca.transpose(x - p_x_ref)@Q@(x-p_x_ref) + ca.transpose(u)@R@u)
f_q_T = ca.transpose(x - x_end)@Q_terminal@(x - x_end)
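    # Note: multiplying the stage cost by the speed-of-time variable `sot` means the tracking
    # term is (approximately) integrated in physical time rather than in the numerical time of
    # the time-freezing reformulation; the reference enters via the time-varying parameter
    # p_x_ref, one reference point per control stage.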
# hand crafted time freezing :)
a_n = 100
J_normal = f_c_normal
J_tangent = f_c_tangent
inv_M = ca.inv(M)
f_ode = sot * ca.vertcat(v, inv_M@f_v, 1)
inv_M_aux = inv_M
f_aux_pos = ca.vertcat(ca.SX.zeros(n_q, 1), inv_M_aux@(J_normal-J_tangent*mu)*a_n, 0)
f_aux_neg = ca.vertcat(ca.SX.zeros(n_q, 1), inv_M_aux@(J_normal+J_tangent*mu)*a_n, 0)
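    # Sketch of the time-freezing reasoning (cf. the reference in the header): while the gap
    # function f_c is negative, the state evolves with f_aux_pos / f_aux_neg, which drive the
    # contact-normal velocity back towards zero at rate a_n while the clock state (last entry
    # of the ODE, dt/dtau) is frozen at 0; the two variants correspond to positive and negative
    # tangential velocity and encode Coulomb friction with coefficient mu.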
if dense:
F = [ca.horzcat(f_ode, f_ode, f_ode, f_ode, f_ode, f_ode, f_aux_pos, f_aux_neg)]
S = [np.array([[1, 1, 1],
[1, 1, -1],
[1, -1, 1],
[1, -1, -1],
[-1, 1, 1],
[-1, 1, -1],
[-1, -1, 1],
[-1, -1, -1]])]
else:
F = [ca.horzcat(f_ode, f_ode, f_aux_pos, f_aux_neg)]
S = [np.array([[1, 0, 0], [-1, 1, 0], [-1, -1, 1], [-1, -1, -1]])]
c = [ca.vertcat(f_c, v_normal, v_tangent)]
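    # Region definition above: the signs of (f_c, v_normal, v_tangent) select the active vector
    # field. The dense variant enumerates all 2^3 sign combinations (mostly copies of the
    # free-flight ODE), whereas the sparse variant uses don't-care entries (0) in S so that only
    # 4 regions are needed.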
model = ns.NosnocModel(x=ca.vertcat(x, t), F=F, S=S, c=c, x0=np.concatenate((x0, [0])),
u=ca.vertcat(u, sot), p_time_var=p_x_ref, p_time_var_val=x_ref[1:, :], t_var=t, theta=[theta])
ocp = ns.NosnocOcp(lbu=lbu, ubu=ubu, u_guess=u_guess, f_q=f_q, f_terminal=f_q_T, g_path_comp=g_comp_path, lbx=lbx, ubx=ubx)
    v_tangent_fun = ca.Function('v_tangent_fun', [x], [v_tangent])
v_normal_fun = ca.Function('v_normal_fun', [x], [v_normal])
f_c_fun = ca.Function('f_c_fun', [x], [f_c])
return model, ocp, x_ref, v_tangent_fun, v_normal_fun, f_c_fun
def get_default_options():
opts = ns.NosnocOpts()
opts.pss_mode = ns.PssMode.STEWART
opts.use_fesd = True
comp_tol = 1e-9
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.sigma_0 = 100.
opts.homotopy_update_rule = ns.HomotopyUpdateRule.LINEAR
opts.n_s = 2
opts.step_equilibration = ns.StepEquilibrationMode.HEURISTIC_MEAN
opts.mpcc_mode = ns.MpccMode.SCHOLTES_INEQ
#opts.cross_comp_mode = ns.CrossComplementarityMode.SUM_LAMBDAS_COMPLEMENT_WITH_EVERY_THETA
opts.print_level = 1
opts.opts_casadi_nlp['ipopt']['max_iter'] = 4000
opts.opts_casadi_nlp['ipopt']['acceptable_tol'] = 1e-6
opts.time_freezing = True
opts.equidistant_control_grid = True
if LONG:
opts.N_stages = 30
else:
opts.N_stages = 20
opts.N_finite_elements = 3
opts.max_iter_homotopy = 6
return opts
def solve_ocp(opts=None, plot=True, dense=DENSE, ref_as_init=False, x_goal=1.0, multijump=False):
if opts is None:
opts = get_default_options()
opts.terminal_time = 5.0
opts.N_stages = 50
model, ocp, x_ref, v_tangent_fun, v_normal_fun, f_c_fun = get_hopper_ocp_description(opts, x_goal, dense, multijump)
solver = ns.NosnocSolver(opts, model, ocp)
# Calculate time steps and initialize x to [xref, t]
if ref_as_init:
opts.initialization_strategy = ns.InitializationStrategy.EXTERNAL
    t_steps = np.linspace(0, opts.terminal_time, opts.N_stages+1)
    solver.set('x', np.c_[x_ref[1:, :], t_steps[1:]])
results = solver.solve()
if plot:
plot_results(results, opts, x_ref, v_tangent_fun, v_normal_fun, f_c_fun, x_goal)
return results
def init_func(htrail, ftrail):
htrail.set_data([], [])
ftrail.set_data([], [])
return htrail, ftrail
def animate_robot(state, head, foot, body, ftrail, htrail):
x_head, y_head = state[0], state[1]
x_foot, y_foot = state[0] - state[3]*np.sin(state[2]), state[1] - state[3]*np.cos(state[2])
head.set_offsets([x_head, y_head])
foot.set_offsets([x_foot, y_foot])
body.set_data([x_foot, x_head], [y_foot, y_head])
ftrail.set_data(np.append(ftrail.get_xdata(orig=False), x_foot), np.append(ftrail.get_ydata(orig=False), y_foot))
htrail.set_data(np.append(htrail.get_xdata(orig=False), x_head), np.append(htrail.get_ydata(orig=False), y_head))
return head, foot, body, ftrail, htrail
def plot_results(results, opts, x_ref, v_tangent_fun, v_normal_fun, f_c_fun, x_goal):
fig, ax = plt.subplots()
if LONG:
ax.set_xlim(0, 1.5)
ax.set_ylim(-0.1, 1.1)
patch = patches.Rectangle((-0.1, -0.1), 1.6, 0.1, color='grey')
ax.add_patch(patch)
else:
ax.set_xlim(0, x_goal+0.1)
ax.set_ylim(-0.1, HEIGHT+0.5)
patch = patches.Rectangle((-0.1, -0.1), x_goal+0.2, 0.1, color='grey')
ax.add_patch(patch)
ax.plot(x_ref[:, 0], x_ref[:, 1], color='lightgrey')
head = ax.scatter([0], [0], color='b', s=[100])
foot = ax.scatter([0], [0], color='r', s=[50])
body, = ax.plot([], [], 'k')
ftrail, = ax.plot([], [], color='r', alpha=0.5)
htrail, = ax.plot([], [], color='b', alpha=0.5)
ani = FuncAnimation(fig, partial(animate_robot, head=head, foot=foot, body=body, htrail=htrail, ftrail=ftrail),
init_func=partial(init_func, htrail=htrail, ftrail=ftrail),
frames=results['x_traj'], blit=True, repeat=False)
try:
ani.save('hopper.gif', writer='imagemagick', fps=10)
except Exception:
print("install imagemagick to save as gif")
# Plot Trajectory
plt.figure()
x_traj = np.array(results['x_traj'])
t = x_traj[:, -1]
x = x_traj[:, 0]
y = x_traj[:, 1]
theta = x_traj[:, 2]
leg_len = x_traj[:, 3]
plt.subplot(4, 1, 1)
plt.plot(results['t_grid'], t)
plt.subplot(4, 1, 2)
plt.plot(results['t_grid'], x)
plt.plot(results['t_grid'], y)
plt.subplot(4, 1, 3)
plt.plot(results['t_grid'], theta)
plt.subplot(4, 1, 4)
plt.plot(results['t_grid'], leg_len)
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(results['t_grid'], f_c_fun(x_traj[:, :-1].T).full().T)
plt.subplot(3, 1, 2)
plt.plot(results['t_grid'], v_tangent_fun(x_traj[:, :-1].T).full().T)
plt.subplot(3, 1, 3)
plt.plot(results['t_grid'], v_normal_fun(x_traj[:, :-1].T).full().T)
# Plot Controls
plt.figure()
u_traj = np.array(results['u_traj'])
reaction = u_traj[:, 0]
leg_force = u_traj[:, 1]
slack = u_traj[:, 2]
sot = u_traj[:, 3]
plt.subplot(4, 1, 1)
plt.step(results['t_grid_u'], np.concatenate((reaction, [reaction[-1]])))
plt.subplot(4, 1, 2)
plt.step(results['t_grid_u'], np.concatenate((leg_force, [leg_force[-1]])))
plt.subplot(4, 1, 3)
plt.step(results['t_grid_u'], np.concatenate((slack, [slack[-1]])))
plt.subplot(4, 1, 4)
plt.step(results['t_grid_u'], np.concatenate((sot, [sot[-1]])))
plt.show()
if __name__ == '__main__':
solve_ocp(x_goal=5.0, multijump=True, ref_as_init=True)
| 11,107 | 35.903654 | 132 | py |
nosnoc_py | nosnoc_py-main/examples/hopper_robot/stage_experiment.py | import nosnoc as ns
import numpy as np
import pickle
from multiprocessing import Pool, cpu_count
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from time import gmtime, strftime
from hopper_ocp_step import solve_ocp_step, get_default_options_step
from hopper_ocp import solve_ocp, get_default_options
import sys
X_GOAL = 3.0
TERMINAL_TIME = 5.0
N_S = 2
N_EXPR = [60, 66, 72, 78, 84, 90, 96, 102, 108]
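# Experiment setup: a multi-jump hopper OCP to X_GOAL is solved for each stage count in N_EXPR,
# once with the Step formulation (run_sparse, lifted algebraics) and once with the Stewart
# formulation (run_dense), comparing total CPU time and CPU time per NLP iteration.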
def run_sparse(n_stages):
opts = get_default_options_step()
opts.terminal_time = TERMINAL_TIME
opts.N_stages = n_stages
opts.n_s = N_S
results = solve_ocp_step(opts=opts, plot=False, x_goal=X_GOAL, multijump=True, lift_algebraic=True, ref_as_init=True)
return results, sum(results['cpu_time_nlp']), sum(results['nlp_iter'])
def run_dense(n_stages):
opts = get_default_options()
opts.terminal_time = TERMINAL_TIME
opts.N_stages = n_stages
opts.n_s = N_S
opts.pss_mode = ns.PssMode.STEWART
results = solve_ocp(opts=opts, plot=False, dense=True, x_goal=X_GOAL, multijump=True, ref_as_init=True)
return results, sum(results['cpu_time_nlp']), sum(results['nlp_iter'])
def stage_experiment_mp():
    # Try running the solver for a range of N_stages with both the sparse (Step) and dense (Stewart) formulations
cpu_times_dense = []
cpu_times_sparse = []
nlp_iter_dense = []
nlp_iter_sparse = []
results_dense = []
results_sparse = []
n_expr = N_EXPR
with Pool(cpu_count() - 2) as p:
sparse = p.map_async(run_sparse, n_expr)
dense = p.map_async(run_dense, n_expr)
sparse.wait()
dense.wait()
cpu_times_sparse = [e[1] for e in sparse.get()]
cpu_times_dense = [e[1] for e in dense.get()]
nlp_iter_sparse = [e[2] for e in sparse.get()]
nlp_iter_dense = [e[2] for e in dense.get()]
results_sparse = [e[0] for e in sparse.get()]
results_dense = [e[0] for e in dense.get()]
# pickle
with open(strftime("%Y-%m-%d-%H-%M-%S-n-stages-experiment.pkl", gmtime()), 'wb') as f:
experiment_results = {'results_sparse': results_sparse,
'results_dense': results_dense,
'cpu_times_sparse': cpu_times_sparse,
'cpu_times_dense': cpu_times_dense,
'nlp_iter_sparse': nlp_iter_sparse,
'nlp_iter_dense': nlp_iter_dense,
'n_expr': n_expr}
pickle.dump(experiment_results, f)
plot_for_paper(cpu_times_sparse, cpu_times_dense, nlp_iter_sparse, nlp_iter_dense, n_expr)
def plot_from_pickle(fname):
with open(fname, 'rb') as f:
experiment_results = pickle.load(f)
plot_for_paper(experiment_results['cpu_times_sparse'],
experiment_results['cpu_times_dense'],
experiment_results['nlp_iter_sparse'],
experiment_results['nlp_iter_dense'],
experiment_results['n_expr'])
def plot_for_paper(cpu_times_sparse, cpu_times_dense, nlp_iter_sparse, nlp_iter_dense, n_expr):
ns.latexify_plot()
plt.figure()
plt.plot(n_expr, np.array(cpu_times_sparse)/60, 'Xb-', label="Step")
plt.plot(n_expr, np.array(cpu_times_dense)/60, 'Xr-', label="Stewart")
plt.xlabel('$N$')
plt.ylabel('cpu time [m]')
plt.legend(loc='best')
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True, steps=[5]))
plt.figure()
plt.plot(n_expr, np.array(cpu_times_sparse)/np.array(nlp_iter_sparse), 'Xb-', label="Step")
plt.plot(n_expr, np.array(cpu_times_dense)/np.array(nlp_iter_dense), 'Xr-', label="Stewart")
plt.xlabel('$N$')
plt.ylabel(r'cpu time/iteration [$s$]')
plt.legend(loc='best')
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True, steps=[5]))
plt.show()
if __name__ == '__main__':
if len(sys.argv) == 2:
plot_from_pickle(sys.argv[1])
else:
stage_experiment_mp()
| 3,979 | 35.851852 | 121 | py |
nosnoc_py | nosnoc_py-main/examples/hopper_robot/pickle_plotter.py | # Some tools to plot the pickled results in order to make sure our experiments are converging to good results
import numpy as np
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from functools import partial
def init_func(htrail, ftrail):
htrail.set_data([], [])
ftrail.set_data([], [])
return htrail, ftrail
def animate_robot(state, head, foot, body, ftrail, htrail):
x_head, y_head = state[0], state[1]
x_foot, y_foot = state[0] - state[3]*np.sin(state[2]), state[1] - state[3]*np.cos(state[2])
head.set_offsets([x_head, y_head])
foot.set_offsets([x_foot, y_foot])
body.set_data([x_foot, x_head], [y_foot, y_head])
ftrail.set_data(np.append(ftrail.get_xdata(orig=False), x_foot), np.append(ftrail.get_ydata(orig=False), y_foot))
htrail.set_data(np.append(htrail.get_xdata(orig=False), x_head), np.append(htrail.get_ydata(orig=False), y_head))
return head, foot, body, ftrail, htrail
def plot_results(results):
fig, ax = plt.subplots()
ax.set_xlim(0, 4.0)
ax.set_ylim(-0.1, 1.1)
head = ax.scatter([0], [0], color='b', s=[100])
foot = ax.scatter([0], [0], color='r', s=[50])
body, = ax.plot([], [], 'k')
ftrail, = ax.plot([], [], color='r', alpha=0.5)
htrail, = ax.plot([], [], color='b', alpha=0.5)
ani = FuncAnimation(fig, partial(animate_robot, head=head, foot=foot, body=body, htrail=htrail, ftrail=ftrail),
init_func=partial(init_func, htrail=htrail, ftrail=ftrail),
frames=results['x_traj'], blit=True)
# try:
# ani.save('hopper.gif', writer='imagemagick', fps=10)
# except Exception:
# print("install imagemagick to save as gif")
# Plot Trajectory
plt.figure()
x_traj = np.array(results['x_traj'])
t = x_traj[:, -1]
x = x_traj[:, 0]
y = x_traj[:, 1]
theta = x_traj[:, 2]
leg_len = x_traj[:, 3]
plt.subplot(4, 1, 1)
plt.plot(results['t_grid'], t)
plt.subplot(4, 1, 2)
plt.plot(results['t_grid'], x)
plt.plot(results['t_grid'], y)
plt.subplot(4, 1, 3)
plt.plot(results['t_grid'], theta)
plt.subplot(4, 1, 4)
plt.plot(results['t_grid'], leg_len)
# Plot Controls
plt.figure()
u_traj = np.array(results['u_traj'])
reaction = u_traj[:, 0]
leg_force = u_traj[:, 1]
slack = u_traj[:, 2]
sot = u_traj[:, 3]
plt.subplot(4, 1, 1)
plt.step(results['t_grid_u'], np.concatenate((reaction, [reaction[-1]])))
plt.subplot(4, 1, 2)
plt.step(results['t_grid_u'], np.concatenate((leg_force, [leg_force[-1]])))
plt.subplot(4, 1, 3)
plt.step(results['t_grid_u'], np.concatenate((slack, [slack[-1]])))
plt.subplot(4, 1, 4)
plt.step(results['t_grid_u'], np.concatenate((sot, [sot[-1]])))
plt.show()
def plot_pickle(fname):
with open(fname, 'rb') as f:
experiment_results = pickle.load(f)
sparse_results = experiment_results['results_sparse']
dense_results = experiment_results['results_dense']
for result in sparse_results:
plot_results(result)
for result in dense_results:
plot_results(result)
if __name__ == '__main__':
plot_pickle(sys.argv[1])
| 3,346 | 34.231579 | 117 | py |
nosnoc_py | nosnoc_py-main/examples/hopper_robot/ns_experiment.py | import nosnoc as ns
import numpy as np
import pickle
import sys
from multiprocessing import Pool, cpu_count
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from time import gmtime, strftime
from hopper_ocp_step import solve_ocp_step, get_default_options_step
from hopper_ocp import solve_ocp, get_default_options
X_GOAL = 3.0
N_STAGES = 50
TERMINAL_TIME = 5.0
N_EXPR = [1, 2, 3, 4, 5, 6, 7]
def run_sparse(n_s):
opts = get_default_options_step()
opts.terminal_time = TERMINAL_TIME
opts.N_stages = N_STAGES
opts.n_s = n_s
results = solve_ocp_step(opts=opts, plot=False, x_goal=X_GOAL)
    return results, sum(results['cpu_time_nlp']), sum(results['nlp_iter'])
def run_dense(n_s):
opts = get_default_options()
opts.terminal_time = TERMINAL_TIME
opts.N_stages = N_STAGES
opts.n_s = n_s
opts.pss_mode = ns.PssMode.STEWART
results = solve_ocp(opts=opts, plot=False, dense=True, ref_as_init=False, x_goal=X_GOAL)
    return results, sum(results['cpu_time_nlp']), sum(results['nlp_iter'])
def ns_experiment_mp():
# Try running solver with multiple n_s with both dense and sparse S
cpu_times_dense = []
cpu_times_sparse = []
nlp_iter_dense = []
nlp_iter_sparse = []
results_dense = []
results_sparse = []
n_expr = N_EXPR
with Pool(cpu_count() - 1) as p:
sparse = p.map_async(run_sparse, n_expr)
dense = p.map_async(run_dense, n_expr)
sparse.wait()
dense.wait()
cpu_times_sparse = [e[1] for e in sparse.get()]
cpu_times_dense = [e[1] for e in dense.get()]
nlp_iter_sparse = [e[2] for e in sparse.get()]
nlp_iter_dense = [e[2] for e in dense.get()]
results_sparse = [e[0] for e in sparse.get()]
results_dense = [e[0] for e in dense.get()]
# pickle
with open(strftime("%Y-%m-%d-%H-%M-%S-ns-experiment.pkl", gmtime()), 'wb') as f:
experiment_results = {'results_sparse': results_sparse,
'results_dense': results_dense,
'cpu_times_sparse': cpu_times_sparse,
'cpu_times_dense': cpu_times_dense,
'nlp_iter_sparse': nlp_iter_sparse,
'nlp_iter_dense': nlp_iter_dense,
'n_expr': n_expr}
pickle.dump(experiment_results, f)
plot_for_paper(cpu_times_sparse, cpu_times_dense, nlp_iter_sparse, nlp_iter_dense, n_expr)
def plot_from_pickle(fname):
with open(fname, 'rb') as f:
experiment_results = pickle.load(f)
plot_for_paper(experiment_results['cpu_times_sparse'],
experiment_results['cpu_times_dense'],
experiment_results['nlp_iter_sparse'],
experiment_results['nlp_iter_dense'],
experiment_results['n_expr'])
def plot_for_paper(cpu_times_sparse, cpu_times_dense, nlp_iter_sparse, nlp_iter_dense, n_expr):
ns.latexify_plot()
plt.figure()
plt.plot(n_expr, cpu_times_sparse,'Xb-', label="Step")
plt.plot(n_expr, cpu_times_dense, 'Xr-', label="Stewart")
plt.xlabel('$n_s$')
plt.ylabel('cpu time [s]')
plt.legend(loc='best')
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.figure()
plt.plot(n_expr, np.array(cpu_times_sparse)/np.array(nlp_iter_sparse),'Xb-', label="Step")
plt.plot(n_expr, np.array(cpu_times_dense)/np.array(nlp_iter_dense), 'Xr-', label="Stewart")
    plt.xlabel(r'$n_s$')
plt.ylabel('cpu time/iteration [s/iter]')
plt.legend(loc='best')
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
if __name__ == '__main__':
if len(sys.argv) == 2:
plot_from_pickle(sys.argv[1])
else:
ns_experiment_mp()
| 3,864 | 35.462264 | 96 | py |
nosnoc_py | nosnoc_py-main/examples/hopper_robot/hopper_ocp_step.py | # Hopper OCP
# example inspired by https://github.com/KY-Lin22/NIPOCPEC and https://github.com/thowell/motion_planning/blob/main/models/hopper.jl
# The methods and time-freezing reformulation are detailed in https://arxiv.org/abs/2111.06759
import nosnoc as ns
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from matplotlib.animation import FuncAnimation
import matplotlib.patches as patches
from functools import partial
HEIGHT = 1.0
def get_hopper_ocp_step(opts, lift_algebraic, x_goal, multijump=False):
# hopper model
# model vars
q = ca.SX.sym('q', 4)
v = ca.SX.sym('v', 4)
t = ca.SX.sym('t')
x = ca.vertcat(q, v)
u = ca.SX.sym('u', 3)
sot = ca.SX.sym('sot')
alpha = ca.SX.sym('alpha', 3)
theta = ca.SX.sym('theta', 3)
if lift_algebraic:
beta = ca.SX.sym('beta', 1)
z = ca.vertcat(theta, beta)
z0 = np.ones((4,))
else:
z = theta
z0 = np.ones((3,))
# dims
n_q = 4
n_v = 4
n_x = n_q + n_v
# state equations
mb = 1 # body
ml = 0.1 # link
Ib = 0.25 # body
Il = 0.025 # link
    mu = 0.45  # friction coefficient
g = 9.81
# inertia matrix
M = np.diag([mb + ml, mb + ml, Ib + Il, ml])
# coriolis and gravity
C = np.array([0, (mb + ml)*g, 0, 0]).T
# Control input matrix
B = ca.vertcat(ca.horzcat(0, -np.sin(q[2])),
ca.horzcat(0, np.cos(q[2])),
ca.horzcat(1, 0),
ca.horzcat(0, 1))
f_c_normal = ca.vertcat(0, 1, q[3]*ca.sin(q[2]), -ca.cos(q[2]))
f_c_tangent = ca.vertcat(1, 0, q[3]*ca.cos(q[2]), ca.sin(q[2]))
v_normal = f_c_normal.T@v
v_tangent = f_c_tangent.T@v
f_v = (-C + B@u[0:2])
f_c = q[1] - q[3]*ca.cos(q[2])
if multijump:
n_jumps = int(np.round(x_goal-0.1))
x_step = (x_goal-0.1)/n_jumps
x0 = np.array([0.1, 0.5, 0, 0.5, 0, 0, 0, 0])
x_ref = np.empty((0, n_x))
x_start = x0
# TODO: if N_stages not divisible by n_jumps then this doesn't work... oh well
for ii in range(n_jumps):
# Parameters
x_mid = np.array([x_start[0] + x_step/2, HEIGHT, 0, 0.1, 0, 0, 0, 0])
x_end = np.array([x_start[0] + x_step, 0.5, 0, 0.5, 0, 0, 0, 0])
interpolator = CubicSpline([0, 0.5, 1], [x_start, x_mid, x_end])
t_pts = np.linspace(0, 1, int(np.floor(opts.N_stages/n_jumps))+1)
x_ref = np.concatenate((x_ref, interpolator(t_pts[:-1])))
x_start = x_end
x_ref = np.concatenate((x_ref, np.expand_dims(x_end, axis=0)))
else:
x0 = np.array([0.1, 0.5, 0, 0.5, 0, 0, 0, 0])
x_mid = np.array([(x_goal-0.1)/2+0.1, 0.8, 0, 0.1, 0, 0, 0, 0])
x_end = np.array([x_goal, 0.5, 0, 0.5, 0, 0, 0, 0])
interpolator = CubicSpline([0, 0.5, 1], [x0, x_mid, x_end])
x_ref = interpolator(np.linspace(0, 1, opts.N_stages+1))
    # The control u[2] is a slack variable used to model the no-slip constraint.
ubu = np.array([50, 50, 100, 20])
lbu = np.array([-50, -50, 0, 0.1])
u_guess = np.array([0, 0, 0, 1])
ubx = np.array([x_goal+0.1, 1.5, np.pi, 0.50, 10, 10, 5, 5, np.inf])
lbx = np.array([0, 0, -np.pi, 0.1, -10, -10, -5, -5, -np.inf])
Q = np.diag([100, 100, 20, 50, 0.1, 0.1, 0.1, 0.1])
Q_terminal = np.diag([300, 300, 300, 300, 0.1, 0.1, 0.1, 0.1])
R = np.diag([0.01, 0.01, 1e-5])
    # Hand-crafted least-squares tracking cost
p_x_ref = ca.SX.sym('x_ref', n_x)
f_q = sot*(ca.transpose(x - p_x_ref)@Q@(x-p_x_ref) + ca.transpose(u)@R@u)
f_q_T = ca.transpose(x - x_end)@Q_terminal@(x - x_end)
# hand crafted time freezing :)
a_n = 100
J_normal = f_c_normal
J_tangent = f_c_tangent
inv_M = ca.inv(M)
f_ode = sot * ca.vertcat(v, inv_M@f_v, 1)
inv_M_aux = inv_M
f_aux_pos = ca.vertcat(ca.SX.zeros(n_q, 1), inv_M_aux@(J_normal-J_tangent*mu)*a_n, 0)
f_aux_neg = ca.vertcat(ca.SX.zeros(n_q, 1), inv_M_aux@(J_normal+J_tangent*mu)*a_n, 0)
c = [ca.vertcat(f_c, v_normal, v_tangent)]
f_x = theta[0]*f_ode + theta[1]*f_aux_pos+theta[2]*f_aux_neg
if lift_algebraic:
g_z = ca.vertcat(theta-ca.vertcat(alpha[0]+beta,
beta*alpha[2],
beta*(1-alpha[2])),
beta-(1-alpha[0])*(1-alpha[1]))
g_comp_path = ca.horzcat(ca.vertcat(v_tangent, -v_tangent), ca.vertcat(beta, beta))
else:
g_z = theta-ca.vertcat(alpha[0]+(1-alpha[0])*(1-alpha[1]),
(1-alpha[0])*(1-alpha[1])*alpha[2],
(1-alpha[0])*(1-alpha[1])*(1-alpha[2]))
g_comp_path = ca.horzcat(ca.vertcat(v_tangent, -v_tangent), ca.vertcat(theta[1]+theta[2], theta[1]+theta[2]))
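    # The lifted variant introduces beta = (1 - alpha_0)*(1 - alpha_1) as an extra algebraic so
    # that g_z stays at most bilinear in (alpha, beta); without lifting, the multi-affine theta
    # expressions are written out directly. In both cases the path complementarity forbids
    # tangential slip whenever a contact mode (beta, resp. theta_1 + theta_2) is active.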
model = ns.NosnocModel(x=ca.vertcat(x, t), f_x=[f_x], alpha=[alpha], c=c, x0=np.concatenate((x0, [0])),
u=ca.vertcat(u, sot), p_time_var=p_x_ref, p_time_var_val=x_ref[1:, :], t_var=t,
z=z, z0=z0, g_z=g_z)
ocp = ns.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, f_terminal=f_q_T, g_path_comp=g_comp_path, lbx=lbx, ubx=ubx, u_guess=u_guess)
    v_tangent_fun = ca.Function('v_tangent_fun', [x], [v_tangent])
v_normal_fun = ca.Function('v_normal_fun', [x], [v_normal])
f_c_fun = ca.Function('f_c_fun', [x], [f_c])
return model, ocp, x_ref, v_tangent_fun, v_normal_fun, f_c_fun
def get_default_options_step():
opts = ns.NosnocOpts()
opts.pss_mode = ns.PssMode.STEP
opts.use_fesd = True
comp_tol = 1e-9
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.sigma_0 = 100.
opts.homotopy_update_rule = ns.HomotopyUpdateRule.LINEAR
opts.n_s = 2
opts.step_equilibration = ns.StepEquilibrationMode.HEURISTIC_MEAN
opts.mpcc_mode = ns.MpccMode.SCHOLTES_INEQ
#opts.cross_comp_mode = ns.CrossComplementarityMode.SUM_LAMBDAS_COMPLEMENT_WITH_EVERY_THETA
opts.print_level = 1
opts.opts_casadi_nlp['ipopt']['max_iter'] = 4000
opts.opts_casadi_nlp['ipopt']['acceptable_tol'] = 1e-6
opts.time_freezing = True
opts.equidistant_control_grid = True
opts.N_stages = 40
opts.N_finite_elements = 3
opts.max_iter_homotopy = 6
return opts
def solve_ocp_step(opts=None, plot=True, lift_algebraic=False, x_goal=1.0, ref_as_init=False, multijump=False):
if opts is None:
opts = get_default_options_step()
opts.terminal_time = 5.0
opts.N_stages = 50
model, ocp, x_ref, v_tangent_fun, v_normal_fun, f_c_fun = get_hopper_ocp_step(opts, lift_algebraic, x_goal, multijump)
solver = ns.NosnocSolver(opts, model, ocp)
# Calculate time steps and initialize x to [xref, t]
if ref_as_init:
opts.initialization_strategy = ns.InitializationStrategy.EXTERNAL
t_steps = np.linspace(0, opts.terminal_time, opts.N_stages+1)
solver.set('x', np.c_[x_ref[1:, :], t_steps[1:]])
results = solver.solve()
if plot:
plot_results(results, opts, x_ref, v_tangent_fun, v_normal_fun, f_c_fun, x_goal)
return results
def init_func(htrail, ftrail):
htrail.set_data([], [])
ftrail.set_data([], [])
return htrail, ftrail
frame_cnt = 0
def animate_robot(state, head, foot, body, ftrail, htrail):
global frame_cnt
x_head, y_head = state[0], state[1]
x_foot, y_foot = state[0] - state[3]*np.sin(state[2]), state[1] - state[3]*np.cos(state[2])
head.set_offsets([x_head, y_head])
foot.set_offsets([x_foot, y_foot])
body.set_data([x_foot, x_head], [y_foot, y_head])
ftrail.set_data(np.append(ftrail.get_xdata(orig=False), x_foot), np.append(ftrail.get_ydata(orig=False), y_foot))
htrail.set_data(np.append(htrail.get_xdata(orig=False), x_head), np.append(htrail.get_ydata(orig=False), y_head))
plt.savefig(str(frame_cnt)+'.pdf')
frame_cnt += 1
return head, foot, body, ftrail, htrail
def plot_results(results, opts, x_ref, v_tangent_fun, v_normal_fun, f_c_fun, x_goal):
fig, ax = plt.subplots()
ax.set_xlim(0, x_goal+0.1)
ax.set_ylim(-0.1, HEIGHT+0.5)
patch = patches.Rectangle((-0.1, -0.1), x_goal+10, 0.1, color='grey')
ax.add_patch(patch)
ax.plot(x_ref[:, 0], x_ref[:, 1], color='lightgrey')
head = ax.scatter([0], [0], color='b', s=[100])
foot = ax.scatter([0], [0], color='r', s=[50])
body, = ax.plot([], [], 'k')
ftrail, = ax.plot([], [], color='r', alpha=0.5)
htrail, = ax.plot([], [], color='b', alpha=0.5)
ani = FuncAnimation(fig, partial(animate_robot, head=head, foot=foot, body=body, htrail=htrail, ftrail=ftrail),
init_func=partial(init_func, htrail=htrail, ftrail=ftrail),
frames=results['x_traj'], blit=True, repeat=False)
try:
ani.save('hopper.gif', writer='imagemagick', fps=10)
except Exception:
print("install imagemagick to save as gif")
# Plot Trajectory
plt.figure()
x_traj = np.array(results['x_traj'])
t = x_traj[:, -1]
x = x_traj[:, 0]
y = x_traj[:, 1]
theta = x_traj[:, 2]
leg_len = x_traj[:, 3]
plt.subplot(4, 1, 1)
plt.plot(results['t_grid'], t)
plt.subplot(4, 1, 2)
plt.plot(results['t_grid'], x, color='r')
plt.plot(results['t_grid'], y, color='b')
plt.plot(results['t_grid_u'], x_ref[:, 0], color='r', alpha=0.5, linestyle='--')
plt.plot(results['t_grid_u'], x_ref[:, 1], color='b', alpha=0.5, linestyle='--')
plt.subplot(4, 1, 3)
plt.plot(results['t_grid'], theta, color='b')
plt.subplot(4, 1, 4)
plt.plot(results['t_grid'], leg_len, color='b')
plt.plot(results['t_grid_u'], x_ref[:, 3], color='b', alpha=0.5, linestyle='--')
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(results['t_grid'], f_c_fun(x_traj[:, :-1].T).full().T)
plt.subplot(3, 1, 2)
plt.plot(results['t_grid'], v_tangent_fun(x_traj[:, :-1].T).full().T)
plt.subplot(3, 1, 3)
plt.plot(results['t_grid'], v_normal_fun(x_traj[:, :-1].T).full().T)
# Plot Controls
plt.figure()
u_traj = np.array(results['u_traj'])
reaction = u_traj[:, 0]
leg_force = u_traj[:, 1]
slack = u_traj[:, 2]
sot = u_traj[:, 3]
plt.subplot(4, 1, 1)
plt.step(results['t_grid_u'], np.concatenate((reaction, [reaction[-1]])))
plt.subplot(4, 1, 2)
plt.step(results['t_grid_u'], np.concatenate((leg_force, [leg_force[-1]])))
plt.subplot(4, 1, 3)
plt.step(results['t_grid_u'], np.concatenate((slack, [slack[-1]])))
plt.subplot(4, 1, 4)
plt.step(results['t_grid_u'], np.concatenate((sot, [sot[-1]])))
plt.show()
if __name__ == '__main__':
solve_ocp_step(x_goal=5.0, multijump=True, ref_as_init=True, lift_algebraic=True)
| 10,811 | 36.541667 | 132 | py |
nosnoc_py | nosnoc_py-main/examples/sliding_mode_ocp/sliding_mode_ocp.py | import numpy as np
import matplotlib.pyplot as plt
from casadi import SX, vertcat, horzcat
import nosnoc
# example opts
TERMINAL_CONSTRAINT = True
LINEAR_CONTROL = True
TERMINAL_TIME = 4.0
if LINEAR_CONTROL:
U_MAX = 10
V0 = np.zeros((2,))
else:
U_MAX = 2
V0 = np.zeros((0,))
X0 = np.concatenate((np.array([2 * np.pi / 3, np.pi / 3]), V0))
X_TARGET = np.array([-np.pi / 6, -np.pi / 4])
# constraints
LBU = -U_MAX * np.ones((2,))
UBU = U_MAX * np.ones((2,))
# solver opts
def get_default_options() -> nosnoc.NosnocOpts:
opts = nosnoc.NosnocOpts()
opts.irk_representation = nosnoc.IrkRepresentation.DIFFERENTIAL
opts.comp_tol = 1e-9
opts.homotopy_update_slope = 0.1
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.rho_h = 1e1
opts.print_level = 1
opts.N_stages = 6
opts.N_finite_elements = 6
return opts
def get_sliding_mode_ocp_description():
    # Variable definition
x1 = SX.sym("x1")
x2 = SX.sym("x2")
v1 = SX.sym("v1")
v2 = SX.sym("v2")
# Control
u1 = SX.sym("u1")
u2 = SX.sym("u2")
u = vertcat(u1, u2)
if LINEAR_CONTROL:
x = vertcat(x1, x2, v1, v2)
# dynamics
f_11 = vertcat(-1 + v1, 0, u1, u2)
f_12 = vertcat(1 + v1, 0, u1, u2)
f_21 = vertcat(0, -1 + v2, u1, u2)
f_22 = vertcat(0, 1 + v2, u1, u2)
# Objective
f_q = v1**2 + v2**2
else:
x = vertcat(x1, x2)
# dynamics
f_11 = vertcat(-1 + u1, 0)
f_12 = vertcat(1 + u1, 0)
f_21 = vertcat(0, -1 + u2)
f_22 = vertcat(0, 1 + u2)
# Objective
f_q = u1**2 + u2**2
# Switching Functions
p = 2
a = 0.15
a1 = 0
b = -0.05
q = 3
c1 = x1 + a * (x2 - a1)**p
c2 = x2 + b * x1**q
c = [c1, c2]
S1 = np.array([[1], [-1]])
S2 = np.array([[1], [-1]])
S = [S1, S2]
    # Modes of the ODE in each switching layer
F1 = horzcat(f_11, f_12)
F2 = horzcat(f_21, f_22)
F = [F1, F2]
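    # Two independent switching surfaces c1 = 0 and c2 = 0, each with its own sign matrix,
    # define a 2 x 2 product of regions; on the surfaces the Filippov convexification selects
    # a convex combination of the neighbouring vector fields, which is what produces the
    # sliding modes this example is about.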
if TERMINAL_CONSTRAINT:
g_terminal = x[:2] - X_TARGET
f_terminal = SX.zeros(1)
else:
g_terminal = SX.zeros(0)
f_terminal = 100 * (x[:2] - X_TARGET).T @ (x[:2] - X_TARGET)
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, u=u)
ocp = nosnoc.NosnocOcp(lbu=LBU, ubu=UBU, f_q=f_q, f_terminal=f_terminal, g_terminal=g_terminal)
return model, ocp
def solve_ocp(opts=None):
if opts is None:
opts = get_default_options()
[model, ocp] = get_sliding_mode_ocp_description()
opts.terminal_time = TERMINAL_TIME
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
return results
def example(plot=True):
results = solve_ocp()
if plot:
plot_sliding_mode(
results["x_traj"],
results["u_traj"],
results["t_grid"],
results["t_grid_u"],
)
plot_time_steps(results["time_steps"])
def plot_sliding_mode(x_traj, u_traj, t_grid, t_grid_u, latexify=True):
plt.figure()
plt.subplot(2, 1, 1)
plt.step(t_grid_u, [u_traj[0]] + u_traj, label="u")
plt.grid()
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(t_grid, x_traj, label="x")
plt.legend()
plt.grid()
plt.show()
def plot_time_steps(t_steps):
n = len(t_steps)
plt.figure()
    plt.step(list(range(n)), t_steps)
plt.grid()
plt.ylabel("time_step [s]")
plt.ylabel("time_step index")
plt.show()
if __name__ == "__main__":
example()
| 3,571 | 20.648485 | 99 | py |
nosnoc_py | nosnoc_py-main/examples/car_control_complementarity/car_control_complementarity.py | # This example uses a path complementarity constraint to enforce that the car does not
# brake and accelerate at the same time.
import nosnoc
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
X0 = np.array([0, 0])
X_TARGET = np.array([500, 0])
def car_model():
q = ca.SX.sym('q')
v = ca.SX.sym('v')
x = ca.vertcat(q, v)
u = ca.SX.sym('u', 2)
lbu = np.zeros((2,))
ubu = np.ones((2,))
k1 = 5
k2 = 3
kb = 5
j_a = 1
j_b = 1
A = np.array([
[0, 1],
[0, 0]
])
B1 = np.array([
[0, 0],
[k1, -kb]
])
B2 = np.array([
[0, 0],
[k2, -kb]
])
f_1 = A@x + B1@u
f_2 = A@x + B2@u
F = [ca.horzcat(f_1, f_2)]
c = [v-15]
S = [np.array([[-1], [1]])]
g_terminal = x - X_TARGET
f_q = j_a*u[0]**2 + j_b*u[1]**2
g_path_comp = ca.horzcat(u[0], u[1])
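    # Path complementarity: each row of g_path_comp pairs the expression in its first column
    # with the one in its second column (cf. the hopper examples) and the solver drives their
    # product to zero, so here the accelerator u[0] and the brake u[1] cannot be active at
    # the same time.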
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, u=u)
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, g_terminal=g_terminal, g_path_comp=g_path_comp)
return model, ocp
def get_default_options():
opts = nosnoc.NosnocOpts()
# opts.pss_mode = nosnoc.PssMode.STEP
opts.use_fesd = True
comp_tol = 1e-6
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.L2_RELAXED_SCALED
opts.print_level = 1
opts.N_stages = 30
opts.N_finite_elements = 2
return opts
def solve_ocp(opts=None):
if opts is None:
opts = get_default_options()
model, ocp = car_model()
opts.terminal_time = 30
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
return results
def plot_car_model(results, latexify=True):
x_traj = np.array(results['x_traj'])
u_traj = np.array(results['u_traj'])
t_grid = results['t_grid']
t_grid_u = results['t_grid_u']
if latexify:
nosnoc.latexify_plot()
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t_grid, x_traj[:, 0])
plt.ylabel("$x$")
plt.xlabel("time [s]")
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(t_grid, x_traj[:, 1])
plt.ylabel("$v$")
plt.xlabel("time [s]")
plt.grid()
plt.figure()
plt.subplot(2, 1, 1)
plt.step(t_grid_u, np.concatenate([[u_traj[0, 0]], u_traj[:, 0]]))
plt.ylabel("$u_a$")
plt.xlabel("time [s]")
plt.grid()
plt.subplot(2, 1, 2)
plt.step(t_grid_u, np.concatenate([[u_traj[0, 1]], u_traj[:, 1]]))
plt.ylabel("$u_b$")
plt.xlabel("time [s]")
plt.grid()
plt.show()
def example():
results = solve_ocp()
plot_car_model(results)
if __name__ == "__main__":
example()
| 2,718 | 18.702899 | 101 | py |
nosnoc_py | nosnoc_py-main/examples/simple_car_algebraics/simple_car_algebraic.py | # This example shows the use of a user-defined algebraic variable z (defined implicitly via g_z)
# as the switching function of a simple car model.
import nosnoc
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt
X0 = np.array([0, 0])
X_TARGET = np.array([500, 0])
def car_model():
q = ca.SX.sym('q')
v = ca.SX.sym('v')
x = ca.vertcat(q, v)
z = ca.SX.sym('z')
u = ca.SX.sym('u')
lbu = -np.ones((1,))
ubu = np.ones((1,))
k1 = 5
k2 = 3
j = 1
A = np.array([
[0, 1],
[0, 0]
])
B1 = np.array([
[0],
[k1]
])
B2 = np.array([
[0],
[k2]
])
f_1 = A@x + B1@u
f_2 = A@x + B2@u
F = [ca.horzcat(f_1, f_2)]
c = [z]
S = [np.array([[-1], [1]])]
g_terminal = x - X_TARGET
f_q = j*u[0]**2
g_z = z-(v-15)
z0 = [-15]
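    # The switching function is written in terms of the user-defined algebraic variable z,
    # defined implicitly by g_z = 0 (here simply z = v - 15); z0 is a consistent initial guess
    # for z at x0, and the rootfinder_for_initial_z option enabled in get_default_options()
    # asks the solver to compute z consistently from g_z.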
model = nosnoc.NosnocModel(x=x, F=F, S=S, c=c, x0=X0, u=u, z=z, g_z=g_z, z0=z0)
ocp = nosnoc.NosnocOcp(lbu=lbu, ubu=ubu, f_q=f_q, g_terminal=g_terminal)
return model, ocp
def get_default_options():
opts = nosnoc.NosnocOpts()
# opts.pss_mode = nosnoc.PssMode.STEP
opts.use_fesd = True
comp_tol = 1e-6
opts.comp_tol = comp_tol
opts.homotopy_update_slope = 0.1
opts.n_s = 2
opts.step_equilibration = nosnoc.StepEquilibrationMode.L2_RELAXED_SCALED
opts.print_level = 1
opts.N_stages = 30
opts.N_finite_elements = 2
opts.rootfinder_for_initial_z = True
return opts
def solve_ocp(opts=None):
if opts is None:
opts = get_default_options()
model, ocp = car_model()
opts.terminal_time = 30
solver = nosnoc.NosnocSolver(opts, model, ocp)
results = solver.solve()
return results
def plot_car_model(results, latexify=True):
x_traj = np.array(results['x_traj'])
u_traj = np.array(results['u_traj'])
t_grid = results['t_grid']
t_grid_u = results['t_grid_u']
if latexify:
nosnoc.latexify_plot()
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(t_grid, x_traj[:, 0])
plt.ylabel("$x$")
plt.xlabel("time [s]")
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(t_grid, x_traj[:, 1])
plt.ylabel("$v$")
plt.xlabel("time [s]")
plt.grid()
plt.figure()
plt.step(t_grid_u, np.concatenate([[u_traj[0, 0]], u_traj[:, 0]]))
plt.ylabel("$u_a$")
plt.xlabel("time [s]")
plt.grid()
plt.show()
def example():
results = solve_ocp()
plot_car_model(results)
if __name__ == "__main__":
example()
| 2,520 | 18.098485 | 90 | py |
nosnoc_py | nosnoc_py-main/test/test_auto_model.py | import unittest
import casadi as ca
import numpy as np
import nosnoc
class TestAutoModel(unittest.TestCase):
"""
Test auto model class
"""
def test_find_nonlinear_components(self):
x = ca.SX.sym('x')
f_nonsmooth_ode = x + ca.sin(x) + x*ca.cos(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
results = am._find_nonlinear_components(am.f_nonsmooth_ode)
self.assertEqual(len(results), 3)
f_nonsmooth_ode = -(x + ca.sin(x) + x*ca.cos(x))
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
results = am._find_nonlinear_components(am.f_nonsmooth_ode)
self.assertEqual(len(results), 3)
f_nonsmooth_ode = x - ca.sin(x) + x*(-ca.cos(x))
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
results = am._find_nonlinear_components(am.f_nonsmooth_ode)
self.assertEqual(len(results), 3)
def test_check_additive(self):
x = ca.SX.sym('x')
f_nonsmooth_ode = x*ca.sign(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
additive = am._check_additive(am.f_nonsmooth_ode)
self.assertTrue(additive)
f_nonsmooth_ode = ca.fmax(x, 5)*ca.sign(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
additive = am._check_additive(am.f_nonsmooth_ode)
self.assertFalse(additive)
def test_check_smooth(self):
x = ca.SX.sym('x')
f_nonsmooth_ode = x
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
smooth = am._check_smooth(am.f_nonsmooth_ode)
self.assertTrue(smooth)
f_nonsmooth_ode = x*ca.sign(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
smooth = am._check_smooth(am.f_nonsmooth_ode)
self.assertFalse(smooth)
f_nonsmooth_ode = ca.fmax(x, 5)*ca.sign(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
smooth = am._check_smooth(am.f_nonsmooth_ode)
self.assertFalse(smooth)
def test_rebuild_nonlin(self):
x = ca.SX.sym('x')
f_nonsmooth_ode = x
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
f = am._rebuild_nonlin(am.f_nonsmooth_ode)
self.assertEqual(f.str(), f_nonsmooth_ode.str())
f_nonsmooth_ode = ca.sign(x)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
f = am._rebuild_nonlin(am.f_nonsmooth_ode)
self.assertEqual(len(am.alpha), 1)
self.assertEqual(len(am.c), 1)
self.assertEqual(am.c[0].str(), 'x')
self.assertEqual(f.str(), '((2*alpha_0)-1)')
f_nonsmooth_ode = ca.fmax(x, 5)
am = nosnoc.NosnocAutoModel(x=x, f_nonsmooth_ode=f_nonsmooth_ode, x0=np.array([]), u=ca.SX([]))
f = am._rebuild_nonlin(am.f_nonsmooth_ode)
self.assertEqual(len(am.alpha), 1)
self.assertEqual(len(am.c), 1)
self.assertEqual(am.c[0].str(), '(x-5)')
self.assertEqual(f.str(), '((alpha_0*x)+(5*(1-alpha_0)))')
| 3,413 | 39.642857 | 103 | py |
nosnoc_py | nosnoc_py-main/test/oscillator_test.py | from examples.oscillator.oscillator_example import (
get_default_options,
TSIM,
X_SOL,
solve_oscillator,
)
import unittest
import nosnoc
import numpy as np
EXACT_SWITCH_TIME = 1
X_SWITCH_EXACT = np.array([1.0, 0.0])
def compute_errors(results) -> dict:
X_sim = results["X_sim"]
switch_diff = np.abs(results["t_grid"] - EXACT_SWITCH_TIME)
err_t_switch = np.min(switch_diff)
switch_index = np.where(switch_diff == err_t_switch)[0][0]
err_x_switch = np.max(np.abs(X_sim[switch_index] - X_SWITCH_EXACT))
err_t_end = np.abs(results["t_grid"][-1] - TSIM)
err_x_end = np.max(np.abs(X_sim[-1] - X_SOL))
return {
"t_switch": err_t_switch,
"t_end": err_t_end,
"x_switch": err_x_switch,
"x_end": err_x_end,
}
class OscillatorTests(unittest.TestCase):
def test_default(self):
opts = get_default_options()
opts.print_level = 0
results = solve_oscillator(opts, do_plot=False)
errors = compute_errors(results)
print(errors)
tol = 1e-5
assert errors["t_switch"] < tol
assert errors["t_end"] < tol
assert errors["x_switch"] < tol
assert errors["x_end"] < tol
def test_polishing(self):
opts = get_default_options()
opts.print_level = 0
opts.comp_tol = 1e-4
opts.do_polishing_step = True
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
# opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
# opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER
results = solve_oscillator(opts, do_plot=False)
errors = compute_errors(results)
print(errors)
tol = 1e-5
assert errors["t_switch"] < tol
assert errors["t_end"] < tol
assert errors["x_switch"] < tol
assert errors["x_end"] < tol
def test_fix_active_set(self):
opts = get_default_options()
opts.print_level = 0
opts.fix_active_set_fe0 = True
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.step_equilibration = nosnoc.StepEquilibrationMode.L2_RELAXED
results = solve_oscillator(opts, do_plot=False)
errors = compute_errors(results)
print(errors)
tol = 1e-5
assert errors["t_switch"] < tol
assert errors["t_end"] < tol
assert errors["x_switch"] < tol
assert errors["x_end"] < tol
if __name__ == "__main__":
unittest.main()
# uncomment to run single test locally
# oscillator_test = OscillatorTests()
# oscillator_test.test_least_squares_problem()
| 2,798 | 28.463158 | 106 | py |
nosnoc_py | nosnoc_py-main/test/test_timefreezing.py | import unittest
from examples.temperature_control.time_freezing_hysteresis_temperature_control import control
class TestTimeFreezing(unittest.TestCase):
def test_control_example(self):
model, opts, solver, results = control(with_plot=False)
end_time = model.t_fun(results["x_list"][-1])
self.assertLessEqual(opts.terminal_time - opts.time_freezing_tolerance, end_time)
self.assertLessEqual(end_time, opts.terminal_time + opts.time_freezing_tolerance)
if __name__ == "__main__":
unittest.main()
| 539 | 32.75 | 93 | py |
nosnoc_py | nosnoc_py-main/test/test_ocp_motor.py | import unittest
from parameterized import parameterized
from examples.motor_with_friction.motor_with_friction_ocp import (
solve_ocp,
example,
get_default_options,
X0,
X_TARGET,
)
import nosnoc
import numpy as np
options = [(step_equilibration, pss_mode)
for pss_mode in nosnoc.PssMode
for step_equilibration in nosnoc.StepEquilibrationMode
if step_equilibration != nosnoc.StepEquilibrationMode.DIRECT]
class TestOcpMotor(unittest.TestCase):
def test_default(self):
example(plot=False)
@parameterized.expand(options)
def test_problem(self, step_equilibration, pss_mode):
opts = get_default_options()
opts.step_equilibration = step_equilibration
opts.pss_mode = pss_mode
# opts.print_level = 0
# print(f"test setting: {opts}")
results = solve_ocp(opts)
x_traj = results["x_traj"]
message = f"For step_equilibration {step_equilibration} and pss_mode {pss_mode}"
self.assertTrue(np.allclose(x_traj[0], X0, atol=1e-4), message)
self.assertTrue(np.allclose(x_traj[-1], X_TARGET, atol=1e-4), message)
if __name__ == "__main__":
unittest.main()
| 1,205 | 27.046512 | 88 | py |
nosnoc_py | nosnoc_py-main/test/test_problem_dimensions.py | import unittest
from examples.simplest.simplest_example import (
get_default_options,
get_simplest_model_sliding,
)
import nosnoc
NS_VALUES = range(1, 5)
N_FINITE_ELEMENT_VALUES = range(2, 5)
# TODO: add control stages instead of just simulation
class TestProblemDimension(unittest.TestCase):
def test_w(self):
model = get_simplest_model_sliding()
for ns in NS_VALUES:
for Nfe in N_FINITE_ELEMENT_VALUES:
for pss_mode in nosnoc.PssMode:
for irk in nosnoc.IrkSchemes:
opts = get_default_options()
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.print_level = 0
opts.n_s = ns
opts.N_finite_elements = Nfe
opts.irk_scheme = irk
opts.pss_mode = pss_mode
opts.preprocess()
if pss_mode == nosnoc.PssMode.STEWART:
n_x = 1
n_z = 5
n_h = 1
elif pss_mode == nosnoc.PssMode.STEP:
n_x = 1
n_z = 3
n_h = 1
nw_expected = Nfe * (ns * (n_x + n_z) + n_h)
if opts.right_boundary_point_explicit:
n_end = 0
else:
nw_expected += Nfe * n_x
if pss_mode == nosnoc.PssMode.STEWART:
n_end = n_z - 2
elif pss_mode == nosnoc.PssMode.STEP:
n_end = n_z - 1
nw_expected += (Nfe - 1) * (n_end)
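                        # The expected count above: each of the Nfe finite elements carries n_s
                        # stage points with n_x differential and n_z algebraic variables plus one
                        # step size h; if the IRK scheme's right boundary point is not explicit,
                        # an additional state per element and the boundary algebraics (n_end) on
                        # the Nfe - 1 interior element boundaries are added as well.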
try:
solver = nosnoc.NosnocSolver(opts, model)
message = f"For ns {ns}, Nfe {Nfe}, pss_mode {pss_mode}, irk {irk}"
self.assertEqual(solver.problem.w.shape[0], nw_expected, msg=message)
except AssertionError:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")
if __name__ == "__main__":
unittest.main()
| 2,338 | 36.725806 | 97 | py |
nosnoc_py | nosnoc_py-main/test/simple_sim_tests.py | from examples.simplest.simplest_example import (
TOL,
get_default_options,
X0,
TSIM,
EXACT_SWITCH_TIME,
solve_simplest_example,
get_simplest_model_sliding,
get_simplest_model_switch,
)
import unittest
import nosnoc
import numpy as np
NS_VALUES = range(1, 4)
N_FINITE_ELEMENT_VALUES = range(2, 4)
NO_FESD_X_END = 0.36692644
def compute_errors(results, model) -> dict:
X_sim = results["X_sim"]
err_x0 = np.abs(X_sim[0] - X0)
switch_diff = np.abs(results["t_grid"] - EXACT_SWITCH_TIME)
err_t_switch = np.min(switch_diff)
switch_index = np.where(switch_diff == err_t_switch)[0][0]
err_x_switch = np.abs(X_sim[switch_index])
err_t_end = np.abs(results["t_grid"][-1] - TSIM)
x_end_ref = 0.0
if "switch" in model.name:
x_end_ref = TSIM - EXACT_SWITCH_TIME
err_x_end = np.abs(X_sim[-1] - x_end_ref)
return {
"x0": err_x0,
"t_switch": err_t_switch,
"t_end": err_t_end,
"x_switch": err_x_switch,
"x_end": err_x_end,
}
def test_opts(opts, model):
results = solve_simplest_example(opts=opts, model=model)
errors = compute_errors(results, model)
print(errors)
tol = 1e1 * TOL
assert errors["x0"] < tol
assert errors["t_switch"] < tol
assert errors["t_end"] < tol
assert errors["x_switch"] < tol
assert errors["x_end"] < tol
return results
class SimpleTests(unittest.TestCase):
def test_default(self):
model = get_simplest_model_sliding()
test_opts(get_default_options(), model)
def test_switch(self):
model = get_simplest_model_switch()
for ns in NS_VALUES:
for Nfe in N_FINITE_ELEMENT_VALUES:
for pss_mode in nosnoc.PssMode:
for cross_comp_mode in nosnoc.CrossComplementarityMode:
opts = get_default_options()
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_DELTA
opts.print_level = 0
opts.n_s = ns
opts.N_finite_elements = Nfe
opts.pss_mode = pss_mode
opts.cross_comp_mode = cross_comp_mode
opts.print_level = 0
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")
print("main_test_switch: SUCCESS")
def test_sliding(self):
model = get_simplest_model_sliding()
for ns in NS_VALUES:
for Nfe in N_FINITE_ELEMENT_VALUES:
for pss_mode in nosnoc.PssMode:
for irk_scheme in nosnoc.IrkSchemes:
opts = get_default_options()
opts.step_equilibration = nosnoc.StepEquilibrationMode.HEURISTIC_MEAN
opts.irk_scheme = irk_scheme
opts.print_level = 0
opts.n_s = ns
opts.N_finite_elements = Nfe
opts.pss_mode = pss_mode
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")
print("main_test_sliding: SUCCESS")
def test_discretization(self):
model = get_simplest_model_sliding()
for mpcc_mode in [nosnoc.MpccMode.SCHOLTES_EQ, nosnoc.MpccMode.SCHOLTES_INEQ]:
for irk_scheme in nosnoc.IrkSchemes:
for irk_representation in nosnoc.IrkRepresentation:
opts = get_default_options()
opts.mpcc_mode = mpcc_mode
opts.irk_scheme = irk_scheme
opts.print_level = 0
opts.irk_representation = irk_representation
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")
print("main_test_sliding: SUCCESS")
def test_fesd_off(self):
model = get_simplest_model_switch()
opts = get_default_options()
opts.print_level = 0
opts.use_fesd = False
try:
# solve
results = solve_simplest_example(opts=opts, model=model)
errors = compute_errors(results, model)
tol = 1e1 * TOL
# these should be off
assert errors["x_end"] > 0.01
assert errors["t_switch"] > 0.01
# these should be correct
assert errors["x0"] < tol
assert errors["t_end"] < tol
#
assert np.allclose(results["time_steps"], np.min(results["time_steps"]))
except:
raise Exception("Test with FESD off failed")
print("main_test_fesd_off: SUCCESS")
def test_least_squares_problem(self):
model = get_simplest_model_switch()
opts = get_default_options()
opts.print_level = 2
opts.n_s = 2
opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER_IP_AUG
# opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER
# opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT_COMPLEMENTARITY
opts.initialization_strategy = nosnoc.InitializationStrategy.ALL_XCURRENT_W0_START
# opts.fix_active_set_fe0 = True
opts.sigma_0 = 1e0
opts.gamma_h = np.inf
# opts.comp_tol = 1e-5
# opts.do_polishing_step = True
try:
results = test_opts(opts, model=model)
print(results["t_grid"])
except:
raise Exception(f"Test failed.")
def test_least_squares_problem_opts(self):
model = get_simplest_model_switch()
for step_equilibration in [nosnoc.StepEquilibrationMode.DIRECT_COMPLEMENTARITY, nosnoc.StepEquilibrationMode.DIRECT]:
for fix_as in [True, False]:
opts = get_default_options()
opts.fix_active_set_fe0 = fix_as
opts.print_level = 2
opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER_IP_AUG
opts.step_equilibration = step_equilibration
opts.initialization_strategy = nosnoc.InitializationStrategy.ALL_XCURRENT_W0_START
opts.sigma_0 = 1e0
opts.gamma_h = np.inf
try:
results = test_opts(opts, model=model)
# print(results["t_grid"])
except:
# print(f"Test failed with {fix_as=}, {step_equilibration=}")
raise Exception(f"Test failed with {fix_as=}, {step_equilibration=}")
def test_initializations(self):
model = get_simplest_model_switch()
for initialization_strategy in nosnoc.InitializationStrategy:
opts = get_default_options()
opts.print_level = 0
opts.initialization_strategy = initialization_strategy
print(f"\ntesting initialization_strategy = {initialization_strategy}")
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=}")
def test_polishing(self):
model = get_simplest_model_switch()
opts = get_default_options()
opts.print_level = 1
opts.do_polishing_step = True
opts.comp_tol = 1e-3
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")
if __name__ == "__main__":
unittest.main()
# uncomment to run single test locally
# simple_test = SimpleTests()
# simple_test.test_least_squares_problem()
| 8,386 | 34.09205 | 125 | py |
nosnoc_py | nosnoc_py-main/test/test_parametric_ocp.py | import unittest
from examples.cart_pole_with_friction.parametric_cart_pole_with_friction import solve_paramteric_example
from examples.cart_pole_with_friction.cart_pole_with_friction import solve_example
import numpy as np
class TestParametericOcp(unittest.TestCase):
def test_one_parametric_ocp(self):
ref_results = solve_example()
results_parametric = solve_paramteric_example(with_global_var=False)
self.assertTrue(np.allclose(ref_results["w_sol"], results_parametric["w_sol"], atol=1e-7))
        self.assertTrue(np.all(ref_results["nlp_iter"] == results_parametric["nlp_iter"]))
self.assertEqual(results_parametric["v_global"].shape, (1, 0))
self.assertEqual(ref_results["v_global"].shape, (1, 0))
results_with_global_var = solve_paramteric_example(with_global_var=True)
self.assertTrue(np.allclose(np.ones((1,)), results_with_global_var["v_global"], atol=1e-7))
self.assertTrue(
np.allclose(np.array(ref_results["cost_val"]),
np.array(results_with_global_var["cost_val"]),
atol=1e-6))
if __name__ == "__main__":
unittest.main()
| 1,178 | 39.655172 | 104 | py |