Dataset schema. Each field is listed with its dtype and the summary statistic shown for it (number of distinct classes, integer range, or string-length range):

| Field                      | Type          | Stats       |
|----------------------------|---------------|-------------|
| task_type                  | stringclasses | 4 values    |
| code_task                  | stringclasses | 15 values   |
| start_line                 | int64         | 4 to 1.79k  |
| end_line                   | int64         | 4 to 1.8k   |
| before                     | stringlengths | 79 to 76.1k |
| between                    | stringlengths | 17 to 806   |
| after                      | stringlengths | 2 to 72.6k  |
| reason_categories_output   | stringlengths | 2 to 2.24k  |
| horizon_categories_output  | stringlengths | 83 to 3.99k |
| reason_freq_analysis       | stringclasses | 150 values  |
| horizon_freq_analysis      | stringlengths | 23 to 185   |
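The example records below suggest how the fields fit together: `before` and `after` hold the source lines surrounding a masked span, `between` holds the ground-truth infill for lines `start_line` through `end_line`, and `reason_categories_output` / `horizon_categories_output` (with their `*_freq_analysis` summaries) carry the dependency annotations for that span. A minimal loading sketch is shown below, assuming the dataset is reachable through the `datasets` library under a placeholder id and that the list-like fields are stored as Python-literal strings; both assumptions may differ from the actual release.

```python
import ast

from datasets import load_dataset

# Placeholder dataset id and split; substitute the real ones.
ds = load_dataset("user/code-infilling-annotations", split="train")
row = ds[0]

# 'before', 'between', and 'after' appear to be stringified Python lists of source lines.
before = ast.literal_eval(row["before"])
between = ast.literal_eval(row["between"])
after = ast.literal_eval(row["after"])

# Reassemble the full source file with the ground-truth span filled back in.
full_source = "\n".join(before + between + after)

# The frequency fields look like stringified dicts, e.g. "{'Variable Short-Range': 2}".
horizon_freq = ast.literal_eval(row["horizon_freq_analysis"])
print(row["task_type"], row["code_task"], row["start_line"], row["end_line"], horizon_freq)
```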
infilling_python
RL_Motion_Planning
572
572
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)']
[' self.model.save_(dir_param)']
[' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key 
in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " 
parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 572 is defined at line 569 and has a Short-Range dependency. Variable 'dir_param' used at line 572 is defined at line 569 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
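To make the first record concrete: its `between` field is the single line `self.model.save_(dir_param)` for the span 572-572, and `horizon_categories_output` states that the `self` and `dir_param` used on line 572 are both defined on line 569, i.e. the `def save_model(self, dir_param):` header three lines earlier in `before`; that is exactly the `{'Variable Short-Range': 2}` count in `horizon_freq_analysis`. A small sanity check, continuing from the loading sketch above and assuming `before` begins at line 1 of the file:

```python
# `row`, `before`, `between`, `after` come from the previous sketch (first record).
source_lines = before + between + after

start_line = int(row["start_line"])  # 572 for this record
# Assumption: `before` begins at line 1, so the infilled span sits at the
# 1-indexed positions start_line..end_line of the reassembled file.
assert source_lines[start_line - 1] == between[0]

# The horizon annotation: `self` and `dir_param` on line 572 are defined on line 569.
assert "def save_model(self, dir_param)" in source_lines[569 - 1]
```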
infilling_python
RL_Motion_Planning
575
575
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):']
[' self.model.load_(dir_param)']
[' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' 
avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " 
parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 575 is defined at line 574 and has a Short-Range dependency. Variable 'dir_param' used at line 575 is defined at line 574 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
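Note on the two annotation fields that close this record: horizon_categories_output describes each detected dependency in prose, and horizon_freq_analysis aggregates those sentences into category counts ({'Variable Short-Range': 2} above). The short Python sketch below shows one way such counts could be recomputed from the prose annotation. It is illustrative only: the regular expression is an assumption about the sentence shape, any construct or range names beyond "Variable" and "Short-Range" are hypothetical, and this is not tooling shipped with the dataset.

import re
from collections import Counter

# horizon_categories_output value copied from the record above.
annotation = (
    "Variable 'self' used at line 575 is defined at line 574 and has a Short-Range dependency. "
    "Variable 'dir_param' used at line 575 is defined at line 574 and has a Short-Range dependency."
)

# Assumed sentence shape: "<Construct> '<name>' used at line N ... has a <X>-Range dependency."
pattern = re.compile(r"(\w+) '[^']+' used at line \d+[^.]*has an? ([A-Za-z]+-Range) dependency")

# Count "<Construct> <Range>" pairs, mirroring the horizon_freq_analysis field.
counts = Counter(f"{construct} {dep_range}" for construct, dep_range in pattern.findall(annotation))
print(dict(counts))  # -> {'Variable Short-Range': 2}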
infilling_python
RL_Motion_Planning
599
601
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0']
[" trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)']
[' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', 
' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify 
Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'trans' used at line 599 is defined at line 596 and has a Short-Range dependency. Library 'tf' used at line 599 is imported at line 20 and has a Long-Range dependency. Library 'tf' used at line 600 is imported at line 20 and has a Long-Range dependency. Variable 'trans' used at line 600 is defined at line 596 and has a Short-Range dependency. Library 'tf' used at line 601 is imported at line 20 and has a Long-Range dependency.
{}
{'Variable Short-Range': 2, 'Library Long-Range': 3}
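The dependency labels in the record above can be illustrated with a minimal, self-contained Python sketch. This is not the record's exact source: the listing quoted in these records defines preprocess_in_state_space and process_data as methods on an agent class, while the standalone functions below are a simplified assumption kept only to show the two dependency ranges — 'trans' is defined a few lines before it is used (Short-Range), whereas 'tf' comes from the module-level import at the top of the file (Long-Range).

import tensorflow as tf  # module-level import: every later use of `tf` is a Long-Range dependency


def preprocess_in_state_space(item, clip_obs=200.0):
    # Mirrors the clipping helper referenced in the record's code listing (clip_obs default 200.0).
    return tf.clip_by_value(item, -clip_obs, clip_obs)


def process_data(transitions):
    trans = transitions.copy()                                    # `trans` defined here ...
    trans['states'] = preprocess_in_state_space(trans['states'])  # ... used a few lines later: Short-Range
    trans['env_goals'] = preprocess_in_state_space(trans['env_goals'])
    return trans


if __name__ == "__main__":
    # Toy inputs (illustrative values, not taken from the dataset).
    demo = {'states': tf.constant([[300.0, -250.0]]),
            'env_goals': tf.constant([[10.0, 20.0]])}
    print(process_data(demo))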
infilling_python
RL_Motion_Planning
603
603
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)']
[" trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))"]
[' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors 
and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. 
of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'trans' used at line 603 is defined at line 599 and has a Short-Range dependency. Library 'tf' used at line 603 is imported at line 20 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Library Long-Range': 1}
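The target line of the record above reshapes trans['terminate_skills'] into a column of shape (batch_size, 1). The sketch below is a minimal reconstruction of that step, assuming toy one-hot skill tensors purely for illustration; the surrounding computation of terminate_skills follows the process_data listing quoted in the record, and the same dependency pattern applies ('trans' created shortly before the target line, 'tf' imported at module level).

import tensorflow as tf  # module-level import: Long-Range dependency for the target line

# Toy one-hot skill sequences (assumed values, batch of 4, 3 skills).
prev_skills = tf.one_hot([0, 1, 2, 2], depth=3)
curr_skills = tf.one_hot([0, 2, 2, 1], depth=3)

trans = {}  # `trans` is created a few lines before the target line: Short-Range dependency
# terminate_skill = 1 where the skill changes between consecutive steps, else 0 (shape: (batch_size,)).
trans['terminate_skills'] = tf.cast(
    tf.not_equal(tf.argmax(prev_skills, axis=-1), tf.argmax(curr_skills, axis=-1)),
    dtype=tf.int32)
# Target line from the record: reshape to a column so it lines up with other per-transition tensors.
trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))
print(trans['terminate_skills'].shape)  # (4, 1)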
infilling_python
RL_Motion_Planning
607
607
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():']
[' trans[key] = tf.cast(trans[key], dtype=tf.float32)']
[' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and 
critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. 
Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 607}]
Variable 'trans' used at line 607 is defined at line 603 and has a Short-Range dependency. Variable 'key' used at line 607 is part of a Loop defined at line 606 and has a Short-Range dependency. Library 'tf' used at line 607 is imported at line 20 and has a Long-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
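A minimal sketch of the context the record above annotates, paraphrased from its `before`, `between`, and `after` fields; the method is flattened into a standalone function for brevity (the original is AgentBase.process_data and also takes expert/is_supervised flags), and the comments map each line to the entries listed in reason_categories_output and horizon_categories_output rather than asserting exact source line numbers:

import tensorflow as tf  # the 'tf' dependency, imported near the top of the source file (Library Long-Range)

def process_data(transitions):  # simplified from AgentBase.process_data in the `before` field
    trans = transitions.copy()  # definition of 'trans' (Variable Short-Range dependency)
    # ... clipping of states/goals and the other preprocessing steps are elided here ...
    for key in trans.keys():  # loop that supplies 'key' (Variable Loop Short-Range dependency)
        trans[key] = tf.cast(trans[key], dtype=tf.float32)  # the `between` span (reason category: Loop Body)
    print("trans :", trans)  # first line of the `after` field
    return trans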
infilling_python
RL_Motion_Planning
614
614
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions']
[' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)']
[' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', 
' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. 
of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Library 'Union' used at line 614 is imported at line 16 and has a Long-Range dependency. Library 'Dict' used at line 614 is imported at line 16 and has a Long-Range dependency. Variable 'buffer' used at line 614 is defined at line 611 and has a Short-Range dependency. Variable 'batch_size' used at line 614 is defined at line 611 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 2}
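To make the dependency labels in the record above concrete: at its target line 614 (the transition-sampling statement in AgentBase.sample_data, transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)), 'Union' and 'Dict' resolve to the typing import near the top of the file, which is why they are counted as Library Long-Range, while 'buffer' and 'batch_size' are the parameters defined three lines earlier at line 611, which is why they are counted as Variable Short-Range. The following minimal Python sketch is a hypothetical rephrasing of that pattern, not the repository's own code: _ToyBuffer and this simplified sample_data are invented stand-ins for ReplayBufferTf and AgentBase.sample_data.

# Hypothetical, self-contained illustration of the categories tallied above.
# _ToyBuffer and this simplified sample_data are invented stand-ins, not the
# repository's ReplayBufferTf / AgentBase.sample_data.
from typing import Dict, Union  # import near the top of the file -> "Library Long-Range"


class _ToyBuffer:
    def sample_transitions(self, batch_size: int) -> Dict[int, dict]:
        # Return one empty transition dict per requested sample.
        return {i: {} for i in range(batch_size)}


def sample_data(buffer: _ToyBuffer, batch_size: int):
    # 'buffer' and 'batch_size' come from the enclosing function signature a few
    # lines above -> "Variable Short-Range"; 'Union'/'Dict' resolve to the
    # import far above -> "Library Long-Range".
    transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)
    return transitions


print(sample_data(_ToyBuffer(), batch_size=2))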
infilling_python
RL_Motion_Planning
624
624
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(']
[' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)']
[' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' 
pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. 
of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 624}, {'reason_category': 'Loop Body', 'usage_line': 624}]
Variable 'skill' used at line 624 is part of a Loop defined at line 619 and has a Short-Range dependency. Library 'tf' used at line 624 is imported at line 20 and has a Long-Range dependency.
{'If Body': 1, 'Loop Body': 1}
{'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
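For the record above, the completed line 624 sits inside both the for-loop over transitions.keys() (opened at line 619) and the if transitions[skill] is not None: check, which is what the Loop Body and If Body reason categories record; 'skill' is the loop variable declared a few lines above (Variable Loop Short-Range) and 'tf' is the TensorFlow module imported at line 20 (Library Long-Range). The sketch below is a hypothetical, self-contained illustration of the same pattern; it assumes TensorFlow is installed and uses a toy transitions dict rather than the dataset's sampled transitions.

# Hypothetical, self-contained illustration of the categories tallied above
# (assumes TensorFlow is installed; the data here is a toy dict, not the
# dataset's transitions).
import tensorflow as tf  # import near the top of the file -> "Library Long-Range"

transitions = {"pick": {"states": [1.0]}, "drop": None}

for skill in transitions.keys():        # loop variable 'skill' -> "Variable Loop Short-Range"
    if transitions[skill] is not None:  # the completed line sits in this If Body / Loop Body
        # 'tf' resolves to the module imported far above -> "Library Long-Range"
        is_expert = tf.constant(True, dtype=tf.bool)
        print(skill, is_expert.numpy())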
infilling_python
RL_Motion_Planning
639
640
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:']
[' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])']
[' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different 
from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 639}, {'reason_category': 'Loop Body', 'usage_line': 639}, {'reason_category': 'Define Stop Criteria', 'usage_line': 639}, {'reason_category': 'If Body', 'usage_line': 640}, {'reason_category': 'Loop Body', 'usage_line': 640}]
Variable 'keys' used at line 639 is defined at line 617 and has a Medium-Range dependency. Variable 'combined_transitions' used at line 640 is defined at line 634 and has a Short-Range dependency. Variable 'key' used at line 640 is part of a Loop defined at line 639 and has a Short-Range dependency. Variable 'skill' used at line 640 is part of a Loop defined at line 636 and has a Short-Range dependency.
{'If Body': 2, 'Loop Body': 2, 'Define Stop Criteria': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1, 'Variable Loop Short-Range': 2}
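For this record, the ground-truth infill (lines 639-640) appends each skill's tensors into a combined dictionary, so every dependency the annotations list is visible within a few lines. A self-contained sketch with made-up placeholder data, mirroring those distances, is:

keys = ['states', 'actions']                        # defined ~20 lines above the infill in the record -> Variable Medium-Range
combined_transitions = {key: [] for key in keys}    # defined a few lines above the infill -> Variable Short-Range

transitions = {0: {'states': [1.0], 'actions': [0.5]}, 1: None}  # hypothetical stand-in for sampled transitions
for skill in transitions.keys():                    # outer loop binds 'skill' -> Variable Loop Short-Range
    if transitions[skill] is not None:              # the infilled lines live in this If Body / Loop Body
        for key in keys:                            # 'for key in keys' is the Define Stop Criteria line; 'key' -> Variable Loop Short-Range
            combined_transitions[key].append(transitions[skill][key])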
infilling_python
RL_Motion_Planning
643
643
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:']
[' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)']
[' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * 
args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'If Body', 'usage_line': 643}, {'reason_category': 'Loop Based', 'usage_line': 643}]
Variable 'combined_transitions' used at line 643 is defined at line 634 and has a Short-Range dependency. Variable 'key' used at line 643 is part of a Loop defined at line 642 and has a Short-Range dependency. Library 'tf' used at line 643 is imported at line 20 and has a Long-Range dependency.
{'If Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
infilling_python
RL_Motion_Planning
649
649
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(']
[' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)']
[' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' 
return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Elif Body', 'usage_line': 649}]
Variable 'transitions' used at line 649 is defined at line 648 and has a Short-Range dependency. Library 'tf' used at line 649 is imported at line 20 and has a Long-Range dependency.
{'Elif Body': 1}
{'Variable Short-Range': 1, 'Library Long-Range': 1}
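The 'horizon_categories_output' entry of the record above labels each name use by how far it sits from its definition: 'transitions' (defined one line earlier) is tagged Short-Range, while 'tf' (imported near the top of the file) is tagged Long-Range. As a rough illustration only, the sketch below classifies a dependency by line distance; the 10-line cutoff, the function name classify_dependency, and treating imports like any other definition are assumptions made here for illustration, not rules published with the dataset.

```python
# Illustrative sketch only (assumed 10-line cutoff, not the dataset's actual rule):
# classify a dependency by the gap between its definition line and its use line.

def classify_dependency(def_line: int, use_line: int, short_range_max: int = 10) -> str:
    """Return 'Short-Range' or 'Long-Range' based on line distance."""
    distance = use_line - def_line
    return "Short-Range" if distance <= short_range_max else "Long-Range"

# Mirrors the record above: 'transitions' defined at line 648 and used at 649,
# 'tf' imported at line 20 and used at 649.
assert classify_dependency(648, 649) == "Short-Range"
assert classify_dependency(20, 649) == "Long-Range"
```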
infilling_python
RL_Motion_Planning
663
663
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)']
[' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)']
[' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 
'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 663 is defined at line 658 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
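This record's gap is a single line (start_line = end_line = 663): the 'before' field ends with the expert-buffer sampling call, the 'between' field holds the ground-truth fill data_policy = self.sample_data(self.offline_buffer, self.args.batch_size), and the 'after' field resumes with loss_dict = self.model.train(data_expert, data_policy). A minimal sketch of how such a row could be stitched back together for inspection is given below; the helper names, the row dict, and the assumption that line numbers are 1-indexed with 'before' covering lines 1 through start_line - 1 are all hypothetical, not part of the dataset's tooling.

```python
from typing import List

# Hypothetical helpers (not shipped with the dataset) for inspecting a row:
# rebuild the full source listing and check that the ground-truth fill sits
# at the advertised [start_line, end_line] span (1-indexed lines assumed).

def reassemble(before: List[str], between: List[str], after: List[str]) -> List[str]:
    """Concatenate the three per-line lists back into one source listing."""
    return before + between + after

def gap_matches(row: dict) -> bool:
    """True if row['between'] occupies lines start_line..end_line of the rebuilt file."""
    full = reassemble(row["before"], row["between"], row["after"])
    start, end = row["start_line"], row["end_line"]
    return full[start - 1:end] == row["between"]
```

For the row above, gap_matches would simply verify that the data_policy line lands at line 663 of the reassembled script, under the indexing assumption stated in the lead-in.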
infilling_python
RL_Motion_Planning
668
673
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}']
[' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])']
[' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, 
help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Define Stop Criteria', 'usage_line': 668}, {'reason_category': 'If Condition', 'usage_line': 669}, {'reason_category': 'Loop Body', 'usage_line': 669}, {'reason_category': 'Loop Body', 'usage_line': 670}, {'reason_category': 'If Body', 'usage_line': 670}, {'reason_category': 'Loop Body', 'usage_line': 671}, {'reason_category': 'Define Stop Criteria', 'usage_line': 672}, {'reason_category': 'Loop Body', 'usage_line': 673}]
Variable 'loss_dict' used at line 668 is defined at line 664 and has a Short-Range dependency. Variable 'key' used at line 669 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 669 is defined at line 667 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 670 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 670 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 671 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 671 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'loss_dict' used at line 671 is defined at line 664 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 672 is defined at line 667 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 673 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 673 is part of a Loop defined at line 672 and has a Short-Range dependency. Library 'tf' used at line 673 is imported at line 20 and has a Long-Range dependency.
{'Define Stop Criteria': 2, 'If Condition': 1, 'Loop Body': 4, 'If Body': 1}
{'Variable Short-Range': 7, 'Variable Loop Short-Range': 4, 'Library Long-Range': 1}
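For context on the record above: its between span and the dependency annotations both refer to the loss-averaging loop at source lines 668-673. What follows is a minimal, self-contained Python sketch of that pattern, not part of the dataset itself; the concrete loss values are hypothetical placeholders, and only the accumulate-then-reduce_mean logic mirrors the annotated span.

import tensorflow as tf

# Hypothetical per-step losses standing in for the model's training output;
# only the keys mirror the record above, the values are placeholders.
loss_dict = {
    'loss/pi': tf.constant(0.42),
    'penalty/pi_ortho_penalty': tf.constant(0.01),
}

avg_loss_dict = {}
for key in loss_dict.keys():
    # Short-range dependencies: loss_dict and avg_loss_dict are defined a few
    # lines above their use, matching the horizon annotations of the record.
    if key not in avg_loss_dict.keys():
        avg_loss_dict[key] = []
    avg_loss_dict[key].append(loss_dict[key])

for key in avg_loss_dict.keys():
    # Long-range dependency: tf is imported at the top of the source file.
    avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])

print(avg_loss_dict)

Note that with a single accumulated loss dictionary the average is trivially the logged value itself; the list-then-reduce structure only matters when several loss dictionaries are gathered before averaging.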
infilling_python
RL_Motion_Planning
671
671
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []']
[' avg_loss_dict[key].append(loss_dict[key])']
[' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of 
demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 671}]
Variable 'avg_loss_dict' used at line 671 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 671 is part of a Loop defined at line 668 and has a Short-Range dependency. Variable 'loss_dict' used at line 671 is defined at line 664 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2, 'Variable Loop Short-Range': 1}
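For readability, here is a minimal, hypothetical sketch of how the dependency annotations in the record above (a usage at source line 671 with short-range dependencies on avg_loss_dict, key and loss_dict) could be held in memory. The class and field names are my own, chosen for illustration; they are not defined by this dataset.

from dataclasses import dataclass

@dataclass
class DependencyAnnotation:
    # One entry of the horizon-categories output, e.g.
    # "Variable 'avg_loss_dict' used at line 671 is defined at line 667 (Short-Range)".
    name: str          # identifier being used, e.g. 'avg_loss_dict'
    kind: str          # 'Variable' or 'Library'
    usage_line: int    # line where it is used, e.g. 671
    origin_line: int   # line where it is defined or imported, e.g. 667
    horizon: str       # 'Short-Range', 'Loop Short-Range', 'Long-Range', ...

annotations = [
    DependencyAnnotation('avg_loss_dict', 'Variable', 671, 667, 'Short-Range'),
    DependencyAnnotation('key', 'Variable', 671, 668, 'Loop Short-Range'),
    DependencyAnnotation('loss_dict', 'Variable', 671, 664, 'Short-Range'),
]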
infilling_python
RL_Motion_Planning
673
673
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():']
[' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])']
[' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, 
help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. 
'", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 673}]
Variable 'avg_loss_dict' used at line 673 is defined at line 667 and has a Short-Range dependency. Variable 'key' used at line 673 is part of a Loop defined at line 672 and has a Short-Range dependency. Library 'tf' used at line 673 is imported at line 20 and has a Long-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1, 'Library Long-Range': 1}
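For readability, a short sketch assembling the infilling target described by the record above (start and end line 673): the masked line averages each accumulated loss list. The surrounding lines are copied from the before/after context shown in the record; wrapping them in a helper function and treating loss_dict as a dict of scalar TensorFlow losses are my own assumptions, consistent with the listed dependencies (avg_loss_dict defined at 667, the loop at 672, and the tf import at 20).

import tensorflow as tf

def average_losses(loss_dict):
    # Context reconstructed from the record above; purely illustrative.
    avg_loss_dict = {}
    for key in loss_dict.keys():
        if key not in avg_loss_dict.keys():
            avg_loss_dict[key] = []
        avg_loss_dict[key].append(loss_dict[key])
    for key in avg_loss_dict.keys():
        # Ground-truth completion for line 673 in this record:
        avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])
    return avg_loss_dict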
infilling_python
RL_Motion_Planning
700
700
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()']
[' data_off = self.offline_buffer.sample_episodes()']
[' self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", 
" help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'self' used at line 700 is defined at line 692 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
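The record above closes with its annotation fields: an empty reasoning-category list, a dependency sentence ("Variable 'self' used at line 700 is defined at line 692 and has a Short-Range dependency."), an empty frequency dict for the former, and a per-category frequency dict for the latter. The Python sketch below illustrates, under stated assumptions, how such an annotation sentence could be tallied into a count of the form {'Variable Short-Range': 1}. It is a minimal sketch only: the helper name tally_dependency_categories, the regular expression, and the "<Kind> <Range>" category naming are assumptions inferred from the visible field values, not the dataset's actual annotation tooling, which is not shown in this dump.

    # Minimal sketch (not the dataset's actual tooling): tally dependency-category
    # counts from annotation sentences like the one in the record above, producing
    # a dict of the form {'Variable Short-Range': 1}. The parsing rule and category
    # naming are assumptions inferred from the visible fields.
    import re
    from collections import Counter
    from typing import Dict, List

    # Hypothetical pattern:
    # "<Kind> '<name>' used at line X is defined at line Y and has a <Range> dependency."
    _ANNOTATION_RE = re.compile(
        r"^(?P<kind>\w+) '.*' used at line \d+ is defined at line \d+ "
        r"and has a (?P<range>[\w-]+) dependency\.$"
    )

    def tally_dependency_categories(annotations: List[str]) -> Dict[str, int]:
        """Count '<Kind> <Range>' categories over a list of annotation sentences."""
        counts: Counter = Counter()
        for sentence in annotations:
            match = _ANNOTATION_RE.match(sentence.strip())
            if match:
                counts[f"{match.group('kind')} {match.group('range')}"] += 1
        return dict(counts)

    if __name__ == "__main__":
        example = [
            "Variable 'self' used at line 700 is defined at line 692 "
            "and has a Short-Range dependency."
        ]
        print(tally_dependency_categories(example))  # {'Variable Short-Range': 1}

Records carrying several annotation sentences would simply accumulate larger per-category counts in the same way; an empty annotation list yields an empty dict, matching the empty fields shown above.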
infilling_python
RL_Motion_Planning
715
717
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')"]
[' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()']
[' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. 
of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[{'reason_category': 'Loop Body', 'usage_line': 715}, {'reason_category': 'Loop Body', 'usage_line': 716}, {'reason_category': 'Define Stop Criteria', 'usage_line': 716}, {'reason_category': 'Loop Body', 'usage_line': 717}]
Variable 'self' used at line 715 is defined at line 692 and has a Medium-Range dependency. Variable 'avg_loss_dict' used at line 716 is defined at line 715 and has a Short-Range dependency. Variable 'avg_loss_dict' used at line 717 is defined at line 715 and has a Short-Range dependency. Variable 'key' used at line 717 is part of a Loop defined at line 716 and has a Short-Range dependency.
{'Loop Body': 3, 'Define Stop Criteria': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 2, 'Variable Loop Short-Range': 1}
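The record above annotates its infilled span (the loop that converts each entry of avg_loss_dict from a TensorFlow scalar to a Python float) with reason categories (Loop Body, Define Stop Criteria) and with def-use dependency labels. A minimal, self-contained Python sketch of that annotated pattern follows; the loss values are illustrative stand-ins, not the agent's real training output:

import tensorflow as tf

# Hypothetical stand-in for the dict of scalar losses returned by the training step.
loss_dict = {'loss/pi': tf.constant(0.25), 'penalty/pi_ortho_penalty': tf.constant(0.01)}

# The annotated loop: iterating over the keys defines the stop criteria, and the body
# converts each scalar tf.Tensor to a plain Python float so it can be logged.
for key in loss_dict.keys():
    loss_dict[key] = loss_dict[key].numpy().item()

print(loss_dict)  # e.g. {'loss/pi': 0.25, 'penalty/pi_ortho_penalty': 0.0099999...}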
infilling_python
RL_Motion_Planning
859
859
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config']
[' args = get_config_env(args, ag_in_env_goal=True)']
[' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. 
Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Function 'get_config_env' used at line 859 is defined at line 735 and has a Long-Range dependency.
{}
{'Function Long-Range': 1}
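The record above labels the use of get_config_env at line 859 (defined at line 735) as a Function Long-Range dependency. A minimal sketch of how such a def-use distance could be bucketed into the Short-/Medium-/Long-Range labels used in these records; the numeric cut-offs below are hypothetical assumptions, since the dataset does not state its actual thresholds:

def classify_range(def_line: int, use_line: int,
                   short_max: int = 10, medium_max: int = 50) -> str:
    # Distance between where a name is defined and where it is used, in source lines.
    # short_max and medium_max are assumed, illustrative thresholds.
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(classify_range(735, 859))  # -> 'Long-Range' (distance of 124 lines)

Under these assumed cut-offs the earlier record's labels are also reproduced: 'self' used at line 715 and defined at line 692 (distance 23) lands in Medium-Range, and 'avg_loss_dict' used at line 716 and defined at line 715 (distance 1) lands in Short-Range.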
infilling_python
RL_Motion_Planning
892
892
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ']
[' args = get_config(db=db)']
[' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Function 'get_config' used at line 892 is defined at line 758 and has a Long-Range dependency. Variable 'db' used at line 892 is defined at line 883 and has a Short-Range dependency.
{}
{'Function Long-Range': 1, 'Variable Short-Range': 1}
infilling_python
RL_Motion_Planning
903
904
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache']
[' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()']
[' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Library 'tf' used at line 903 is imported at line 20 and has a Long-Range dependency. Library 'tf' used at line 904 is imported at line 20 and has a Long-Range dependency.
{}
{'Library Long-Range': 2}
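The record above closes with a free-text dependency annotation (the "Library 'tf' used at line 903 ... Long-Range dependency" sentences) followed by a per-category tally ({'Library Long-Range': 2}). As a minimal, hypothetical sketch, the tally can be reproduced from the annotation text as shown below; the helper horizon_freq and the sentence pattern it assumes are illustrative only and are not part of the dataset or its tooling.

import re
from collections import Counter

def horizon_freq(annotation: str) -> dict:
    # Hypothetical helper: count dependency categories in sentences of the form
    # "Library 'tf' used at line 903 is imported at line 20 and has a Long-Range dependency."
    pattern = re.compile(r"Library '\w+' used at line \d+ is imported at line \d+ "
                         r"and has a (\w+)-Range dependency")
    return dict(Counter("Library {}-Range".format(m.group(1))
                        for m in pattern.finditer(annotation)))

example = ("Library 'tf' used at line 903 is imported at line 20 and has a Long-Range dependency. "
           "Library 'tf' used at line 904 is imported at line 20 and has a Long-Range dependency.")
print(horizon_freq(example))  # -> {'Library Long-Range': 2}

The printed dict matches the tally recorded for this row; the line numbers in the sentences refer to positions inside the serialized source context of the record, not to lines of this file.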
infilling_python
RL_Motion_Planning
914
915
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(']
[' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),']
[' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'args' used at line 914 is defined at line 892 and has a Medium-Range dependency. Function 'sample_transitions' used at line 915 is defined at line 142 and has a Long-Range dependency. Variable 'args' used at line 915 is defined at line 892 and has a Medium-Range dependency. Function 'state_to_goal' used at line 915 is defined at line 74 and has a Long-Range dependency. Variable 'n_objs' used at line 915 is defined at line 910 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Function Long-Range': 2, 'Variable Short-Range': 1}
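The horizon_freq_analysis dict above is simply a tally of the category/range pairs mentioned in the record's horizon_categories_output string. As a minimal sketch of how such a tally could be reproduced, the snippet below parses the free-text dependency descriptions with a regular expression and counts them; the regex, the helper name, and any category names other than Variable/Function are illustrative assumptions, not part of the dataset.

```python
import re
from collections import Counter

# Hypothetical helper: tally "<Kind> <Range>-Range" mentions from a
# horizon_categories_output string into a horizon_freq_analysis-style dict.
# Only Variable and Function appear in these rows; other kinds are guesses.
def tally_horizon_categories(description: str) -> dict:
    pattern = re.compile(
        r"(Variable|Function|Class|Library)\s+'[^']+'.*?"
        r"has an?\s+(Short|Medium|Long)-Range dependency"
    )
    counts = Counter(f"{kind} {rng}-Range" for kind, rng in pattern.findall(description))
    return dict(counts)

example = (
    "Variable 'args' used at line 914 is defined at line 892 and has a "
    "Medium-Range dependency. Function 'sample_transitions' used at line 915 "
    "is defined at line 142 and has a Long-Range dependency. Variable 'args' "
    "used at line 915 is defined at line 892 and has a Medium-Range dependency. "
    "Function 'state_to_goal' used at line 915 is defined at line 74 and has a "
    "Long-Range dependency. Variable 'n_objs' used at line 915 is defined at "
    "line 910 and has a Short-Range dependency."
)

print(tally_horizon_categories(example))
# {'Variable Medium-Range': 2, 'Function Long-Range': 2, 'Variable Short-Range': 1}
```

The range labels themselves (Short/Medium/Long) appear to reflect the line distance between use and definition, but the exact thresholds are not stated in the dump, so this sketch only counts the labels as given rather than recomputing them.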
infilling_python
RL_Motion_Planning
918
919
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. (db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(']
[' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)']
[' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Variable 'args' used at line 918 is defined at line 892 and has a Medium-Range dependency. Function 'sample_transitions' used at line 919 is defined at line 142 and has a Long-Range dependency. Variable 'args' used at line 919 is defined at line 892 and has a Medium-Range dependency. Function 'state_to_goal' used at line 919 is defined at line 74 and has a Long-Range dependency. Variable 'n_objs' used at line 919 is defined at line 910 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 2, 'Function Long-Range': 2, 'Variable Short-Range': 1}
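Per the column schema, each record stores the masked source file as three line lists (before, between, after), with start_line/end_line marking the span held out as the ground-truth infill (918-919 for the record above). The sketch below shows one way such a record could be turned into an infilling prompt and reassembled for checking; it assumes the three lists simply concatenate in order, and the function names, sentinel token, and toy data are hypothetical.

```python
from typing import List

def masked_prompt(before: List[str], after: List[str], sentinel: str = "<INFILL>") -> str:
    """What a model would see: the held-out span replaced by a sentinel line."""
    return "\n".join(before + [sentinel] + after)

def reassemble(before: List[str], between: List[str], after: List[str]) -> str:
    """Reconstruct the full source by splicing the ground-truth infill back in."""
    return "\n".join(before + between + after)

# Toy usage loosely mirroring the record above: the 'between' lines are the
# ReplayBufferTf constructor arguments that the model must fill in.
before = ["    offline_buffer = ReplayBufferTf("]
between = [
    "        buffer_shape, args.buffer_size, args.horizon,",
    "        sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)",
]
after = ["    )"]

print(masked_prompt(before, after))
print(reassemble(before, between, after))
```

A predicted completion would be scored against the 'between' list, while 'before' and 'after' provide the surrounding file context shown in full in the record.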
infilling_python
RL_Motion_Planning
952
952
['import os', "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging", "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Suppress oneDNN warning", '# os.environ["CUDA_VISIBLE_DEVICES"] = "0"', 'import argparse', 'import datetime', 'import json', 'import os', 'import pickle', 'import sys', 'import time', 'import pickle', 'from abc import ABC', 'from argparse import Namespace', 'from collections import OrderedDict', 'from typing import Dict, Union, List', 'from typing import Tuple', 'import logging', 'import numpy as np', 'import tensorflow as tf', 'import tensorflow_probability as tfp', 'import wandb', 'from keras.layers import Dense', 'from tf_agents.replay_buffers.table import Table', 'from tqdm import tqdm', 'import random', '', '# Set the seed', 'SEED = 1234', '', 'random.seed(SEED)', 'np.random.seed(SEED)', 'tf.random.set_seed(SEED)', 'current_time = datetime.datetime(2024, 1, 1, 0, 0, 0).strftime("%Y%m%d-%H%M%S")', '', "# Ensure TensorFlow doesn't try to use GPU if it's not available", "os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if tf.test.is_gpu_available() else ''", '', '# Suppress other warnings', "tf.get_logger().setLevel('ERROR')", '', '# Get the absolute path of the script module', 'script_path = os.path.realpath(__file__)', 'script_dir = os.path.dirname(script_path)', '', "log_dir = os.path.join(script_dir, './logging', 'BC', 'run' + current_time)", 'if not os.path.exists(log_dir):', ' os.makedirs(log_dir, exist_ok=True)', '', "logging.basicConfig(filename=os.path.join(log_dir, 'logs.txt'), filemode='w',", " format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", " datefmt='%m/%d/%Y %H:%M:%S',", ' level=logging.INFO)', 'logger = logging.getLogger(__name__)', '', '', 'def get_buffer_shape(args) -> Dict[str, Tuple[int, ...]]:', ' buffer_shape = {', " 'prev_goals': (args.horizon, args.ag_dim),", " 'prev_skills': (args.horizon, args.c_dim),", " 'states': (args.horizon + 1, args.s_dim),", " 'env_goals': (args.horizon + 1, args.g_dim),", " 'curr_goals': (args.horizon, args.ag_dim),", " 'curr_skills': (args.horizon, args.c_dim),", " 'states_2': (args.horizon, args.s_dim),", " 'actions': (args.horizon, args.a_dim),", " 'successes': (args.horizon,),", " 'distances': (args.horizon,),", " 'has_gt_skill': (args.horizon,),", ' }', ' return buffer_shape', '', '', 'def state_to_goal(num_objs: int):', ' """', ' Converts state to goal. (Achieved Goal Space)', ' If obj_identifiers is not None, then it further filters the achieved goals based on the object/skill id.', ' """', ' ', ' @tf.function(experimental_relax_shapes=True) # Imp otherwise code will be very slow', ' def get_goal(states: tf.Tensor, obj_identifiers: tf.Tensor = None):', ' # Get achieved goals', ' goals = tf.map_fn(lambda x: x[3: 3 + num_objs * 3], states, fn_output_signature=tf.float32)', ' return goals', ' ', ' return get_goal', '', '', 'def repurpose_skill_seq(args, skill_seq):', ' """', ' Repurpose the skill sequence to be used for training the policy. 
Use value of wrap_skill_id', ' = "0": no change', ' = "1": wrap pick/grab/drop:obj_id to pick/grab/drop', ' = "2": wrap pick:obj_id to pick/grab/drop:obj_id to obj_id', ' :param skill_seq: one-hot skill sequence of shape (n_trajs, horizon, c_dim)', ' :return: tensor of shape (n_trajs, horizon, c_dim) and type same as skill_seq', ' """', " if args.env_name != 'OpenAIPickandPlace':", ' tf.print("Wrapping skill sequence is currently only supported for PnP tasks!")', ' sys.exit(-1)', ' ', ' if args.wrap_level == "0":', ' return skill_seq', ' elif args.wrap_level == "1":', ' # wrap by i = j % 3 where i is the new position of skill originally at j. Dim changes from c_dim to 3', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq % 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=3)', ' return skill_seq', ' elif args.wrap_level == "2":', ' # wrap such that 0/1/2 -> 0, 3/4/5 -> 1, 6/7/8 -> 2 ... Dim changes from c_dim to self.args.num_objs', ' skill_seq = tf.argmax(skill_seq, axis=-1)', ' skill_seq = skill_seq // 3', ' # Convert back to one-hot', ' skill_seq = tf.one_hot(skill_seq, depth=args.num_objs)', ' return skill_seq', ' else:', ' raise NotImplementedError("Invalid value for wrap_skill_id: {}".format(args.wrap_level))', '', '', 'def orthogonal_regularization(model, reg_coef=1e-4):', ' """Orthogonal regularization v2.', ' See equation (3) in https://arxiv.org/abs/1809.11096.', ' Rβ(W) = β∥W⊤W ⊙ (1 − I)∥2F, where ⊙ is the Hadamard product.', ' Args:', ' model: A keras model to apply regularization for.', " reg_coef: Orthogonal regularization coefficient. Don't change this value.", ' Returns:', ' A regularization loss term.', ' """', ' reg = 0', ' for layer in model.layers:', ' if isinstance(layer, tf.keras.layers.Dense):', ' prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)', ' reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))', ' ', ' print("Orthogonal Regularization: {}".format(reg * reg_coef))', ' return reg * reg_coef', '', '', 'def sample_transitions(sample_style: str, state_to_goal=None, num_options: int = None):', ' def sample_random_transitions(episodic_data, batch_size_in_transitions=None):', ' """', ' Sample random transitions without HER.', ' Functionality: Sample random time-steps from each episode: (g_t-1, c_t-1, s_t, g_t, c_t, a_t) for all episodes.', ' """', ' ', ' batch_size = batch_size_in_transitions # Number of transitions to sample', " T = episodic_data['actions'].shape[1]", " successes = episodic_data['successes']", ' ', ' # Get index at which episode terminated', ' terminate_idxes = tf.math.argmax(successes, axis=-1)', ' # If no success, set to last index', ' mask_no_success = tf.math.equal(terminate_idxes, 0)', ' terminate_idxes += tf.multiply((T - 1) * tf.ones_like(terminate_idxes),', ' tf.cast(mask_no_success, terminate_idxes.dtype))', ' ', " # Get episode idx for each transition to sample: more likely to sample from episodes which didn't end in success", ' p = (terminate_idxes + 1) / tf.reduce_sum(terminate_idxes + 1)', ' episode_idxs = tfp.distributions.Categorical(probs=p).sample(sample_shape=(batch_size,))', ' episode_idxs = tf.cast(episode_idxs, dtype=terminate_idxes.dtype)', ' # Get terminate index for the selected episodes', ' terminate_idxes = tf.gather(terminate_idxes, episode_idxs)', ' print("terminate_idxes: ", terminate_idxes)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # 
--------------------------------- 2) Select which time steps + goals to use --------------------------------', ' # Get the current time step', ' t_samples_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' t_samples = t_samples_frac * tf.cast(terminate_idxes, dtype=t_samples_frac.dtype)', ' t_samples = tf.cast(tf.round(t_samples), dtype=terminate_idxes.dtype)', ' ', ' # Get random init time step (before t_samples)', ' rdm_past_offset_frac = tf.zeros_like(t_samples_frac)', ' t_samples_init = rdm_past_offset_frac * tf.cast(t_samples, dtype=rdm_past_offset_frac.dtype)', ' t_samples_init = tf.cast(tf.floor(t_samples_init), dtype=t_samples.dtype)', ' print("t_samples_init: ", t_samples_init)', ' ', ' # Get the future time step', ' rdm_future_offset_frac = tf.experimental.numpy.random.random(size=(batch_size,))', ' future_offset = rdm_future_offset_frac * tf.cast((terminate_idxes - t_samples), rdm_future_offset_frac.dtype)', ' future_offset = tf.cast(future_offset, terminate_idxes.dtype)', ' t_samples_future = t_samples + future_offset', ' print("t_samples_future: ", t_samples_future)', ' ', ' # ------------------------------------------------------------------------------------------------------------', ' # ----------------- 3) Select the batch of transitions corresponding to the current time steps ---------------', ' curr_indices = tf.stack((episode_idxs, t_samples), axis=-1)', ' transitions = {}', ' for key in episodic_data.keys():', ' transitions[key] = tf.gather_nd(episodic_data[key], indices=curr_indices)', ' ', " transitions['achieved_goals'] = state_to_goal(", " states=tf.gather_nd(episodic_data['states'], indices=curr_indices),", ' obj_identifiers=None)', ' ', ' # --------------- 4) Select the batch of transitions corresponding to the future time steps ------------', ' future_indices = tf.stack((episode_idxs, t_samples_future), axis=-1)', " transitions['her_goals'] = state_to_goal(states=tf.gather_nd(episodic_data['states'], indices=future_indices),", ' obj_identifiers=None) # Object ids are not used for unsegmented HER', ' ', ' # --------------- 5) Select the batch of transitions corresponding to the initial time steps ------------', ' init_indices = tf.stack((episode_idxs, t_samples_init), axis=-1)', " transitions['init_states'] = tf.gather_nd(episodic_data['states'], indices=init_indices)", ' print("transitions: ", transitions)', ' return transitions', ' ', " if sample_style == 'random_unsegmented':", ' return sample_random_transitions', ' else:', ' raise NotImplementedError', '', '', 'class ReplayBufferTf:', ' def __init__(self, buffer_shapes: Dict[str, Tuple[int, ...]], size_in_transitions, T, transition_fn=None):', ' """Creates a replay buffer.', '', ' Args:', ' buffer_shapes (dict of ints): the shape for all buffers that are used in the replay', ' buffer', ' size_in_transitions (int): the size of the buffer, measured in transitions', ' T (int): the time horizon for episodes', ' transition_fn (function): a function that samples from the replay buffer', ' """', ' self.T = tf.constant(T, dtype=tf.int32)', ' self.buffer_size = tf.constant(size_in_transitions // T, dtype=tf.int32)', ' ', ' self.current_size = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. of episodes', ' self.n_transitions_stored = tf.Variable(0, dtype=tf.int32) # Size of buffer in terms of no. 
of transitions', ' ', ' self.transition_fn = transition_fn', ' self.buffer_keys: List[str] = [key for key in buffer_shapes.keys()]', ' tensor_spec = [tf.TensorSpec(buffer_shapes[key], tf.float32, key) for key in self.buffer_keys]', ' self.table = Table(tensor_spec, capacity=self.buffer_size)', ' ', ' @tf.function # Make sure batch_size passed here is a tf.constant to avoid retracing', ' def sample_transitions(self, batch_size):', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', ' transitions = self.transition_fn(buffered_data, batch_size)', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def sample_episodes(self, ep_start: int = None, ep_end: int = None, num_episodes: int = None):', ' ', ' if ep_start is None or ep_end is None:', ' if num_episodes:', ' num_episodes = tf.math.minimum(tf.cast(num_episodes, dtype=self.current_size.dtype), self.current_size)', ' else:', ' num_episodes = self.current_size', ' ep_range = tf.range(num_episodes)', ' else:', ' ep_range = tf.range(ep_start, ep_end)', ' ', ' buffered_data = {}', ' _data = self.table.read(rows=ep_range)', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' print("buffered_data: ", buffered_data)', ' return buffered_data', ' ', ' @tf.function', ' def store_episode(self, episode_batch):', ' """', ' Store each episode into replay buffer', ' episode_batch: {"": array(1 x (T or T+1) x dim)}', ' """', ' idxs = self._get_storage_idxs(num_to_ins=tf.constant(1, dtype=tf.int32))', ' values = [episode_batch[key] for key in self.buffer_keys if key in episode_batch.keys()]', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + self.T)', ' ', ' def store_episodes(self, episodes_batch):', " for ep_idx in tf.range(tf.shape(episodes_batch['actions'])[0]):", ' episode_batch = {}', ' for key in self.buffer_keys:', ' episode_batch[key] = tf.gather(episodes_batch[key], ep_idx)', ' self.store_episode(episode_batch)', ' ', ' def _get_storage_idxs(self, num_to_ins=None):', ' if num_to_ins is None:', ' num_to_ins = tf.cast(1, dtype=tf.int32)', ' ', ' # consecutively insert until you hit the end of the buffer, and then insert randomly.', ' if self.current_size + num_to_ins <= self.buffer_size:', ' idxs = tf.range(self.current_size, self.current_size + num_to_ins)', ' elif self.current_size < self.buffer_size:', ' overflow = num_to_ins - (self.buffer_size - self.current_size)', ' idx_a = tf.range(self.current_size, self.buffer_size)', ' idx_b = tf.experimental.numpy.random.randint(0, self.current_size, size=(overflow,), dtype=tf.int32)', ' idxs = tf.concat([idx_a, idx_b], axis=0)', ' else:', ' idxs = tf.experimental.numpy.random.randint(0, self.buffer_size, size=(num_to_ins,), dtype=tf.int32)', ' ', ' # update buffer size', ' self.current_size.assign(tf.math.minimum(self.buffer_size, self.current_size + num_to_ins))', ' print("idxs: ", idxs)', ' return idxs', ' ', ' def get_current_size_ep(self):', ' return self.current_size', ' ', ' def get_current_size_trans(self):', ' return self.current_size * self.T', ' ', ' def clear_buffer(self):', ' self.current_size.assign(0)', ' ', ' @property', ' def full(self):', ' return self.current_size == self.buffer_size', ' ', ' def __len__(self):', ' return self.current_size', ' ', ' def save_buffer_data(self, path):', ' buffered_data = {}', ' _data = 
self.table.read(rows=tf.range(self.current_size))', ' for index, key in enumerate(self.buffer_keys):', ' buffered_data[key] = _data[index]', ' ', " with open(path, 'wb') as handle:", ' pickle.dump(buffered_data, handle, protocol=pickle.HIGHEST_PROTOCOL)', ' ', ' def load_data_into_buffer(self, buffered_data=None, clear_buffer=True, num_demos_to_load=None):', ' ', ' if buffered_data is None:', ' raise ValueError("No buffered_data provided")', ' ', ' if clear_buffer:', ' self.clear_buffer()', ' ', ' if num_demos_to_load is not None:', ' ', ' # Randomly sample idxs to load', " idxs = np.random.choice(len(buffered_data['actions']), size=num_demos_to_load, replace=False).tolist()", ' ', ' for key in buffered_data.keys():', ' buffered_data[key] = tf.gather(buffered_data[key], idxs)', ' ', ' # Check if all tensors are present in loaded data', ' data_sizes = [len(buffered_data[key]) for key in self.buffer_keys]', ' assert np.all(np.array(data_sizes) == data_sizes[0])', ' ', ' idxs = self._get_storage_idxs(num_to_ins=data_sizes[0])', ' values = [buffered_data[key] for key in self.buffer_keys]', ' ', ' self.table.write(rows=idxs, values=values)', ' self.n_transitions_stored.assign(self.n_transitions_stored + len(idxs) * self.T)', '', '', 'class Actor(tf.keras.Model):', ' def __init__(self, action_dim):', ' super(Actor, self).__init__()', ' ', ' # Rewrite the base weights to initialise using Xavier(gain=1.0) and bias=0.0', ' self.base = tf.keras.Sequential([', " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=256, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=128, activation=tf.nn.relu, kernel_initializer='glorot_uniform', bias_initializer='zeros'),", " Dense(units=action_dim, kernel_initializer='glorot_uniform', bias_initializer='zeros')", ' ])', ' ', ' self.MEAN_MIN, self.MEAN_MAX = -7, 7', ' self.eps = np.finfo(np.float32).eps', ' self.pi = tf.constant(np.pi)', ' self.FIXED_STD = 0.05', ' ', ' self.train = True', ' ', ' def get_log_prob(self, states, actions):', ' """Evaluate log probs for actions conditioned on states.', ' Args:', ' states: A batch of states.', ' actions: A batch of actions to evaluate log probs on.', ' Returns:', ' Log probabilities of actions.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' std = tf.ones_like(mu) * self.FIXED_STD', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' ', ' # Get log probs from Gaussian distribution', ' log_probs = -0.5 * tf.square((actions - mu) / std) - 0.5 * tf.math.log(2 * self.pi) - tf.math.log(std)', ' log_probs = tf.reduce_sum(log_probs, axis=1, keepdims=False)', ' print("log_probs: ", log_probs)', ' return log_probs', ' ', ' def call(self, states, training=None, mask=None):', ' """Computes actions for given inputs.', ' Args:', ' states: A batch of states.', ' training: Ignored', ' mask: Ignored.', ' Returns:', ' A mode action, a sampled action and log probability of the sampled action.', ' """', ' mu = self.base(states)', ' mu = tf.nn.tanh(mu)', ' mu = tf.clip_by_value(mu, self.MEAN_MIN, self.MEAN_MAX)', ' ', ' if self.train:', ' # Sample actions from the distribution', ' actions = tf.random.normal(shape=mu.shape, mean=mu, stddev=self.FIXED_STD)', ' else:', ' actions = mu', ' ', ' # Compute log probs', ' log_probs = self.get_log_prob(states, actions)', ' log_probs = tf.expand_dims(log_probs, -1) # To avoid 
broadcasting', ' ', ' actions = tf.clip_by_value(actions, -1 + self.eps, 1 - self.eps)', ' print("mu: ", mu)', ' print("actions: ", actions)', ' print("log_probs: ", log_probs)', ' return mu, actions, log_probs', '', '', 'class BC(tf.keras.Model, ABC):', ' def __init__(self, args: Namespace):', ' super(BC, self).__init__()', ' self.args = args', ' ', ' # Declare Policy Network and Optimiser', ' self.actor = Actor(args.a_dim)', ' self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=args.actor_lr)', ' ', ' # Build Model', ' self.build_model()', ' ', ' # For HER', ' self.use_her = False', " logger.info('[[[ Using HER ? ]]]: {}'.format(self.use_her))", ' ', ' @tf.function(experimental_relax_shapes=True)', ' def train(self, data_exp, data_rb):', ' with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:', ' tape.watch(self.actor.variables)', ' ', " actions_mu, _, _ = self.actor(tf.concat([data_rb['states'], data_rb['goals']], axis=1))", " pi_loss = tf.reduce_sum(tf.math.squared_difference(data_rb['actions'], actions_mu), axis=-1)", ' pi_loss = tf.reduce_mean(pi_loss)', ' penalty = orthogonal_regularization(self.actor.base)', ' pi_loss_w_penalty = pi_loss + penalty', ' ', ' grads = tape.gradient(pi_loss_w_penalty, self.actor.trainable_variables)', ' self.actor_optimizer.apply_gradients(zip(grads, self.actor.trainable_variables))', ' print("loss/pi: ", pi_loss)', ' print("penalty/pi_ortho_penalty: ", penalty)', ' return {', " 'loss/pi': pi_loss,", " 'penalty/pi_ortho_penalty': penalty,", ' }', ' ', ' def act(self, state, env_goal, prev_goal, prev_skill, epsilon, stddev):', ' state = tf.clip_by_value(state, -self.args.clip_obs, self.args.clip_obs)', ' env_goal = tf.clip_by_value(env_goal, -self.args.clip_obs, self.args.clip_obs)', ' prev_goal = tf.clip_by_value(prev_goal, -self.args.clip_obs, self.args.clip_obs)', ' ', ' # ###################################### Current Goal ####################################### #', ' curr_goal = env_goal', ' ', ' # ###################################### Current Skill ###################################### #', ' curr_skill = prev_skill # Not used in this implementation', ' ', ' # ########################################## Action ######################################### #', ' # Explore', ' if tf.random.uniform(()) < epsilon:', ' action = tf.random.uniform((1, self.args.a_dim), -self.args.action_max, self.args.action_max)', ' # Exploit', ' else:', ' action_mu, _, _ = self.actor(tf.concat([state, curr_goal], axis=1)) # a_t = mu(s_t, g_t)', ' action_dev = tf.random.normal(action_mu.shape, mean=0.0, stddev=stddev)', ' action = action_mu + action_dev # Add noise to action', ' action = tf.clip_by_value(action, -self.args.action_max, self.args.action_max)', ' ', ' # Safety check for action, should not be nan or inf', ' has_nan = tf.math.reduce_any(tf.math.is_nan(action))', ' has_inf = tf.math.reduce_any(tf.math.is_inf(action))', ' if has_nan or has_inf:', " logger.warning('Action has nan or inf. Setting action to zero. Action: {}'.format(action))", ' action = tf.zeros_like(action)', ' ', ' return curr_goal, curr_skill, action', ' ', ' def get_init_skill(self):', ' """', ' demoDICE does not use skills. 
Use this function to return a dummy skill of dimension (1, c_dim)', ' """', ' skill = tf.zeros((1, self.args.c_dim))', ' return skill', ' ', ' @staticmethod', ' def get_init_goal(init_state, g_env):', ' return g_env', ' ', ' def build_model(self):', ' # a_t <- f(s_t) for each skill', ' _ = self.actor(tf.concat([np.ones([1, self.args.s_dim]), np.ones([1, self.args.g_dim])], 1))', ' ', ' def save_(self, dir_param):', ' self.actor.save_weights(dir_param + "/policy.h5")', ' ', ' def load_(self, dir_param):', ' self.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def change_training_mode(self, training_mode: bool):', ' pass', ' ', ' def update_target_networks(self):', ' pass', '', '', 'class AgentBase(object):', ' def __init__(', ' self,', ' args,', ' model,', ' algo: str,', ' expert_buffer: ReplayBufferTf,', ' offline_buffer: ReplayBufferTf', ' ):', ' ', ' self.args = args', ' self.model = model', ' ', ' # Define the Buffers', ' self.expert_buffer = expert_buffer', ' self.offline_buffer = offline_buffer', ' ', ' self.offline_gt_prev_skill = None', ' self.offline_gt_curr_skill = None', ' ', ' # Define Tensorboard for logging Losses and Other Metrics', ' if not os.path.exists(args.dir_summary):', ' os.makedirs(args.dir_summary)', ' ', ' if not os.path.exists(args.dir_plot):', ' os.makedirs(args.dir_plot)', ' self.summary_writer = tf.summary.create_file_writer(args.dir_summary)', ' ', ' # Define wandb logging', ' if self.args.log_wandb:', ' self.wandb_logger = wandb.init(', ' project=args.wandb_project,', ' config=vars(args),', " id='{}_{}'.format(algo, current_time),", ' reinit=True, # Allow multiple wandb.init() calls in the same process.', ' )', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' def preprocess_in_state_space(self, item):', ' item = tf.clip_by_value(item, -self.args.clip_obs, self.args.clip_obs)', ' return item', ' ', ' def save_model(self, dir_param):', ' if not os.path.exists(dir_param):', ' os.makedirs(dir_param)', ' self.model.save_(dir_param)', ' ', ' def load_model(self, dir_param):', ' self.model.load_(dir_param)', ' ', ' def process_data(self, transitions, expert=False, is_supervised=False):', ' ', ' trans = transitions.copy()', ' ', ' # Process the states and goals', " trans['states'] = self.preprocess_in_state_space(trans['states'])", " trans['states_2'] = self.preprocess_in_state_space(trans['states_2'])", " trans['env_goals'] = self.preprocess_in_state_space(trans['env_goals'])", " trans['init_states'] = self.preprocess_in_state_space(trans['init_states'])", " trans['her_goals'] = self.preprocess_in_state_space(trans['her_goals'])", " trans['achieved_goals'] = self.preprocess_in_state_space(trans['achieved_goals'])", ' ', ' if self.model.use_her:', " trans['goals'] = trans['her_goals']", ' else:', " trans['goals'] = trans['env_goals']", ' ', ' # Define if the transitions are from expert or not/are supervised or not', " trans['is_demo'] = tf.cast(expert, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", " trans['is_sup'] = tf.cast(is_supervised, dtype=tf.int32) * tf.ones_like(trans['successes'], dtype=tf.int32)", ' ', ' # Compute terminate skills i.e. 
if prev_skill != curr_skill then terminate_skill = 1 else 0', " trans['terminate_skills'] = tf.cast(tf.not_equal(tf.argmax(trans['prev_skills'], axis=-1),", " tf.argmax(trans['curr_skills'], axis=-1)),", ' dtype=tf.int32)', ' # reshape the terminate_skills to be of shape (batch_size, 1)', " trans['terminate_skills'] = tf.reshape(trans['terminate_skills'], shape=(-1, 1))", ' ', ' # Make sure the data is of type tf.float32', ' for key in trans.keys():', ' trans[key] = tf.cast(trans[key], dtype=tf.float32)', ' print("trans :", trans)', ' return trans', ' ', ' def sample_data(self, buffer, batch_size):', ' ', ' # Sample Transitions', ' transitions: Union[Dict[int, dict], dict] = buffer.sample_transitions(batch_size)', ' ', ' # Process the transitions', ' keys = None', ' if all(isinstance(v, dict) for v in transitions.values()):', ' for skill in transitions.keys():', ' ', ' # For skills whose transition data is not None', ' if transitions[skill] is not None:', ' transitions[skill] = self.process_data(', ' transitions[skill], tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' keys = transitions[skill].keys()', ' ', ' # If keys is None, No transitions were sampled', ' if keys is None:', ' raise ValueError("No transitions were sampled")', ' ', ' # Concatenate the transitions from different skills', ' combined_transitions = {key: [] for key in keys}', ' ', ' for skill in transitions.keys():', ' ', ' if transitions[skill] is not None:', ' for key in keys:', ' combined_transitions[key].append(transitions[skill][key])', ' ', ' for key in keys:', ' combined_transitions[key] = tf.concat(combined_transitions[key], axis=0)', ' ', ' transitions = combined_transitions', ' ', ' elif isinstance(transitions, dict):', ' transitions = self.process_data(', ' transitions, tf.constant(True, dtype=tf.bool), tf.constant(True, dtype=tf.bool)', ' )', ' ', ' else:', ' raise ValueError("Invalid type of transitions")', ' print("transitions: ", transitions)', ' return transitions', ' ', ' @tf.function', ' def train(self):', ' ', ' self.model.change_training_mode(training_mode=True)', ' ', ' data_expert = self.sample_data(self.expert_buffer, self.args.batch_size)', ' data_policy = self.sample_data(self.offline_buffer, self.args.batch_size)', ' loss_dict = self.model.train(data_expert, data_policy)', ' ', ' # Average the losses', ' avg_loss_dict = {}', ' for key in loss_dict.keys():', ' if key not in avg_loss_dict.keys():', ' avg_loss_dict[key] = []', ' avg_loss_dict[key].append(loss_dict[key])', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = tf.reduce_mean(avg_loss_dict[key])', ' print("avg_loss_dict: ", avg_loss_dict)', ' return avg_loss_dict', ' ', ' def learn(self):', ' # This is a base class method, inherited classes must implement this method', ' raise NotImplementedError', '', '', 'class Agent(AgentBase):', ' def __init__(self, args,', ' expert_buffer: ReplayBufferTf = None,', ' offline_buffer: ReplayBufferTf = None):', ' ', " super(Agent, self).__init__(args, BC(args), 'BC', expert_buffer, offline_buffer)", ' ', ' def load_actor(self, dir_param):', ' self.model.actor.load_weights(dir_param + "/policy.h5")', ' ', ' def learn(self):', ' args = self.args', ' ', ' # Tracker for wandb logging', ' log_step = 0', ' ', ' # [Update] Load the expert data into the expert buffer, expert data and offline data into the offline buffer', ' data_exp = self.expert_buffer.sample_episodes()', ' data_off = self.offline_buffer.sample_episodes()', ' 
self.expert_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_exp, clear_buffer=True)', ' self.offline_buffer.load_data_into_buffer(buffered_data=data_off, clear_buffer=False)', ' ', ' with tqdm(total=args.max_time_steps, leave=False) as pbar:', ' for curr_t in range(0, args.max_time_steps):', ' ', ' # Update the reference actors and directors using polyak averaging', ' if curr_t % args.update_target_interval == 0:', ' tf.print("Updating the target actors and critics at train step {}".format(curr_t))', ' self.model.update_target_networks()', ' ', ' # Train the policy', " pbar.set_description('Training')", ' avg_loss_dict = self.train()', ' for key in avg_loss_dict.keys():', ' avg_loss_dict[key] = avg_loss_dict[key].numpy().item()', ' ', ' # Log', ' if self.args.log_wandb:', ' self.wandb_logger.log(avg_loss_dict, step=log_step)', ' self.wandb_logger.log({', " 'policy_buffer_size': self.offline_buffer.get_current_size_trans(),", " 'expert_buffer_size': self.expert_buffer.get_current_size_trans(),", ' }, step=log_step)', ' ', ' # Update', ' pbar.update(1)', ' log_step += 1', ' ', ' # Save the model', ' self.save_model(args.dir_param)', '', '', 'def get_config_env(args, ag_in_env_goal):', ' """', ' :param args: Namespace object', ' :param ag_in_env_goal: If True, then achieved goal is in the same space as env goal', ' """', ' ', ' args.g_dim = 3', ' args.s_dim = 10', ' args.a_dim = 4', ' ', " # Specify the expert's latent skill dimension [Default]", " # Define number of skills, this could be different from agent's practiced skill dimension", " assert hasattr(args, 'num_objs')", ' args.c_dim = 3 * args.num_objs', ' ', ' if ag_in_env_goal:', ' args.ag_dim = args.g_dim # Achieved Goal in the same space as Env Goal', ' else:', ' args.ag_dim = 3 # Goal/Object position in the 3D space', ' print("args: ", args)', ' return args', '', '', 'def get_config(db=False):', ' # Construct the absolute path of the data directory', " data_dir = os.path.join(script_dir, 'pnp_data')", '', ' parser = argparse.ArgumentParser()', ' ', " parser.add_argument('--log_wandb', type=bool, default=False)", " parser.add_argument('--wandb_project', type=str, default='offlineILPnPOne',", " choices=['offlineILPnPOne', 'offlineILPnPOneExp', 'offlineILPnPTwoExp'])", ' ', " parser.add_argument('--expert_demos', type=int, default=25)", " parser.add_argument('--offline_demos', type=int, default=75)", " parser.add_argument('--eval_demos', type=int, default=1 if db else 10,", " help='Use 10 (num of demos to evaluate trained pol)')", " parser.add_argument('--test_demos', type=int, default=0, help='For Visualisation')", " parser.add_argument('--perc_train', type=int, default=1.0)", ' ', ' # Specify Environment Configuration', " parser.add_argument('--env_name', type=str, default='OpenAIPickandPlace')", " parser.add_argument('--num_objs', type=int, default=1)", " parser.add_argument('--horizon', type=int, default=100,", " help='Set 100 for one_obj, 150 for two_obj and 200 for three_obj')", " parser.add_argument('--stacking', type=bool, default=False)", " parser.add_argument('--expert_behaviour', type=str, default='0', choices=['0', '1'],", " help='Expert behaviour in two_object env')", " parser.add_argument('--full_space_as_goal', type=bool, default=False)", " parser.add_argument('--fix_goal', type=bool, default=False,", " help='[Debugging] Fix the goal position for one object task')", " parser.add_argument('--fix_object', type=bool, default=False,", " 
help='[Debugging] Fix the object position for one object task')", ' ', ' # Specify Data Collection Configuration', " parser.add_argument('--buffer_size', type=int, default=int(2e5),", " help='Number of transitions to store in buffer (max_time_steps)')", ' ', ' # Specify Training configuration', " parser.add_argument('--max_pretrain_time_steps', type=int, default=0 if not db else 0,", " help='No. of time steps to run pretraining - actor, director on expert data. Set to 0 to skip')", " parser.add_argument('--max_time_steps', type=int, default=10000 if not db else 1,", " help='No. of time steps to run. Recommended 5k for one_obj, 10k for two_obj')", " parser.add_argument('--batch_size', type=int, default=1,", " help='No. of trans to sample from buffer for each update')", " parser.add_argument('--trans_style', type=str, default='random_unsegmented',", " choices=['random_unsegmented', 'random_segmented'],", " help='How to sample transitions from expert buffer')", ' ', ' # Viterbi configuration', " parser.add_argument('--skill_supervision', type=str, default='none',", " choices=['full', 'semi:0.10', 'semi:0.25', 'none'],", " help='Type of supervision for latent skills. '", " 'full: Use ground truth skills for offline data.'", " 'semi:x: Use Viterbi to update latent skills for offline data.'", " 'none: Use Viterbi to update latent skills for expert and offline data.')", " parser.add_argument('--num_skills', type=int, default=None,", " help='Number of skills to use for agent, if provided, will override expert skill set. '", ' \'Use when skill supervision is "none"\')', " parser.add_argument('--wrap_level', type=str, default='1', choices=['0', '1', '2'],", " help='consumed by multi-object expert to determine how to wrap effective skills of expert')", ' ', ' # Polyak', " parser.add_argument('--update_target_interval', type=int, default=20,", " help='Number of time steps after which target networks will be updated using polyak averaging')", " parser.add_argument('--actor_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for actor.')", " parser.add_argument('--director_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for director.')", " parser.add_argument('--critic_polyak', type=float, default=0.95,", " help='Polyak averaging coefficient for critic.')", ' ', ' # Evaluation', " parser.add_argument('--eval_interval', type=int, default=100)", " parser.add_argument('--visualise_test', type=bool, default=False, help='Visualise test episodes?')", ' ', ' # Parameters', " parser.add_argument('--discount', type=float, default=0.99, help='Discount used for returns.')", " parser.add_argument('--replay_regularization', type=float, default=0.05,", " help='Replay Regularization Coefficient. Used by both ValueDICE (0.1) and DemoDICE (0.05)')", " parser.add_argument('--nu_grad_penalty_coeff', type=float, default=1e-4,", " help='Nu Net Gradient Penalty Coefficient. ValueDICE uses 10.0, DemoDICE uses 1e-4')", " parser.add_argument('--cost_grad_penalty_coeff', type=float, default=10,", " help='Cost Net Gradient Penalty Coefficient')", " parser.add_argument('--actor_lr', type=float, default=3e-3)", " parser.add_argument('--critic_lr', type=float, default=3e-4)", " parser.add_argument('--disc_lr', type=float, default=3e-4)", " parser.add_argument('--clip_obs', type=float, default=200.0,", " help='Un-normalised i.e. 
raw Observed Values (State and Goals) are clipped to this value')", ' ', ' # Specify Path Configurations', " parser.add_argument('--dir_data', type=str, default=data_dir)", " parser.add_argument('--dir_root_log', type=str, default=log_dir)", " parser.add_argument('--dir_summary', type=str, default=os.path.join(log_dir, 'summary'))", " parser.add_argument('--dir_plot', type=str, default=os.path.join(log_dir, 'plots'))", " parser.add_argument('--dir_param', type=str, default=os.path.join(log_dir, 'models'))", " parser.add_argument('--dir_post', type=str, default='./finetuned_models',", " help='Provide the <path_to_models>')", " parser.add_argument('--dir_pre', type=str, default='./pretrained_models',", " help='Provide the <path_to_models>')", ' ', ' args = parser.parse_args()', ' ', ' # Load the environment config', ' args = get_config_env(args, ag_in_env_goal=True)', ' ', ' # Other Configurations', ' args.train_demos = int(args.expert_demos * args.perc_train)', ' args.val_demos = args.expert_demos - args.train_demos', ' ', ' # Set number of skills [For unsupervised skill learning]', " if args.num_skills is not None and args.skill_supervision == 'none':", " print('Overriding c_dim with specified %d skills' % args.num_skills)", ' args.c_dim = args.num_skills', ' ', ' # Set number of skills [For full or semi-supervised skill learning]', " if args.env_name == 'OpenAIPickandPlace' and args.wrap_level != '0' and args.skill_supervision != 'none':", " print('Overriding c_dim based on Wrap Level %s' % args.wrap_level)", " if args.wrap_level == '1':", ' args.c_dim = 3', " elif args.wrap_level == '2':", ' args.c_dim = args.num_objs', ' else:', " raise NotImplementedError('Wrap level %s not implemented' % args.wrap_level)", ' ', ' return args', '', '', 'def run(db: bool, algo: str):', ' ', ' if db:', ' print("Running in Debug Mode. 
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])"]
[" prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])"]
[" buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Function 'repurpose_skill_seq' used at line 952 is defined at line 89 and has a Long-Range dependency. Variable 'args' used at line 952 is defined at line 892 and has a Long-Range dependency. Variable 'buffered_data' used at line 952 is defined at line 948 and has a Short-Range dependency.
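The dependency labels above appear to compare the line where a symbol is used against the line where it is defined. A minimal sketch of that classification rule, using the line numbers from this record; the 10-line cutoff is an assumed placeholder for illustration, since the record does not state the actual threshold.

def dependency_range(def_line: int, use_line: int, cutoff: int = 10) -> str:
    # Classify a definition/use pair by line distance; the cutoff value is an
    # assumption, not taken from the dataset.
    return "Short-Range" if abs(use_line - def_line) <= cutoff else "Long-Range"

# Values taken from the record above:
print(dependency_range(def_line=89, use_line=952))   # Long-Range
print(dependency_range(def_line=948, use_line=952))  # Short-Range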
{}
{'Function Long-Range': 1, 'Variable Long-Range': 1, 'Variable Short-Range': 1}
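This frequency dictionary matches a simple count of the category labels mentioned in the categories text two fields above. A minimal sketch of that counting step, assuming the sentence format shown in this record; the parsing helper is hypothetical and only covers the kinds and ranges observed here.

import re
from collections import Counter

def category_frequencies(categories_text: str) -> dict:
    # Pull out "<Kind> ... has a <Range> dependency" pairs and count them.
    pattern = r"(Function|Variable)[^.]*?has a (Long-Range|Short-Range) dependency"
    pairs = re.findall(pattern, categories_text)
    return dict(Counter(f"{kind} {rng}" for kind, rng in pairs))

text = ("Function 'repurpose_skill_seq' used at line 952 is defined at line 89 and has a Long-Range dependency. "
        "Variable 'args' used at line 952 is defined at line 892 and has a Long-Range dependency. "
        "Variable 'buffered_data' used at line 952 is defined at line 948 and has a Short-Range dependency.")
print(category_frequencies(text))
# {'Function Long-Range': 1, 'Variable Long-Range': 1, 'Variable Short-Range': 1}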
infilling_python
RL_Motion_Planning
962
962
(db=True)")', ' ', ' tf.config.run_functions_eagerly(db)', ' ', ' logger.info("# ################# Working on Model: \\"{}\\" ################# #".format(algo))', ' ', ' args = get_config(db=db)', ' args.algo = algo', ' args.log_dir = log_dir', ' ', ' logger.info("---------------------------------------------------------------------------------------------")', ' config: dict = vars(args)', ' config = {key: str(value) for key, value in config.items()}', ' config = OrderedDict(sorted(config.items()))', ' logger.info(json.dumps(config, indent=4))', ' ', ' # Clear tensorflow graph and cache', ' tf.keras.backend.clear_session()', ' tf.compat.v1.reset_default_graph()', ' ', ' # ######################################################################################################## #', ' # ############################################# DATA LOADING ############################################# #', ' # ######################################################################################################## #', ' # Load Buffer to store expert data', ' n_objs = args.num_objs', ' buffer_shape: Dict[str, Tuple[int, ...]] = get_buffer_shape(args)', ' ', ' expert_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim),', ' )', ' offline_buffer = ReplayBufferTf(', ' buffer_shape, args.buffer_size, args.horizon,', ' sample_transitions(args.trans_style, state_to_goal=state_to_goal(n_objs), num_options=args.c_dim)', ' )', ' if n_objs == 3:', " expert_data_file = 'three_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'three_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 2:', " expert_data_file = 'two_obj_{}_train.pkl'.format(args.expert_behaviour)", " offline_data_file = 'two_obj_{}_offline.pkl'.format(args.expert_behaviour)", ' elif n_objs == 1:', " expert_data_file = 'single_obj_train.pkl'", " offline_data_file = 'single_obj_offline.pkl'", ' else:', ' raise NotImplementedError', ' expert_data_path = os.path.join(args.dir_data, expert_data_file)', ' offline_data_path = os.path.join(args.dir_data, offline_data_file)', ' ', ' if not os.path.exists(expert_data_path):', ' logger.error(', ' "Expert data not found at {}. Please run the data generation script first.".format(expert_data_path))', ' sys.exit(-1)', ' ', ' if not os.path.exists(offline_data_path):', ' logger.error(', ' "Offline data not found at {}. Please run the data generation script first.".format(offline_data_path))', ' sys.exit(-1)', ' ', ' # Store the expert data in the expert buffer -> D_E', ' logger.info("Loading Expert Demos from {} into Expert Buffer for training.".format(expert_data_path))', " with open(expert_data_path, 'rb') as handle:", ' buffered_data = pickle.load(handle)', ' ', ' # [Optional] Reformat the G.T. 
skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' expert_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.expert_demos)', ' ', ' # Store the offline data in the policy buffer for DemoDICE -> D_O', ' logger.info("Loading Offline Demos from {} into Offline Buffer for training.".format(offline_data_path))', " with open(offline_data_path, 'rb') as handle:"]
[' buffered_data = pickle.load(handle)']
[' ', ' # [Optional] Reformat the G.T. skill sequences', " curr_skills = repurpose_skill_seq(args, buffered_data['curr_skills'])", " prev_skills = repurpose_skill_seq(args, buffered_data['prev_skills'])", " buffered_data['curr_skills'] = curr_skills", " buffered_data['prev_skills'] = prev_skills", ' # Add a new key "has_gt_skill" indicating that the skill is G.T.', " buffered_data['has_gt_skill'] = tf.ones_like(buffered_data['successes'], dtype=tf.float32)", ' offline_buffer.load_data_into_buffer(buffered_data=buffered_data, num_demos_to_load=args.offline_demos)', ' # ########################################################################################################### #', ' # ############################################# TRAINING #################################################### #', ' # ########################################################################################################### #', ' start = time.time()', ' ', ' agent = Agent(args, expert_buffer, offline_buffer)', ' ', ' logger.info("Training .......")', ' agent.learn()', '', '', 'if __name__ == "__main__":', ' num_runs = 1', ' for i in range(num_runs):', " run(db=True, algo='BC')"]
[]
Library 'pickle' used at line 962 is imported at line 12 and has a Long-Range dependency.
{}
{'Library Long-Range': 1}
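The masked line in the record above is a bare pickle.load call on an already-open file handle, so its only non-local dependency is the pickle module imported near the top of that script, which is what the single 'Library Long-Range' count records. A minimal, self-contained sketch of the same round-trip (the dictionary and file name below are placeholders, not taken from the dataset):

    import pickle

    # Placeholder payload standing in for the buffered demonstrations.
    demo = {'actions': [[0.0, 0.0, 0.0, 0.0]], 'successes': [0]}

    with open('demo.pkl', 'wb') as handle:
        pickle.dump(demo, handle)

    with open('demo.pkl', 'rb') as handle:
        buffered_data = pickle.load(handle)  # same call shape as the infilled line

    assert buffered_data == demo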
infilling_python
simplex_method
4
4
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):']
[' return sum(x*y for x,y in zip(a,b))']
['', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Generator_Expressions', 'usage_line': 4}]
Variable 'x' used at line 4 is part of a Generator_Expressions defined at line 4 and has a Short-Range dependency. Variable 'y' used at line 4 is part of a Generator_Expressions defined at line 4 and has a Short-Range dependency. Variable 'a' used at line 4 is defined at line 3 and has a Short-Range dependency. Variable 'b' used at line 4 is defined at line 3 and has a Short-Range dependency.
{'Generator_Expressions': 1}
{'Variable Generator_Expressions Short-Range': 2, 'Variable Short-Range': 2}
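The infilled body in the record above is a single generator expression: x and y are bound by the expression's own for clause, while a and b come from the def dot(a,b) signature one line earlier, so every dependency is Short-Range. A standalone check of that line (the example vectors are made up for illustration):

    def dot(a, b):
        # x and y are bound inside the generator expression; a and b are the parameters above
        return sum(x * y for x, y in zip(a, b))

    assert dot([1, 2, 3], [4, 5, 6]) == 32  # 1*4 + 2*5 + 3*6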
infilling_python
simplex_method
8
8
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):']
[' return [row[j] for row in A]']
['', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 8}]
Variable 'row' used at line 8 is part of a List_Comprehension defined at line 8 and has a Short-Range dependency. Variable 'A' used at line 8 is defined at line 7 and has a Short-Range dependency. Variable 'j' used at line 8 is defined at line 7 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 2}
infilling_python
simplex_method
12
12
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):']
[' return [column(A, j) for j in range(len(A[0]))]']
['', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 12}]
Variable 'j' used at line 12 is part of a List_Comprehension defined at line 12 and has a Short-Range dependency. Variable 'A' used at line 12 is defined at line 11 and has a Short-Range dependency. Function 'column' used at line 12 is defined at line 7 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 1, 'Function Short-Range': 1}
infilling_python
simplex_method
16
16
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):']
[' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1']
['', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 16}]
Variable 'c' used at line 16 is part of a List_Comprehension defined at line 16 and has a Short-Range dependency. Variable 'col' used at line 16 is defined at line 15 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 1}
infilling_python
simplex_method
20
21
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):']
[' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]']
['', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 20}]
Variable 'i' used at line 20 is part of a List_Comprehension defined at line 20 and has a Short-Range dependency. Variable 'x' used at line 20 is part of a List_Comprehension defined at line 20 and has a Short-Range dependency. Variable 'column' used at line 20 is defined at line 19 and has a Short-Range dependency. Variable 'tableau' used at line 21 is defined at line 19 and has a Short-Range dependency. Variable 'pivotRow' used at line 21 is defined at line 20 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Short-Range': 3}
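The two infilled lines in the record above first locate the row where the pivot column holds its single 1 (a list comprehension indexed with [0]) and then read that row's right-hand-side entry; the column name here is the parameter defined on line 19 of that file, not the module-level column helper. A standalone sketch with a tiny made-up tableau:

    def variableValueForPivotColumn(tableau, column):
        pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]  # row index of the single 1
        return tableau[pivotRow][-1]                                 # that row's RHS value

    tableau = [[1, 0, 5.0],
               [0, 1, 7.0]]
    print(variableValueForPivotColumn(tableau, [0, 1]))  # 7.0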
infilling_python
simplex_method
25
26
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):']
[' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])']
['', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Generator_Expressions', 'usage_line': 26}]
Variable 'tableau' used at line 25 is defined at line 24 and has a Short-Range dependency. Variable 'x' used at line 26 is part of a Generator_Expressions defined at line 26 and has a Short-Range dependency. Variable 'lastRow' used at line 26 is defined at line 25 and has a Short-Range dependency.
{'Generator_Expressions': 1}
{'Variable Short-Range': 2, 'Variable Generator_Expressions Short-Range': 1}
infilling_python
simplex_method
32
33
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False']
[' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y']
['', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Lambda_Expressions', 'usage_line': 32}]
Library 'heapq' used at line 32 is imported at line 1 and has a Long-Range dependency. Variable 'L' used at line 32 is defined at line 29 and has a Short-Range dependency. Variable 'x' used at line 32 is part of a Lambda_Expressions defined at line 32 and has a Short-Range dependency. Variable 'x' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'y' used at line 33 is defined at line 32 and has a Short-Range dependency.
{'Lambda_Expressions': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 3, 'Variable Lambda_Expressions Short-Range': 1}
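The infill in the record above pairs heapq.nsmallest with a lambda key to take the two smallest (index, value) tuples by value and then compares them; heapq itself is imported on line 1 of the file, hence the Library Long-Range count. A standalone sketch with a made-up quotient list:

    import heapq

    # Reproduction of the helper annotated in the record above.
    def moreThanOneMin(L):
        if len(L) <= 1:
            return False
        x, y = heapq.nsmallest(2, L, key=lambda x: x[1])  # two smallest pairs, ordered by value
        return x == y

    quotients = [(0, 2.5), (1, 2.5), (2, 4.0)]  # made-up (row, ratio) pairs
    print(moreThanOneMin(quotients))            # False: equal ratios, but the row indices differ

Note that the equality test is over the whole (index, value) pairs, so a ratio tie at two different rows still compares unequal; a value-only comparison such as x[1] == y[1] would be needed to flag that case, though the helper is quoted here unchanged.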
infilling_python
simplex_method
37
37
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):']
[' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ']
['', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 37}]
Variable 'i' used at line 37 is part of a List_Comprehension defined at line 37 and has a Short-Range dependency. Variable 'rowStart' used at line 37 is defined at line 36 and has a Short-Range dependency. Variable 'numRows' used at line 37 is defined at line 36 and has a Short-Range dependency. Variable 'j' used at line 37 is part of a List_Comprehension defined at line 37 and has a Short-Range dependency. Variable 'numCols' used at line 37 is defined at line 36 and has a Short-Range dependency. Variable 'val' used at line 37 is defined at line 36 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Short-Range': 4}
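This record's ground-truth line is the nested comprehension inside identity(). The sketch below re-implements that helper locally, only to make the effect of the val and rowStart parameters concrete: val scales the diagonal, and rowStart drops the first rows of the (numRows x numCols) identity block. The renamed function and example arguments are illustrative, not part of the dataset.

def identity(num_rows, num_cols, val=1, row_start=0):
    # rows i = row_start .. num_rows-1 of a val-scaled identity matrix
    return [[val if i == j else 0 for j in range(num_cols)]
            for i in range(row_start, num_rows)]

print(identity(3, 3))                         # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
print(identity(3, 3, val=-1, row_start=1))    # [[0, -1, 0], [0, 0, -1]]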
infilling_python
simplex_method
51
52
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:']
[' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)']
[' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. 
A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 51}, {'reason_category': 'If Body', 'usage_line': 52}]
Variable 'newVars' used at line 51 is defined at line 41 and has a Short-Range dependency. Variable 'ltThreshold' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'numRows' used at line 52 is defined at line 42 and has a Short-Range dependency. Variable 'ltThreshold' used at line 52 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 2}
{'Variable Short-Range': 2, 'Variable Medium-Range': 2}
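The two infilled lines in this record are the 'less than' branch of standardForm's bookkeeping: each inequality constraint adds one slack variable and one tableau row, while equalities (handled just below) add a row but no slack variable. A small, self-contained sketch of that counting, with made-up threshold lists:

def count_new_vars_and_rows(gt_thresholds, lt_thresholds, eq_thresholds):
    # one slack variable per inequality, one row per constraint of any kind
    new_vars = len(gt_thresholds) + len(lt_thresholds)
    num_rows = len(gt_thresholds) + len(lt_thresholds) + len(eq_thresholds)
    return new_vars, num_rows

print(count_new_vars_and_rows([10], [5, 7], [3]))   # (3, 4)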
infilling_python
simplex_method
56
56
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:']
[' numRows += len(eqThreshold)']
['', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. 
A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 56}]
Variable 'numRows' used at line 56 is defined at line 42 and has a Medium-Range dependency. Variable 'eqThreshold' used at line 56 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 1}
{'Variable Medium-Range': 2}
infilling_python
simplex_method
60
60
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:']
[' cost = [-x for x in cost]']
['', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. 
A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'If Body', 'usage_line': 60}, {'reason_category': 'List_Comprehension', 'usage_line': 60}]
Variable 'x' used at line 60 is part of a List_Comprehension defined at line 60 and has a Short-Range dependency. Variable 'cost' used at line 60 is defined at line 39 and has a Medium-Range dependency.
{'If Body': 1, 'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Medium-Range': 1}
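The single infilled line here is the minimization-to-maximization conversion: minimizing c·x is the same as maximizing (-c)·x. A tiny illustration with an invented cost vector:

cost = [3.0, -2.0, 5.0]
maximization = False
if not maximization:
    cost = [-x for x in cost]      # negate the objective coefficients
print(cost)                        # [-3.0, 2.0, -5.0]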
infilling_python
simplex_method
67
67
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables']
[' newCost = list(cost) + [0] * newVars']
[' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[]
Variable 'cost' used at line 67 is defined at line 39 and has a Medium-Range dependency. Variable 'newVars' used at line 67 is defined at line 41 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 2}
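This record infills the line that pads the objective with zero coefficients for the newly introduced slack variables, so the cost vector matches the widened constraint rows. A minimal illustration (the numbers are invented):

cost = [4, 3]
new_vars = 3                        # e.g. three inequality constraints
new_cost = list(cost) + [0] * new_vars
print(new_cost)                     # [4, 3, 0, 0, 0]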
infilling_python
simplex_method
78
78
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables']
[' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]']
[' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 78}, {'reason_category': 'List_Comprehension', 'usage_line': 78}]
Variable 'constraints' used at line 78 is defined at line 68 and has a Short-Range dependency. Variable 'c' used at line 78 is part of a List_Comprehension defined at line 78 and has a Short-Range dependency. Variable 'r' used at line 78 is part of a List_Comprehension defined at line 78 and has a Short-Range dependency. Variable 'constraintList' used at line 78 is part of a Loop defined at line 76 and has a Short-Range dependency. Function 'identity' used at line 78 is defined at line 36 and has a Long-Range dependency. Variable 'numRows' used at line 78 is defined at line 42 and has a Long-Range dependency. Variable 'newVars' used at line 78 is defined at line 41 and has a Long-Range dependency. Variable 'coefficient' used at line 78 is part of a Loop defined at line 76 and has a Short-Range dependency. Variable 'offset' used at line 78 is defined at line 73 and has a Short-Range dependency.
{'Loop Body': 1, 'List_Comprehension': 1}
{'Variable Short-Range': 2, 'Variable List_Comprehension Short-Range': 2, 'Variable Loop Short-Range': 2, 'Function Long-Range': 1, 'Variable Long-Range': 2}
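The infilled line in this record stitches the slack columns onto the original constraint rows: each row is concatenated with one row of a signed, offset identity block. The sketch below reproduces just that step with a local copy of identity() and two invented 'less than' constraints:

def identity(num_rows, num_cols, val=1, row_start=0):
    return [[val if i == j else 0 for j in range(num_cols)]
            for i in range(row_start, num_rows)]

less_thans = [[1, 2], [3, 1]]       # two constraints in two original variables
num_rows, new_vars, coefficient, offset = 2, 2, 1, 0
rows = [c + r for c, r in zip(less_thans, identity(num_rows, new_vars, coefficient, offset))]
print(rows)                          # [[1, 2, 1, 0], [3, 1, 0, 1]]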
infilling_python
simplex_method
80
80
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint']
[' threshold += oldThreshold']
[' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 80}]
Variable 'threshold' used at line 80 is defined at line 69 and has a Medium-Range dependency. Variable 'oldThreshold' used at line 80 is part of a Loop defined at line 76 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Medium-Range': 1, 'Variable Loop Short-Range': 1}
infilling_python
simplex_method
82
82
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints']
[' offset += len(oldThreshold)']
['', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 82}]
Variable 'offset' used at line 82 is defined at line 73 and has a Short-Range dependency. Variable 'oldThreshold' used at line 82 is part of a Loop defined at line 76 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 1}
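The offset increment infilled in this record is what keeps the slack columns from colliding: after a group of constraints is processed, the next group's identity block starts its diagonal that many columns further to the right. A hedged sketch with an invented split of one 'greater than' and two 'less than' constraints:

def identity(num_rows, num_cols, val=1, row_start=0):
    return [[val if i == j else 0 for j in range(num_cols)]
            for i in range(row_start, num_rows)]

num_rows, new_vars = 3, 3
offset = 0
for group_size, coeff in [(1, -1), (2, 1)]:
    block = identity(num_rows, new_vars, coeff, offset)[:group_size]
    print(offset, block)
    offset += group_size             # same role as offset += len(oldThreshold)
# 0 [[-1, 0, 0]]
# 1 [[0, 1, 0], [0, 0, 1]]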
infilling_python
simplex_method
96
97
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables']
[' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])']
[' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 96}, {'reason_category': 'List_Comprehension', 'usage_line': 97}]
Variable 'row' used at line 96 is part of a List_Comprehension defined at line 96 and has a Short-Range dependency. Variable 'x' used at line 96 is part of a List_Comprehension defined at line 96 and has a Short-Range dependency. Variable 'A' used at line 96 is defined at line 94 and has a Short-Range dependency. Variable 'b' used at line 96 is defined at line 94 and has a Short-Range dependency. Variable 'tableau' used at line 97 is defined at line 96 and has a Short-Range dependency. Variable 'ci' used at line 97 is part of a List_Comprehension defined at line 97 and has a Short-Range dependency. Variable 'c' used at line 97 is defined at line 94 and has a Short-Range dependency.
{'List_Comprehension': 2}
{'Variable List_Comprehension Short-Range': 3, 'Variable Short-Range': 4}
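The two infilled lines in this record build the initial simplex tableau: each row of A is copied and augmented with its right-hand side from b, and the objective row c is appended with a trailing 0 in the value column. A self-contained illustration with an invented standard-form problem (slack columns already included):

c = [3, 2, 0, 0]
A = [[1, 2, 1, 0],
     [3, 1, 0, 1]]
b = [4, 5]

tableau = [row[:] + [x] for row, x in zip(A, b)]
tableau.append([ci for ci in c] + [0])
for row in tableau:
    print(row)
# [1, 2, 1, 0, 4]
# [3, 1, 0, 1, 5]
# [3, 2, 0, 0, 0]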
infilling_python
simplex_method
106
107
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)']
[' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]']
['', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 106}, {'reason_category': 'List_Comprehension', 'usage_line': 106}, {'reason_category': 'Loop Body', 'usage_line': 107}, {'reason_category': 'Lambda_Expressions', 'usage_line': 107}]
Variable 'i' used at line 106 is part of a List_Comprehension defined at line 106 and has a Short-Range dependency. Variable 'x' used at line 106 is part of a List_Comprehension defined at line 106 and has a Short-Range dependency. Variable 'tableau' used at line 106 is defined at line 97 and has a Short-Range dependency. Variable 'column_choices' used at line 107 is defined at line 106 and has a Short-Range dependency. Variable 'a' used at line 107 is part of a Lambda_Expressions defined at line 107 and has a Short-Range dependency.
{'Loop Body': 2, 'List_Comprehension': 1, 'Lambda_Expressions': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Short-Range': 2, 'Variable Lambda_Expressions Short-Range': 1}
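For illustration, a standalone sketch (toy objective row assumed) of the entering-variable choice in this record's target span at lines 106-107: among the strictly positive coefficients of the objective row, the column with the smallest value is selected.

def choose_entering_column(last_row):
    # Pair each column index with its objective coefficient, keeping positives only.
    column_choices = [(i, x) for i, x in enumerate(last_row[:-1]) if x > 0]
    # Pick the column whose positive coefficient is smallest.
    return min(column_choices, key=lambda a: a[1])[0]

print(choose_entering_column([3, 2, 0, 0, 0]))  # -> 1 (coefficient 2 is the smallest positive)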
infilling_python
simplex_method
110
111
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness']
[' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')"]
['', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 110}, {'reason_category': 'If Condition', 'usage_line': 110}, {'reason_category': 'Generator_Expressions', 'usage_line': 110}, {'reason_category': 'Loop Body', 'usage_line': 111}, {'reason_category': 'If Body', 'usage_line': 111}]
Variable 'row' used at line 110 is part of a Generator_Expressions defined at line 110 and has a Short-Range dependency. Variable 'tableau' used at line 110 is defined at line 97 and has a Medium-Range dependency. Variable 'column' used at line 110 is defined at line 107 and has a Short-Range dependency.
{'Loop Body': 2, 'If Condition': 1, 'Generator_Expressions': 1, 'If Body': 1}
{'Variable Generator_Expressions Short-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
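A small sketch (toy tableau assumed) of the unboundedness test in this record's target span at lines 110-111: if no row has a positive entry in the entering column, the objective can be increased without bound.

def is_unbounded(tableau, column):
    # No positive entry in the chosen column means no ratio test is possible.
    return all(row[column] <= 0 for row in tableau)

tableau = [[-1, 2, 4], [0, 1, 2], [-3, 5, 0]]
print(is_unbounded(tableau, 0))  # True: column 0 holds -1, 0, -3
print(is_unbounded(tableau, 1))  # False: column 1 holds positive entries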
infilling_python
simplex_method
114
114
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient']
[' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]']
['', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 114}, {'reason_category': 'List_Comprehension', 'usage_line': 114}]
Variable 'i' used at line 114 is part of a List_Comprehension defined at line 114 and has a Short-Range dependency. Variable 'r' used at line 114 is part of a List_Comprehension defined at line 114 and has a Short-Range dependency. Variable 'tableau' used at line 114 is defined at line 97 and has a Medium-Range dependency. Variable 'column' used at line 114 is defined at line 107 and has a Short-Range dependency.
{'Loop Body': 1, 'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
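A standalone sketch (toy tableau assumed) of the ratio test at this record's target line 114, together with a degeneracy check; note the tie check here compares the two smallest quotient values directly, which is an assumption rather than a copy of the moreThanOneMin helper above.

import heapq

def ratio_test(tableau, column):
    # Pair each constraint row index with rhs / entry, for positive entries only.
    return [(i, r[-1] / r[column]) for i, r in enumerate(tableau[:-1]) if r[column] > 0]

def has_tied_minimum(quotients):
    # A tie between the two smallest quotients signals degeneracy (assumed value-based check).
    if len(quotients) <= 1:
        return False
    a, b = heapq.nsmallest(2, quotients, key=lambda q: q[1])
    return a[1] == b[1]

tableau = [[1, 1, 4], [1, 0, 2], [3, 2, 0]]
print(ratio_test(tableau, 0))                    # -> [(0, 4.0), (1, 2.0)]
print(has_tied_minimum(ratio_test(tableau, 0)))  # -> False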
infilling_python
simplex_method
120
120
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)']
[' row = min(quotients, key=lambda x: x[1])[0]']
['', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 120}, {'reason_category': 'Lambda_Expressions', 'usage_line': 120}]
Variable 'quotients' used at line 120 is defined at line 114 and has a Short-Range dependency. Variable 'x' used at line 120 is part of a Lambda_Expressions defined at line 120 and has a Short-Range dependency.
{'Loop Body': 1, 'Lambda_Expressions': 1}
{'Variable Short-Range': 1, 'Variable Lambda_Expressions Short-Range': 1}
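A brief sketch (quotient list assumed) of the leaving-variable choice at this record's target line 120: the pivot row is the row index attached to the smallest quotient from the ratio test.

quotients = [(0, 4.0), (1, 2.0), (2, 3.5)]   # (row index, rhs/entry) pairs, assumed
row = min(quotients, key=lambda q: q[1])[0]  # row with the smallest quotient leaves the basis
print(row)  # -> 1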
infilling_python
simplex_method
123
123
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column']
[' pivot = row, column']
['', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 123}]
Variable 'row' used at line 123 is defined at line 120 and has a Short-Range dependency. Variable 'column' used at line 123 is defined at line 107 and has a Medium-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Medium-Range': 1}
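A brief sketch (indices assumed) of the bookkeeping at this record's target line 123: the chosen row and column indices are packed into a pivot tuple and later unpacked as i, j.

row, column = 1, 0            # assumed results of the ratio test and entering-column choice
pivot = row, column           # tuple packing, as in the target line
print("Next pivot index is=%d,%d" % pivot)
i, j = pivot                  # tuple unpacking, mirroring the pivot step that follows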
infilling_python
simplex_method
127
127
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot']
[' pivotDenom = tableau[i][j]']
['', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 127}]
Variable 'tableau' used at line 127 is defined at line 97 and has a Medium-Range dependency. Variable 'i' used at line 127 is defined at line 126 and has a Short-Range dependency. Variable 'j' used at line 127 is defined at line 126 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Medium-Range': 1, 'Variable Short-Range': 2}
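A brief sketch (toy tableau assumed) of this record's target line 127: the entry at the pivot row and column is read out as the denominator later used to normalize the pivot row.

tableau = [[2.0, 1.0, 4.0], [1.0, 3.0, 6.0], [3.0, 2.0, 0.0]]  # assumed toy tableau
i, j = 0, 0                  # assumed pivot indices
pivot_denom = tableau[i][j]  # the pivot element itself
print(pivot_denom)           # -> 2.0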
infilling_python
simplex_method
130
130
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row']
[' tableau[i] = [x / pivotDenom for x in tableau[i]]']
['', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 130}, {'reason_category': 'List_Comprehension', 'usage_line': 130}]
Variable 'tableau' used at line 130 is defined at line 97 and has a Long-Range dependency. Variable 'i' used at line 130 is defined at line 126 and has a Short-Range dependency. Variable 'x' used at line 130 is part of a List_Comprehension defined at line 130 and has a Short-Range dependency. Variable 'pivotDenom' used at line 130 is defined at line 127 and has a Short-Range dependency.
{'Loop Body': 1, 'List_Comprehension': 1}
{'Variable Long-Range': 1, 'Variable Short-Range': 2, 'Variable List_Comprehension Short-Range': 1}
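A small sketch (toy tableau assumed) of the pivot-row normalization at this record's target line 130: every entry of the pivot row is divided by the pivot element so the pivot entry becomes 1.

tableau = [[2.0, 1.0, 4.0], [1.0, 3.0, 6.0], [3.0, 2.0, 0.0]]  # assumed toy tableau
i, j = 0, 0
pivot_denom = tableau[i][j]
tableau[i] = [x / pivot_denom for x in tableau[i]]  # scale the pivot row
print(tableau[i])  # -> [1.0, 0.5, 2.0]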
infilling_python
simplex_method
134
136
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):']
[' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]']
[' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'Loop Body', 'usage_line': 134}, {'reason_category': 'If Condition', 'usage_line': 134}, {'reason_category': 'Loop Body', 'usage_line': 135}, {'reason_category': 'List_Comprehension', 'usage_line': 135}, {'reason_category': 'If Body', 'usage_line': 135}, {'reason_category': 'Loop Body', 'usage_line': 136}, {'reason_category': 'List_Comprehension', 'usage_line': 136}, {'reason_category': 'If Body', 'usage_line': 136}]
Variable 'k' used at line 134 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'i' used at line 134 is defined at line 126 and has a Short-Range dependency. Variable 'y' used at line 135 is part of a List_Comprehension defined at line 135 and has a Short-Range dependency. Variable 'tableau' used at line 135 is defined at line 130 and has a Short-Range dependency. Variable 'i' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'k' used at line 135 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'j' used at line 135 is defined at line 126 and has a Short-Range dependency. Variable 'tableau' used at line 136 is defined at line 130 and has a Short-Range dependency. Variable 'k' used at line 136 is part of a Loop defined at line 133 and has a Short-Range dependency. Variable 'x' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'y' used at line 136 is part of a List_Comprehension defined at line 136 and has a Short-Range dependency. Variable 'pivotRowMultiple' used at line 136 is defined at line 135 and has a Short-Range dependency.
{'Loop Body': 3, 'If Condition': 1, 'List_Comprehension': 2, 'If Body': 2}
{'Variable Loop Short-Range': 3, 'Variable Short-Range': 6, 'Variable List_Comprehension Short-Range': 3}
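A standalone sketch (toy tableau assumed, pivot row already normalized) of the row elimination at this record's target lines 134-136: every non-pivot row subtracts the appropriate multiple of the pivot row so the pivot column ends up with a single 1.

tableau = [[1.0, 0.5, 2.0], [1.0, 3.0, 6.0], [3.0, 2.0, 0.0]]  # assumed; row 0 is the normalized pivot row
i, j = 0, 0
for k in range(len(tableau)):
    if k != i:
        # Multiple of the pivot row that cancels the entry in column j of row k.
        pivot_row_multiple = [y * tableau[k][j] for y in tableau[i]]
        tableau[k] = [x - y for x, y in zip(tableau[k], pivot_row_multiple)]
for row in tableau:
    print(row)  # -> [1.0, 0.5, 2.0], [0.0, 2.5, 4.0], [0.0, 0.5, -6.0]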
infilling_python
simplex_method
143
143
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns']
[' columns = transpose(tableau)']
['', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[]
Function 'transpose' used at line 143 is defined at line 11 and has a Long-Range dependency. Variable 'tableau' used at line 143 is defined at line 97 and has a Long-Range dependency.
{}
{'Function Long-Range': 1, 'Variable Long-Range': 1}
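A small sketch (toy tableau assumed) of the transpose step at this record's target line 143: the tableau is flipped so each column can be inspected as a plain list, e.g. when looking for pivot columns.

def transpose(A):
    # Column j of A becomes row j of the result.
    return [[row[j] for row in A] for j in range(len(A[0]))]

tableau = [[1.0, 0.5, 2.0], [0.0, 2.5, 4.0]]  # assumed toy tableau
print(transpose(tableau))  # -> [[1.0, 0.0], [0.5, 2.5], [2.0, 4.0]]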
infilling_python
simplex_method
146
146
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.']
[' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]']
['', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 146}]
Variable 'j' used at line 146 is part of a List_Comprehension defined at line 146 and has a Short-Range dependency. Variable 'col' used at line 146 is part of a List_Comprehension defined at line 146 and has a Short-Range dependency. Variable 'columns' used at line 146 is defined at line 143 and has a Short-Range dependency. Function 'isPivotCol' used at line 146 is defined at line 15 and has a Long-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 2, 'Variable Short-Range': 1, 'Function Long-Range': 1}
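As a concrete illustration of the infilled line 146 and its long-range call into isPivotCol (defined at line 15), a small runnable sketch; isPivotCol is copied verbatim from the source, and the column-major 'columns' list is a hypothetical example rather than data from the record.

    def isPivotCol(col):
        # From line 15: exactly one entry equal to 1 and every other entry 0
        return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1

    # Hypothetical column view of a tableau, as transpose(tableau) returns at line 143
    columns = [[1, 0, 0],   # pivot column
               [2, 1, 0],   # not a pivot column
               [0, 1, 0],   # pivot column
               [4, 6, 0]]   # right-hand side, excluded by columns[:-1]

    # The infilled line 146: indices of the basic (pivot) columns
    indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]
    print(indices)  # [0, 2]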
infilling_python
simplex_method
149
149
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.']
[' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]']
[' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.', ' objective_value = -(tableau[-1][-1])', '', ' return tableau, primal_solution, objective_value']
[{'reason_category': 'List_Comprehension', 'usage_line': 149}]
Variable 'colIndex' used at line 149 is part of a List_Comprehension defined at line 149 and has a Short-Range dependency. Variable 'indices' used at line 149 is defined at line 146 and has a Short-Range dependency. Function 'variableValueForPivotColumn' used at line 149 is defined at line 19 and has a Long-Range dependency. Variable 'tableau' used at line 149 is defined at line 97 and has a Long-Range dependency. Variable 'columns' used at line 149 is defined at line 143 and has a Short-Range dependency.
{'List_Comprehension': 1}
{'Variable List_Comprehension Short-Range': 1, 'Variable Short-Range': 2, 'Function Long-Range': 1, 'Variable Long-Range': 1}
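A runnable sketch of the infilled line 149: variableValueForPivotColumn is copied verbatim from line 19 of the source, while the final tableau, its column view, and the indices list are hypothetical values standing in for the objects defined at lines 97, 143 and 146.

    def variableValueForPivotColumn(tableau, column):
        # From line 19: find the row holding the column's single 1, return that row's RHS entry
        pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]
        return tableau[pivotRow][-1]

    # Hypothetical final tableau (line 97) and its column view (line 143)
    tableau = [[1, 0, 4],
               [0, 1, 6],
               [0, 0, -10]]
    columns = [[1, 0, 0], [0, 1, 0], [4, 6, -10]]
    indices = [0, 1]  # pivot columns, as computed at line 146

    # The infilled line 149: (variable index, value) pairs of the basic solution
    primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex]))
                       for colIndex in indices]
    print(primal_solution)  # [(0, 4), (1, 6)]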
infilling_python
simplex_method
152
152
['import heapq', '# Calculate the dot product of two vectors', 'def dot(a,b):', ' return sum(x*y for x,y in zip(a,b))', '', '# Get a specific column from a 2D list (matrix)', 'def column(A, j):', ' return [row[j] for row in A]', '', '# Transpose a 2D list (matrix)', 'def transpose(A):', ' return [column(A, j) for j in range(len(A[0]))]', '', '# Check if a column is a pivot column', 'def isPivotCol(col):', ' return (len([c for c in col if c == 0]) == len(col) - 1) and sum(col) == 1', '', '# Find the value of a variable for a pivot column', 'def variableValueForPivotColumn(tableau, column):', ' pivotRow = [i for (i, x) in enumerate(column) if x == 1][0]', ' return tableau[pivotRow][-1]', '', '# Check if we can improve the current solution', 'def canImprove(tableau):', ' lastRow = tableau[-1]', ' return any(x > 0 for x in lastRow[:-1])', '', "# Check if there's more than one minimum in a list", 'def moreThanOneMin(L):', ' if len(L) <= 1:', ' return False', ' x,y = heapq.nsmallest(2, L, key=lambda x: x[1])', ' return x == y', '', '# Create an identity matrix with certain specifications', 'def identity(numRows, numCols, val=1, rowStart=0):', ' return [[(val if i == j else 0) for j in range(numCols)] for i in range(rowStart, numRows)] ', '', 'def standardForm(cost, greaterThans=[], gtThreshold=[], lessThans=[], ltThreshold=[], equalities=[], eqThreshold=[], maximization=True):', ' # Initialize variables for the count of new variables and the number of rows in the tableau', ' newVars = 0', ' numRows = 0', '', ' # Adjust the number of variables and rows based on the constraints', " # Add slack variables for 'greater than' inequalities", ' if gtThreshold != []:', ' newVars += len(gtThreshold)', ' numRows += len(gtThreshold)', '', ' if ltThreshold != []:', ' newVars += len(ltThreshold)', ' numRows += len(ltThreshold)', ' ', " # Equalities don't need slack variables but add to the number of rows", ' if eqThreshold != []:', ' numRows += len(eqThreshold)', '', ' # If the problem is a minimization, convert it to a maximization by negating the cost vector ', ' if not maximization:', ' cost = [-x for x in cost]', '', ' # If no new variables are needed, the problem is already in standard form', ' if newVars == 0:', ' return cost, equalities, eqThreshold', '', ' # Extend the cost function with zeros for the new slack variables', ' newCost = list(cost) + [0] * newVars', ' constraints = []', ' threshold = []', '', " # Prepare the constraints for each condition ('greater than', 'less than', 'equal to')", ' oldConstraints = [(greaterThans, gtThreshold, -1), (lessThans, ltThreshold, 1), (equalities, eqThreshold, 0)]', ' offset = 0', '', ' # Process each set of constraints', ' for constraintList, oldThreshold, coefficient in oldConstraints:', ' # Append the identity matrix multiplied by the coefficient for slack variables', ' constraints += [c + r for c, r in zip(constraintList, identity(numRows, newVars, coefficient, offset))]', ' # Append the thresholds for each constraint', ' threshold += oldThreshold', ' # Increase the offset for the identity matrix used for the next set of constraints', ' offset += len(oldThreshold)', '', ' return newCost, constraints, threshold', '', "'''", ' simplex: [float], [[float]], [float] -> [float], float', ' Solve the given standard-form linear program:', ' max <c,x>', ' s.t. 
Ax = b', ' x >= 0', ' providing the optimal solution x* and the value of the objective function', "'''", 'def simplex(c, A, b):', ' # assume the last m columns of A are the slack variables; the initial basis is the set of slack variables', ' tableau = [row[:] + [x] for row, x in zip(A, b)]', ' tableau.append([ci for ci in c] + [0])', ' print("Initial tableau:")', ' for row in tableau:', ' print(row)', ' print()', '', ' # Iterate until no improvements can be made', ' while canImprove(tableau):', ' # Choose entering variable (minimum positive index of the last row)', ' column_choices = [(i,x) for (i,x) in enumerate(tableau[-1][:-1]) if x > 0]', ' column = min(column_choices, key=lambda a: a[1])[0]', '', ' # Check for unboundedness', ' if all(row[column] <= 0 for row in tableau):', " raise Exception('Linear program is unbounded.')", '', ' # Check for degeneracy: more than one minimizer of the quotient', ' quotients = [(i, r[-1] / r[column]) for i,r in enumerate(tableau[:-1]) if r[column] > 0]', '', ' if moreThanOneMin(quotients):', " raise Exception('Linear program is degenerate.')", '', ' # Chosing leaving variable (row index minimizing the quotient)', ' row = min(quotients, key=lambda x: x[1])[0]', '', ' #pivots on the chosen row and column', ' pivot = row, column', '', ' print("Next pivot index is=%d,%d \\n" % pivot)', ' i,j = pivot', ' pivotDenom = tableau[i][j]', '', ' # Normalize the pivot row', ' tableau[i] = [x / pivotDenom for x in tableau[i]]', '', ' # Zero out the other entries in the pivot column', ' for k,row in enumerate(tableau):', ' if k != i:', ' pivotRowMultiple = [y * tableau[k][j] for y in tableau[i]]', ' tableau[k] = [x - y for x,y in zip(tableau[k], pivotRowMultiple)]', ' print("Tableau after pivot:")', ' for row in tableau:', ' print(row)', ' print()', ' ', ' # Transpose the tableau to make it easier to work with columns', ' columns = transpose(tableau)', '', ' # Identify pivot columns in the tableau. A column is a pivot column if it has a single 1 and the rest of its entries are 0.', ' indices = [j for j, col in enumerate(columns[:-1]) if isPivotCol(col)]', '', ' # Looking at the rightmost entry (the value part of the tableau row) of the row where the 1 in the pivot column is located.', ' primal_solution = [(colIndex, variableValueForPivotColumn(tableau, columns[colIndex])) for colIndex in indices]', ' ', ' # The last entry of the last row of the tableau gives us the negation of the objective function value.']
[' objective_value = -(tableau[-1][-1])']
['', ' return tableau, primal_solution, objective_value']
[]
Variable 'tableau' used at line 152 is defined at line 97 and has a Long-Range dependency.
{}
{'Variable Long-Range': 1}
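The single long-range dependency here is easiest to see with a hypothetical final tableau in place of the one built at line 97: the bottom-right cell stores the negated objective value, which the infilled line 152 flips back.

    # Hypothetical final tableau; the bottom-right cell holds the negated objective
    tableau = [[1, 0, 4],
               [0, 1, 6],
               [0, 0, -10]]

    # The infilled line 152: recover the objective value of the maximization problem
    objective_value = -(tableau[-1][-1])
    print(objective_value)  # 10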
completion_python
Image_Filtering
17
22
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),']
[' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel']
['', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' 
gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 17 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_y' used at line 17 is defined at line 10 and has a Short-Range dependency. Variable 'size_y' used at line 17 is defined at line 14 and has a Short-Range dependency. Library 'np' used at line 19 is imported at line 1 and has a Medium-Range dependency. Variable 'x' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_x' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'y' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_y' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 20 is defined at line 19 and has a Short-Range dependency. Library 'np' used at line 20 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_x' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'sigma_y' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 21 is defined at line 20 and has a Short-Range dependency. Variable 'kernel' used at line 22 is defined at line 21 and has a Short-Range dependency.
{}
{'Library Medium-Range': 3, 'Variable Short-Range': 11}
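For reference, the gaussian2D function these dependencies live in, reassembled from the record's 'before' and 'between' fields (source lines 1-22); only the comments and the final print call are added for illustration.

    import numpy as np

    def gaussian2D(sigma, kernel_size):
        sigma_x, sigma_y = sigma
        size_x, size_y = kernel_size

        # Force odd kernel dimensions
        size_x = int(size_x) // 2 * 2 + 1
        size_y = int(size_y) // 2 * 2 + 1

        # Sample a grid spanning +/- 3 standard deviations in each direction
        x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),
                           np.linspace(-3*sigma_y, 3*sigma_y, size_y))

        kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))
        kernel /= 2 * np.pi * sigma_x * sigma_y
        kernel /= kernel.sum()
        return kernel

    print(gaussian2D((1, 1), (5, 5)).sum())  # 1.0 after the explicit renormalization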
completion_python
Image_Filtering
19
22
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ']
[' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel']
['', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' 
gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 19 is imported at line 1 and has a Medium-Range dependency. Variable 'x' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_x' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'y' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_y' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 20 is defined at line 19 and has a Short-Range dependency. Library 'np' used at line 20 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_x' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'sigma_y' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 21 is defined at line 20 and has a Short-Range dependency. Variable 'kernel' used at line 22 is defined at line 21 and has a Short-Range dependency.
{}
{'Library Medium-Range': 2, 'Variable Short-Range': 9}
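A minimal sketch of the line-19 dependencies in isolation: sigma_x/sigma_y stand in for the tuple unpacking at line 10 and x, y for the meshgrid built at line 16; the values used here are illustrative assumptions, not taken from the record.

    import numpy as np

    sigma_x, sigma_y = 1.0, 1.0                                  # stands in for line 10
    x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, 5),    # stands in for line 16
                       np.linspace(-3*sigma_y, 3*sigma_y, 5))

    # Line 19 of the source: unnormalized Gaussian evaluated on the grid
    kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))
    print(kernel.shape)  # (5, 5); the centre entry is 1.0 before any normalization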
completion_python
Image_Filtering
20
22
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))']
[' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel']
['', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' 
gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'kernel' used at line 20 is defined at line 19 and has a Short-Range dependency. Library 'np' used at line 20 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_x' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'sigma_y' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 21 is defined at line 20 and has a Short-Range dependency. Variable 'kernel' used at line 22 is defined at line 21 and has a Short-Range dependency.
{}
{'Variable Short-Range': 5, 'Library Medium-Range': 1}
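To make the chained short-range 'kernel' dependencies at lines 20-22 explicit, a small sketch in which a uniform array stands in for the exp(...) result of line 19 (an assumption made only for illustration):

    import numpy as np

    sigma_x, sigma_y = 1.0, 1.0
    kernel = np.ones((5, 5))                   # stand-in for the line-19 result
    kernel /= 2 * np.pi * sigma_x * sigma_y    # line 20: analytic Gaussian normalizer
    kernel /= kernel.sum()                     # line 21: renormalize so entries sum to 1
    print(kernel.sum())                        # 1.0; line 22 then returns this kernel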
completion_python
Image_Filtering
21
22
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y']
[' kernel /= kernel.sum()', ' return kernel']
['', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' 
gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'kernel' used at line 21 is defined at line 20 and has a Short-Range dependency. Variable 'kernel' used at line 22 is defined at line 21 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
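The horizon annotations in these records label each use/definition pair as Short-, Medium-, or Long-Range by the line distance between use and definition, and the dictionary above is a tally of those labels. The dump does not state the exact cut-offs, so the sketch below uses illustrative thresholds (<= 10 lines Short, <= 32 Medium, otherwise Long) that happen to be consistent with the examples shown here; classify_range, SHORT_MAX, and MEDIUM_MAX are hypothetical names for this illustration, not part of the dataset's tooling.

from collections import Counter

# Assumed thresholds -- chosen only to be consistent with the annotated
# examples in this dump; the real pipeline may use different cut-offs.
SHORT_MAX = 10
MEDIUM_MAX = 32

def classify_range(def_line, use_line):
    """Label a use/definition pair by the number of lines between them."""
    dist = abs(use_line - def_line)
    if dist <= SHORT_MAX:
        return 'Short-Range'
    if dist <= MEDIUM_MAX:
        return 'Medium-Range'
    return 'Long-Range'

# The two 'kernel' dependencies annotated above (used at 21/22, defined at 20/21).
pairs = [('Variable', 20, 21), ('Variable', 21, 22)]
tally = Counter(f"{kind} {classify_range(d, u)}" for kind, d, u in pairs)
print(dict(tally))  # {'Variable Short-Range': 2}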
completion_python
Image_Filtering
16
22
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '']
[' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel']
['', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' 
gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 16 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_x' used at line 16 is defined at line 10 and has a Short-Range dependency. Variable 'size_x' used at line 16 is defined at line 13 and has a Short-Range dependency. Library 'np' used at line 17 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_y' used at line 17 is defined at line 10 and has a Short-Range dependency. Variable 'size_y' used at line 17 is defined at line 14 and has a Short-Range dependency. Library 'np' used at line 19 is imported at line 1 and has a Medium-Range dependency. Variable 'x' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_x' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'y' used at line 19 is defined at line 16 and has a Short-Range dependency. Variable 'sigma_y' used at line 19 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 20 is defined at line 19 and has a Short-Range dependency. Library 'np' used at line 20 is imported at line 1 and has a Medium-Range dependency. Variable 'sigma_x' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'sigma_y' used at line 20 is defined at line 10 and has a Short-Range dependency. Variable 'kernel' used at line 21 is defined at line 20 and has a Short-Range dependency. Variable 'kernel' used at line 22 is defined at line 21 and has a Short-Range dependency.
{}
{'Library Medium-Range': 4, 'Variable Short-Range': 13}
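This record's completion span (lines 16-22) builds the 2-D Gaussian on a meshgrid and normalizes it. A minimal, self-contained sanity check of that construction is sketched below; it reuses the gaussian2D definition from the record's code and only adds assertions, so nothing here is part of the dataset itself.

import numpy as np

def gaussian2D(sigma, kernel_size):
    # Same construction as the completion span: odd kernel size, Gaussian on a
    # +/- 3*sigma grid, analytic constant, then renormalization to sum to 1
    # (which makes the 1/(2*pi*sigma_x*sigma_y) factor redundant).
    sigma_x, sigma_y = sigma
    size_x, size_y = kernel_size
    size_x = int(size_x) // 2 * 2 + 1
    size_y = int(size_y) // 2 * 2 + 1
    x, y = np.meshgrid(np.linspace(-3 * sigma_x, 3 * sigma_x, size_x),
                       np.linspace(-3 * sigma_y, 3 * sigma_y, size_y))
    kernel = np.exp(-(x**2 / (2 * sigma_x**2) + y**2 / (2 * sigma_y**2)))
    kernel /= 2 * np.pi * sigma_x * sigma_y
    kernel /= kernel.sum()
    return kernel

k = gaussian2D((1, 1), (11, 11))
assert k.shape == (11, 11)
assert np.isclose(k.sum(), 1.0) and (k >= 0).all()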
completion_python
Image_Filtering
31
35
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '']
[' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image']
['', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and 
right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'left' used at line 31 is defined at line 28 and has a Short-Range dependency. Variable 'target_size' used at line 31 is defined at line 24 and has a Short-Range dependency. Variable 'top' used at line 32 is defined at line 29 and has a Short-Range dependency. Variable 'target_size' used at line 32 is defined at line 24 and has a Short-Range dependency. Variable 'image' used at line 34 is defined at line 25 and has a Short-Range dependency. Variable 'top' used at line 34 is defined at line 29 and has a Short-Range dependency. Variable 'bottom' used at line 34 is defined at line 32 and has a Short-Range dependency. Variable 'right' used at line 34 is defined at line 31 and has a Short-Range dependency. Variable 'cropped_image' used at line 35 is defined at line 34 and has a Short-Range dependency.
{}
{'Variable Short-Range': 9}
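The completion span here (lines 31-35) finishes center_crop. As written in the record, the final slice takes columns 0:right rather than left:right, so only the row axis is cropped symmetrically. The sketch below is a fully symmetric variant, assuming target_size is (width, height) as in the record; center_crop_symmetric is a hypothetical name used only for this illustration.

import numpy as np

def center_crop_symmetric(image, target_size):
    # target_size is (width, height); crop the same margin from both sides
    # of each axis. The record's version slices columns 0:right instead.
    image = np.asarray(image)
    h, w = image.shape[:2]
    left = (w - target_size[0]) // 2
    top = (h - target_size[1]) // 2
    right = left + target_size[0]
    bottom = top + target_size[1]
    return image[top:bottom, left:right]

img = np.arange(100).reshape(10, 10)
print(center_crop_symmetric(img, (4, 4)).shape)  # (4, 4)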
completion_python
Image_Filtering
25
35
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):']
[' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image']
['', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and 
right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 25 is imported at line 1 and has a Medium-Range dependency. Variable 'image' used at line 25 is defined at line 24 and has a Short-Range dependency. Variable 'image' used at line 26 is defined at line 25 and has a Short-Range dependency. Variable 'w' used at line 28 is defined at line 26 and has a Short-Range dependency. Variable 'target_size' used at line 28 is defined at line 24 and has a Short-Range dependency. Variable 'h' used at line 29 is defined at line 26 and has a Short-Range dependency. Variable 'target_size' used at line 29 is defined at line 24 and has a Short-Range dependency. Variable 'left' used at line 31 is defined at line 28 and has a Short-Range dependency. Variable 'target_size' used at line 31 is defined at line 24 and has a Short-Range dependency. Variable 'top' used at line 32 is defined at line 29 and has a Short-Range dependency. Variable 'target_size' used at line 32 is defined at line 24 and has a Short-Range dependency. Variable 'image' used at line 34 is defined at line 25 and has a Short-Range dependency. Variable 'top' used at line 34 is defined at line 29 and has a Short-Range dependency. Variable 'bottom' used at line 34 is defined at line 32 and has a Short-Range dependency. Variable 'right' used at line 34 is defined at line 31 and has a Short-Range dependency. Variable 'cropped_image' used at line 35 is defined at line 34 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 15}
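The horizon_categories_output field is free text with a fixed sentence shape, and the dictionary above is its per-category tally. A small sketch of how such a tally could be recomputed from the text follows; the regular expression only covers the 'Variable'/'Library' sentence forms visible in this dump, so it is an assumption about the format rather than the dataset's own tooling.

import re
from collections import Counter

# Assumed sentence pattern, based only on the annotations visible here.
PATTERN = re.compile(
    r"(Variable|Library) '[^']+' used at line \d+"
    r".*?has a (Short|Medium|Long)-Range dependency\."
)

def tally(horizon_text):
    return dict(Counter(f"{kind} {rng}-Range"
                        for kind, rng in PATTERN.findall(horizon_text)))

text = ("Library 'np' used at line 25 is imported at line 1 and has a "
        "Medium-Range dependency. Variable 'image' used at line 25 is "
        "defined at line 24 and has a Short-Range dependency.")
print(tally(text))  # {'Library Medium-Range': 1, 'Variable Short-Range': 1}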
completion_python
Image_Filtering
61
61
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)']
['blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)']
['', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = 
mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the 
x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 61 is imported at line 2 and has a Long-Range dependency. Variable 'img_b_gray' used at line 61 is defined at line 45 and has a Medium-Range dependency. Variable 'gaussian_kernel_b' used at line 61 is defined at line 58 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 1}
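This record's single completed line applies the custom Gaussian kernel with cv2.filter2D. A minimal standalone illustration of that call is below; the random image and box kernel are placeholders for this sketch, not data from the record.

import numpy as np
import cv2

# cv2.filter2D cross-correlates the image with the kernel (equivalent to
# convolution for symmetric kernels); ddepth=-1 keeps the source depth.
img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
kernel = np.ones((5, 5), np.float32) / 25.0   # box-filter stand-in
blur = cv2.filter2D(img, -1, kernel)
print(blur.shape, blur.dtype)  # (64, 64) uint8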
completion_python
Image_Filtering
76
76
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:']
[' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)']
[' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image 
with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'If Body', 'usage_line': 76}]
Library 'np' used at line 76 is imported at line 1 and has a Long-Range dependency. Variable 'new_height' used at line 76 is defined at line 72 and has a Short-Range dependency. Variable 'new_width' used at line 76 is defined at line 73 and has a Short-Range dependency. Variable 'image' used at line 76 is defined at line 67 and has a Short-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
completion_python
Image_Filtering
78
78
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:']
[' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)']
['', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, 
cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Else Reasoning', 'usage_line': 78}]
Library 'np' used at line 78 is imported at line 1 and has a Long-Range dependency. Variable 'new_height' used at line 78 is defined at line 72 and has a Short-Range dependency. Variable 'new_width' used at line 78 is defined at line 73 and has a Short-Range dependency.
{'Else Reasoning': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
completion_python
Image_Filtering
82
82
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):']
[' downsampled_image[i, j] = image[i * factor, j * factor]']
[' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, 
cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 82}]
Variable 'downsampled_image' used at line 82 is defined at line 78 and has a Short-Range dependency. Variable 'i' used at line 82 is part of a Loop defined at line 80 and has a Short-Range dependency. Variable 'j' used at line 82 is part of a Loop defined at line 81 and has a Short-Range dependency. Variable 'image' used at line 82 is defined at line 67 and has a Medium-Range dependency. Variable 'factor' used at line 82 is defined at line 67 and has a Medium-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 1, 'Variable Loop Short-Range': 2, 'Variable Medium-Range': 2}
completion_python
Image_Filtering
80
83
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '']
[' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image']
['', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 
'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 80}, {'reason_category': 'Define Stop Criteria', 'usage_line': 81}, {'reason_category': 'Loop Body', 'usage_line': 82}]
Variable 'new_height' used at line 80 is defined at line 72 and has a Short-Range dependency. Variable 'new_width' used at line 81 is defined at line 73 and has a Short-Range dependency. Variable 'downsampled_image' used at line 82 is defined at line 78 and has a Short-Range dependency. Variable 'i' used at line 82 is part of a Loop defined at line 80 and has a Short-Range dependency. Variable 'j' used at line 82 is part of a Loop defined at line 81 and has a Short-Range dependency. Variable 'image' used at line 82 is defined at line 67 and has a Medium-Range dependency. Variable 'factor' used at line 82 is defined at line 67 and has a Medium-Range dependency. Variable 'downsampled_image' used at line 83 is defined at line 78 and has a Short-Range dependency.
{'Define Stop Criteria': 2, 'Loop Body': 1}
{'Variable Short-Range': 4, 'Variable Loop Short-Range': 2, 'Variable Medium-Range': 2}
completion_python
Image_Filtering
94
96
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):']
[' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum']
['', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame 
(stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 94 is imported at line 1 and has a Long-Range dependency. Variable 'image' used at line 94 is defined at line 93 and has a Short-Range dependency. Library 'np' used at line 95 is imported at line 1 and has a Long-Range dependency. Variable 'spectrum' used at line 95 is defined at line 94 and has a Short-Range dependency. Variable 'log_spectrum' used at line 96 is defined at line 95 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 3}
completion_python
Image_Filtering
95
96
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))']
[' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum']
['', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame 
(stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 95 is imported at line 1 and has a Long-Range dependency. Variable 'spectrum' used at line 95 is defined at line 94 and has a Short-Range dependency. Variable 'log_spectrum' used at line 96 is defined at line 95 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
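As an illustrative aside for the record above: the completed lines compute the log-magnitude spectrum of a 2-D FFT. A minimal, self-contained sketch of the same idea, assuming only NumPy and using a synthetic sinusoid image in place of the grayscale photos referenced by the record:

import numpy as np

def log_magnitude_spectrum(image):
    # 2-D FFT, shift the zero-frequency bin to the centre, then log-compress.
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))
    return np.log1p(spectrum)  # log(1 + |F|), equivalent to np.log(1 + spectrum)

# Synthetic 64x64 image containing a single horizontal sinusoid.
x = np.linspace(0, 8 * np.pi, 64)
img = np.tile(np.sin(x), (64, 1))
print(log_magnitude_spectrum(img).shape)  # (64, 64)

The log compression only rescales the display range; the locations of spectral peaks are unchanged.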
completion_python
Image_Filtering
119
120
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):']
[' G = cv2.pyrDown(G)', ' gpA.append(G)']
['', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 119}, {'reason_category': 'Loop Body', 'usage_line': 120}]
Library 'cv2' used at line 119 is imported at line 2 and has a Long-Range dependency. Variable 'G' used at line 119 is defined at line 116 and has a Short-Range dependency. Variable 'gpA' used at line 120 is defined at line 117 and has a Short-Range dependency. Variable 'G' used at line 120 is defined at line 119 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
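For context, the two completed lines in this record are the body of the Gaussian-pyramid loop (repeated blur-and-downsample with cv2.pyrDown). A small runnable sketch, assuming OpenCV and NumPy are available; the random input array and the helper name gaussian_pyramid are placeholders rather than part of the record:

import numpy as np
import cv2

def gaussian_pyramid(image, levels=6):
    # Each level is the previous one blurred and downsampled to half size.
    pyramid = [image.copy()]
    for _ in range(levels):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid

img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-in image
for level, layer in enumerate(gaussian_pyramid(img)):
    print(level, layer.shape)  # 256 -> 128 -> 64 -> 32 -> 16 -> 8 -> 4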
completion_python
Image_Filtering
126
127
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):']
[' G = cv2.pyrDown(G)', ' gpB.append(G)']
['', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 126}, {'reason_category': 'Loop Body', 'usage_line': 127}]
Library 'cv2' used at line 126 is imported at line 2 and has a Long-Range dependency. Variable 'G' used at line 126 is defined at line 123 and has a Short-Range dependency. Variable 'gpB' used at line 127 is defined at line 124 and has a Short-Range dependency. Variable 'G' used at line 127 is defined at line 126 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
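This record completes the same pyramid loop for the second image. One detail worth noting: cv2.pyrDown rounds odd dimensions up to (n + 1) // 2, so resizing both inputs to 256x256 keeps every level's size even and lets the later cv2.pyrUp calls reproduce the exact shapes. A quick shape-only check on a synthetic placeholder array:

import numpy as np
import cv2

img = np.zeros((256, 256, 3), dtype=np.uint8)  # placeholder for the resized orange image
sizes = [img.shape[:2]]
for _ in range(6):
    img = cv2.pyrDown(img)
    sizes.append(img.shape[:2])
print(sizes)  # [(256, 256), (128, 128), (64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]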
completion_python
Image_Filtering
132
134
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):']
[' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)']
['', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 132}, {'reason_category': 'Loop Body', 'usage_line': 133}, {'reason_category': 'Loop Body', 'usage_line': 134}]
Library 'cv2' used at line 132 is imported at line 2 and has a Long-Range dependency. Variable 'gpA' used at line 132 is defined at line 117 and has a Medium-Range dependency. Variable 'i' used at line 132 is part of a Loop defined at line 131 and has a Short-Range dependency. Library 'cv2' used at line 133 is imported at line 2 and has a Long-Range dependency. Variable 'gpA' used at line 133 is defined at line 117 and has a Medium-Range dependency. Variable 'i' used at line 133 is part of a Loop defined at line 131 and has a Short-Range dependency. Variable 'GE' used at line 133 is defined at line 132 and has a Short-Range dependency. Variable 'lpA' used at line 134 is defined at line 130 and has a Short-Range dependency. Variable 'L' used at line 134 is defined at line 133 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 2, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
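The completion in this record builds one level of a Laplacian pyramid: upsample the coarser Gaussian level, subtract it from the finer one, and keep the difference as the detail layer. A self-contained sketch of the whole construction, assuming OpenCV and NumPy; the defensive cv2.resize is an added safeguard for non-power-of-two inputs and is a no-op for the 256x256 case in the record:

import numpy as np
import cv2

def laplacian_pyramid(gaussian_pyr):
    # The coarsest Gaussian level is kept as-is; every other entry stores the
    # detail lost between two adjacent Gaussian levels.
    top = len(gaussian_pyr) - 1
    lap = [gaussian_pyr[top]]
    for i in range(top, 0, -1):
        expanded = cv2.pyrUp(gaussian_pyr[i])
        h, w = gaussian_pyr[i - 1].shape[:2]
        expanded = cv2.resize(expanded, (w, h))  # align shapes before subtracting
        lap.append(cv2.subtract(gaussian_pyr[i - 1], expanded))
    return lap

img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-in image
gp = [img]
for _ in range(5):
    gp.append(cv2.pyrDown(gp[-1]))
print([layer.shape[:2] for layer in laplacian_pyramid(gp)])  # coarse -> fine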
completion_python
Image_Filtering
138
140
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):']
[' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)']
['', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 138}, {'reason_category': 'Loop Body', 'usage_line': 139}, {'reason_category': 'Loop Body', 'usage_line': 140}]
Library 'cv2' used at line 138 is imported at line 2 and has a Long-Range dependency. Variable 'gpB' used at line 138 is defined at line 124 and has a Medium-Range dependency. Variable 'i' used at line 138 is part of a Loop defined at line 137 and has a Short-Range dependency. Library 'cv2' used at line 139 is imported at line 2 and has a Long-Range dependency. Variable 'gpB' used at line 139 is defined at line 124 and has a Medium-Range dependency. Variable 'i' used at line 139 is part of a Loop defined at line 137 and has a Short-Range dependency. Variable 'GE' used at line 139 is defined at line 138 and has a Short-Range dependency. Variable 'lpB' used at line 140 is defined at line 136 and has a Short-Range dependency. Variable 'L' used at line 140 is defined at line 139 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 2, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
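The suffix code in this record goes on to blend the two Laplacian pyramids half-and-half at every level and then collapse the result back into a single image. A compact sketch of those two steps, with random arrays standing in for the apple/orange photos; the helper names are illustrative, not from the record:

import numpy as np
import cv2

def blend_halves(lap_a, lap_b):
    # At every pyramid level, keep A's left half and B's right half.
    blended = []
    for la, lb in zip(lap_a, lap_b):
        cols = la.shape[1]
        blended.append(np.hstack((la[:, :cols // 2], lb[:, cols // 2:])))
    return blended

def collapse(laplacian_pyr):
    # Invert the decomposition: upsample the running image and add each detail level back.
    out = laplacian_pyr[0]
    for layer in laplacian_pyr[1:]:
        out = cv2.resize(cv2.pyrUp(out), (layer.shape[1], layer.shape[0]))
        out = cv2.add(out, layer)
    return out

def build_laplacian(img, levels=5):
    gp = [img]
    for _ in range(levels):
        gp.append(cv2.pyrDown(gp[-1]))
    lap = [gp[-1]]
    for i in range(levels, 0, -1):
        up = cv2.resize(cv2.pyrUp(gp[i]), (gp[i - 1].shape[1], gp[i - 1].shape[0]))
        lap.append(cv2.subtract(gp[i - 1], up))
    return lap

A = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-ins for the two photos
B = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
result = collapse(blend_halves(build_laplacian(A), build_laplacian(B)))
print(result.shape)  # (256, 256, 3)

Blending per level rather than on the full-resolution images is what hides the seam: low-frequency content is mixed over a wide transition band while fine detail is mixed over a narrow one.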
completion_python
Image_Filtering
145
147
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):']
[' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)']
['', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 145}, {'reason_category': 'Loop Body', 'usage_line': 146}, {'reason_category': 'Loop Body', 'usage_line': 147}]
Variable 'la' used at line 145 is part of a Loop defined at line 144 and has a Short-Range dependency. Library 'np' used at line 146 is imported at line 1 and has a Long-Range dependency. Variable 'la' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'cols' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'lb' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'LS' used at line 147 is defined at line 143 and has a Short-Range dependency. Variable 'ls' used at line 147 is defined at line 146 and has a Short-Range dependency.
{'Loop Body': 3}
{'Variable Loop Short-Range': 3, 'Library Long-Range': 1, 'Variable Short-Range': 3}
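The completion annotated above (usage lines 145-147) joins the left half of one Laplacian pyramid level to the right half of the other with np.hstack. Below is a minimal, self-contained sketch of that per-level stacking; it is illustrative only, not part of the dataset, and the zero/one arrays are placeholders for real pyramid levels.

import numpy as np

la = np.zeros((64, 64, 3), dtype=np.float32)   # placeholder for a Laplacian level of image A
lb = np.ones((64, 64, 3), dtype=np.float32)    # placeholder for the matching level of image B

rows, cols, depth = la.shape
# Keep the left half of A's level and the right half of B's level, then join them
# along the column axis, the same np.hstack pattern as in the annotated completion.
stacked = np.hstack((la[:, :cols // 2], lb[:, cols // 2:]))
print(stacked.shape)                           # (64, 64, 3)
print(stacked[0, 0, 0], stacked[0, -1, 0])     # 0.0 (left half from A), 1.0 (right half from B)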
completion_python
Image_Filtering
172
172
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):']
[' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0']
[' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 
'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 172}]
Variable 'mask' used at line 172 is defined at line 169 and has a Short-Range dependency. Variable 'i' used at line 172 is part of a Loop defined at line 171 and has a Short-Range dependency. Variable 'strip_width' used at line 172 is defined at line 168 and has a Short-Range dependency. Variable 'width' used at line 172 is defined at line 170 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 3, 'Variable Loop Short-Range': 1}
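The trailing context of this record also spells out the DFT index-to-frequency conversion (index n maps to Fs * n / N Hz, which np.fft.fftfreq computes) and the pulse estimate of 60 times the frequency at the spectral peak. A small synthetic check of that arithmetic follows; it is not taken from the dataset, and the 1.2 Hz sine is a made-up stand-in for the averaged green-channel signal.

import numpy as np

fs = 30.0                                      # sampling rate in Hz (30 fps video)
t = np.arange(300) / fs                        # N = 300 samples, i.e. 10 seconds
pulse = np.sin(2 * np.pi * 1.2 * t)            # synthetic 1.2 Hz signal, i.e. 72 bpm

spectrum = np.abs(np.fft.fft(pulse))
freqs = np.fft.fftfreq(len(pulse), d=1 / fs)   # index n -> fs * n / N, in Hz
peak_hz = abs(freqs[np.argmax(spectrum)])      # frequency of the largest magnitude bin
print(round(peak_hz * 60))                     # 72 (beats per minute)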
completion_python
Image_Filtering
169
173
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):']
[' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask']
['', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 
'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 171}, {'reason_category': 'Loop Body', 'usage_line': 172}]
Library 'np' used at line 169 is imported at line 1 and has a Long-Range dependency. Variable 'shape' used at line 169 is defined at line 168 and has a Short-Range dependency. Variable 'mask' used at line 170 is defined at line 169 and has a Short-Range dependency. Variable 'height' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'width' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 172 is defined at line 169 and has a Short-Range dependency. Variable 'i' used at line 172 is part of a Loop defined at line 171 and has a Short-Range dependency. Variable 'strip_width' used at line 172 is defined at line 168 and has a Short-Range dependency. Variable 'width' used at line 172 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 173 is defined at line 169 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 8, 'Variable Loop Short-Range': 1}
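The ground-truth completion in this record (lines 169-173) builds the diagonal strip mask row by row. As an illustrative alternative, not part of the dataset and with a hypothetical function name, the same band can be written as a vectorised index comparison.

import numpy as np

def diagonal_strip_mask(height, width, strip_width=200):
    # Row i keeps columns j with i - strip_width//2 <= j < i + strip_width//2,
    # the same slice the loop in the completion assigns to 1.0.
    rows = np.arange(height)[:, None]
    cols = np.arange(width)[None, :]
    band = (cols >= rows - strip_width // 2) & (cols < rows + strip_width // 2)
    band[min(height, width):, :] = False       # the loop only visits the first min(h, w) rows
    return band.astype(np.float32)             # repeat along a third axis for the 3-channel mask used in the record

m = diagonal_strip_mask(256, 256, strip_width=50)
print(m.shape, int(m[0].sum()))                # (256, 256) 25 -> columns 0..24 are set in row 0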
completion_python
Image_Filtering
181
182
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):']
[' M = cv2.pyrDown(M)', ' gpmask.append(M)']
['gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. 
DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 181}, {'reason_category': 'Loop Body', 'usage_line': 182}]
Library 'cv2' used at line 181 is imported at line 2 and has a Long-Range dependency. Variable 'M' used at line 181 is defined at line 178 and has a Short-Range dependency. Variable 'gpmask' used at line 182 is defined at line 179 and has a Short-Range dependency. Variable 'M' used at line 182 is defined at line 181 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
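The completion here (lines 181-182) grows a Gaussian pyramid of the blending mask with cv2.pyrDown. Below is a standalone sketch, not from the dataset, showing how each call roughly halves both spatial dimensions.

import numpy as np
import cv2

M = np.ones((256, 256, 3), dtype=np.float32)   # placeholder for the diagonal blending mask
gpmask = [M]
for _ in range(5):
    M = cv2.pyrDown(M)                         # Gaussian blur, then drop every other row and column
    gpmask.append(M)
print([level.shape[:2] for level in gpmask])   # [(256, 256), (128, 128), (64, 64), (32, 32), (16, 16), (8, 8)]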
completion_python
Image_Filtering
185
187
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):']
[' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)']
['', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. 
For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 185}, {'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'Loop Body', 'usage_line': 187}]
Variable 'lpA' used at line 185 is defined at line 130 and has a Long-Range dependency. Variable 'i' used at line 185 is part of a Loop defined at line 184 and has a Short-Range dependency. Variable 'lpA' used at line 186 is defined at line 130 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 184 and has a Short-Range dependency. Variable 'gpmask' used at line 186 is defined at line 183 and has a Short-Range dependency. Variable 'lpB' used at line 186 is defined at line 136 and has a Long-Range dependency. Variable 'LS' used at line 187 is defined at line 176 and has a Medium-Range dependency. Variable 'ls' used at line 187 is defined at line 186 and has a Short-Range dependency.
{'Loop Body': 3}
{'Variable Long-Range': 3, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 2, 'Variable Medium-Range': 1}
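The completion in this record (lines 185-187) blends the two Laplacian levels as a convex combination weighted by the matching mask level, ls = gpmask[i] * lpA[i] + (1 - gpmask[i]) * lpB[i]. A toy instance with constant stand-in arrays, illustrative only and not part of the dataset:

import numpy as np

level_a = np.full((8, 8, 3), 10.0, dtype=np.float32)   # stand-in for lpA[i]
level_b = np.full((8, 8, 3), 20.0, dtype=np.float32)   # stand-in for lpB[i]
weight = np.zeros((8, 8, 3), dtype=np.float32)         # stand-in for gpmask[i]
weight[:, :4] = 1.0                                    # left half weighted entirely towards A

blended = level_a * weight + level_b * (1.0 - weight)  # same expression as the completion
print(blended[0, 0, 0], blended[0, -1, 0])             # 10.0 20.0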
completion_python
Image_Filtering
192
194
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):']
[' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])']
['', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 192}, {'reason_category': 'Loop Body', 'usage_line': 193}, {'reason_category': 'Loop Body', 'usage_line': 194}]
Library 'cv2' used at line 192 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 192 is defined at line 190 and has a Short-Range dependency. Library 'cv2' used at line 193 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 193 is defined at line 192 and has a Short-Range dependency. Variable 'LS' used at line 193 is defined at line 176 and has a Medium-Range dependency. Variable 'i' used at line 193 is part of a Loop defined at line 191 and has a Short-Range dependency. Library 'cv2' used at line 194 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'LS' used at line 194 is defined at line 176 and has a Medium-Range dependency. Variable 'i' used at line 194 is part of a Loop defined at line 191 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 3, 'Variable Short-Range': 3, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2}
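Note on this record: the annotations above concern the Laplacian-pyramid reconstruction loop that the target lines complete. A minimal self-contained sketch of that step, assuming LS holds the blended pyramid levels ordered coarse-to-fine as in the context code (the wrapper name reconstruct_from_levels is illustrative, not from the record):

import cv2

def reconstruct_from_levels(LS):
    # LS[0] is the coarsest level; each later entry adds finer detail.
    ls_ = LS[0]
    for i in range(1, len(LS)):
        ls_ = cv2.pyrUp(ls_)                                      # upsample the running reconstruction
        ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0]))   # match sizes before adding detail
        ls_ = cv2.add(ls_, LS[i])                                 # add back this level's detail
    return ls_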
completion_python
Image_Filtering
220
221
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '']
[' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)']
['', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 220}, {'reason_category': 'Loop Body', 'usage_line': 221}]
Library 'cv2' used at line 220 is imported at line 2 and has a Long-Range dependency. Variable 'frame' used at line 220 is defined at line 214 and has a Short-Range dependency. Variable 'frames' used at line 221 is defined at line 212 and has a Short-Range dependency. Variable 'frame' used at line 221 is defined at line 220 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
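Note on this record: the target lines are the body of the frame-reading loop. A simplified runnable sketch of that loop (renamed read_frames_rgb here; the record's own function additionally stacks the frames into an array before returning):

import cv2

def read_frames_rgb(filename):
    cap = cv2.VideoCapture(filename)
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stream ended or frame could not be read
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes as BGR; convert to RGB
        frames.append(frame)
    cap.release()
    return frames  # list of (H, W, 3) uint8 arrays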
completion_python
Image_Filtering
231
236
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):']
[' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal']
['', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'fs' used at line 231 is defined at line 230 and has a Short-Range dependency. Variable 'low_cutoff' used at line 232 is defined at line 230 and has a Short-Range dependency. Variable 'nyquist' used at line 232 is defined at line 231 and has a Short-Range dependency. Variable 'high_cutoff' used at line 233 is defined at line 230 and has a Short-Range dependency. Variable 'nyquist' used at line 233 is defined at line 231 and has a Short-Range dependency. Library 'butter' used at line 234 is imported at line 5 and has a Long-Range dependency. Variable 'order' used at line 234 is defined at line 230 and has a Short-Range dependency. Variable 'low' used at line 234 is defined at line 232 and has a Short-Range dependency. Variable 'high' used at line 234 is defined at line 233 and has a Short-Range dependency. Library 'filtfilt' used at line 235 is imported at line 5 and has a Long-Range dependency. Variable 'b' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'a' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'signal' used at line 235 is defined at line 230 and has a Short-Range dependency. Variable 'filtered_signal' used at line 236 is defined at line 235 and has a Short-Range dependency.
{}
{'Variable Short-Range': 12, 'Library Long-Range': 2}
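Note on this record: assembling the context with the target lines gives the zero-phase Butterworth band-pass helper used later in the script, restated here as one snippet with explanatory comments only:

from scipy.signal import butter, filtfilt

def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):
    nyquist = 0.5 * fs            # Nyquist frequency for sampling rate fs
    low = low_cutoff / nyquist    # normalised lower cutoff in (0, 1)
    high = high_cutoff / nyquist  # normalised upper cutoff in (0, 1)
    b, a = butter(order, [low, high], btype='band')   # band-pass filter coefficients
    filtered_signal = filtfilt(b, a, signal)          # forward-backward filtering, zero phase shift
    return filtered_signal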
completion_python
Image_Filtering
235
236
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')"]
[' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal']
['', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'filtfilt' used at line 235 is imported at line 5 and has a Long-Range dependency. Variable 'b' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'a' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'signal' used at line 235 is defined at line 230 and has a Short-Range dependency. Variable 'filtered_signal' used at line 236 is defined at line 235 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 4}
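Note on this record: the two target lines (the filtfilt call and the return) can be sanity-checked on a synthetic signal; the tone frequencies and 10-second duration below are illustrative assumptions, only fs = 30 and the 0.8-3 Hz band come from the script:

import numpy as np
from scipy.signal import butter, filtfilt

def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):
    nyquist = 0.5 * fs
    low = low_cutoff / nyquist
    high = high_cutoff / nyquist
    b, a = butter(order, [low, high], btype='band')
    return filtfilt(b, a, signal)

fs = 30                                  # sampling rate used in the script (Hz)
t = np.arange(0, 10, 1 / fs)             # hypothetical 10 s of samples
x = np.sin(2 * np.pi * 1.5 * t) + 0.5 * np.sin(2 * np.pi * 10 * t)  # 1.5 Hz + 10 Hz mixture
y = bandpass_filter(x, 0.8, 3.0, fs, order=1)  # the 1.5 Hz component passes, the 10 Hz component is attenuated
print(y[:5])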
completion_python
Image_Transformation
29
36
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):']
[' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized']
['B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' 
output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 29 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 29 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 30 is defined at line 29 and has a Short-Range dependency. Variable 'end_size' used at line 30 is defined at line 28 and has a Short-Range dependency. Variable 'img_array' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'y_start' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'end_size' used at line 33 is defined at line 28 and has a Short-Range dependency. Variable 'img_array' used at line 35 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 35 is defined at line 29 and has a Short-Range dependency. Variable 'x_end' used at line 35 is defined at line 30 and has a Short-Range dependency. Variable 'y_start' used at line 35 is defined at line 32 and has a Short-Range dependency. Variable 'y_end' used at line 35 is defined at line 33 and has a Short-Range dependency. Variable 'img_resized' used at line 36 is defined at line 35 and has a Short-Range dependency.
{}
{'Variable Short-Range': 14}
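Note on this record: the annotations describe the center-crop helper that the target completes; assembled from the context and target lines it reads as follows (only the comment is added):

def center_crop(img_array, end_size):
    # Crop an end_size x end_size window from the middle of an H x W x C array.
    x_start = int((img_array.shape[0] - end_size) / 2)
    x_end = x_start + end_size

    y_start = int((img_array.shape[1] - end_size) / 2)
    y_end = y_start + end_size

    img_resized = img_array[x_start:x_end, y_start:y_end, :]
    return img_resized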
completion_python
Image_Transformation
32
36
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '']
[' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized']
['B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' 
output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'end_size' used at line 32 is defined at line 28 and has a Short-Range dependency. Variable 'y_start' used at line 33 is defined at line 32 and has a Short-Range dependency. Variable 'end_size' used at line 33 is defined at line 28 and has a Short-Range dependency. Variable 'img_array' used at line 35 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 35 is defined at line 29 and has a Short-Range dependency. Variable 'x_end' used at line 35 is defined at line 30 and has a Short-Range dependency. Variable 'y_start' used at line 35 is defined at line 32 and has a Short-Range dependency. Variable 'y_end' used at line 35 is defined at line 33 and has a Short-Range dependency. Variable 'img_resized' used at line 36 is defined at line 35 and has a Short-Range dependency.
{}
{'Variable Short-Range': 10}
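Note on this record: the completion can be exercised on a synthetic image; the 300x400 input below is a made-up demonstration value, while the crop size 256 matches end_size in the script:

import numpy as np

def center_crop(img_array, end_size):
    x_start = int((img_array.shape[0] - end_size) / 2)
    x_end = x_start + end_size
    y_start = int((img_array.shape[1] - end_size) / 2)
    y_end = y_start + end_size
    return img_array[x_start:x_end, y_start:y_end, :]

img = np.zeros((300, 400, 3), dtype=np.uint8)  # hypothetical RGB image
crop = center_crop(img, 256)
print(crop.shape)  # (256, 256, 3)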
completion_python
Image_Transformation
35
36
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '']
[' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized']
['B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' 
output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Variable 'img_array' used at line 35 is defined at line 28 and has a Short-Range dependency. Variable 'x_start' used at line 35 is defined at line 29 and has a Short-Range dependency. Variable 'x_end' used at line 35 is defined at line 30 and has a Short-Range dependency. Variable 'y_start' used at line 35 is defined at line 32 and has a Short-Range dependency. Variable 'y_end' used at line 35 is defined at line 33 and has a Short-Range dependency. Variable 'img_resized' used at line 36 is defined at line 35 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6}
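The completion in the record above (lines 35-36 of the snippet) is the slicing-and-return step of center_crop. A self-contained sketch of the same idea, assuming an H x W x C NumPy array and a square crop no larger than either spatial dimension; the dummy array exists only for the shape check:

import numpy as np

def center_crop(img_array, end_size):
    # Offsets that centre a square end_size window on both spatial axes.
    x_start = (img_array.shape[0] - end_size) // 2
    y_start = (img_array.shape[1] - end_size) // 2
    # Slice rows and columns, keep every channel.
    return img_array[x_start:x_start + end_size, y_start:y_start + end_size, :]

dummy = np.zeros((300, 400, 3), dtype=np.uint8)  # stand-in image
print(center_crop(dummy, 256).shape)             # (256, 256, 3)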
completion_python
Image_Transformation
44
44
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:']
[' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)']
[' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = 
arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 44}]
Library 'np' used at line 44 is imported at line 3 and has a Long-Range dependency. Variable 'B_resized' used at line 44 is defined at line 37 and has a Short-Range dependency. Variable 'row' used at line 44 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'If Body': 1, 'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1, 'Variable Loop Short-Range': 1}
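The infilled line above concatenates B_resized[row:row+1, :, :] onto D. One detail that makes this work: slicing with row:row+1 keeps the row axis (shape (1, W, C)), whereas integer indexing would drop it and break the axis-0 concatenation. A tiny standalone illustration with a made-up array:

import numpy as np

arr = np.arange(24).reshape(4, 2, 3)   # 4 rows, 2 columns, 3 channels
print(arr[1].shape)       # (2, 3)    -> integer index drops the row axis
print(arr[1:2].shape)     # (1, 2, 3) -> slice keeps it, concatenation-friendly
print(np.concatenate((arr[0:1], arr[1:2]), axis=0).shape)  # (2, 2, 3)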
completion_python
Image_Transformation
46
46
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:']
[' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)']
['print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as 
shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 46}, {'reason_category': 'Else Reasoning', 'usage_line': 46}]
Library 'np' used at line 46 is imported at line 3 and has a Long-Range dependency. Variable 'A_resized' used at line 46 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 46 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Loop Body': 1, 'Else Reasoning': 1}
{'Library Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Loop Short-Range': 1}
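The surrounding context for these records also builds an RGB and an HSV yellow mask (Task 2) from elementwise comparisons. As a hedged side sketch, not the code any record expects, the same HSV range test can be written with cv2.inRange; the bounds are reused from the context and './pepper.png' is the same placeholder path, assumed to exist on disk:

import cv2
import numpy as np

img = cv2.imread('./pepper.png')              # BGR, as loaded in the context
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

lower_yellow = np.array([19, 0, 0], dtype=np.uint8)
upper_yellow = np.array([24, 255, 255], dtype=np.uint8)

# inRange returns 255 inside the inclusive range and 0 outside;
# dividing by 255 matches the 0/1 mask built in the context.
mask = cv2.inRange(hsv_img, lower_yellow, upper_yellow) // 255
print(mask.shape, int(mask.max()))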
completion_python
Image_Transformation
43
46
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):']
[' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)']
['print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as 
shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 43}, {'reason_category': 'If Condition', 'usage_line': 43}, {'reason_category': 'If Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 44}, {'reason_category': 'Loop Body', 'usage_line': 45}, {'reason_category': 'Else Reasoning', 'usage_line': 45}, {'reason_category': 'Loop Body', 'usage_line': 46}, {'reason_category': 'Else Reasoning', 'usage_line': 46}]
Variable 'row' used at line 43 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 44 is imported at line 3 and has a Long-Range dependency. Variable 'B_resized' used at line 44 is defined at line 37 and has a Short-Range dependency. Variable 'row' used at line 44 is part of a Loop defined at line 42 and has a Short-Range dependency. Library 'np' used at line 46 is imported at line 3 and has a Long-Range dependency. Variable 'A_resized' used at line 46 is defined at line 26 and has a Medium-Range dependency. Variable 'row' used at line 46 is part of a Loop defined at line 42 and has a Short-Range dependency.
{'Loop Body': 4, 'If Condition': 1, 'If Body': 1, 'Else Reasoning': 2}
{'Variable Loop Short-Range': 3, 'Library Long-Range': 2, 'Variable Short-Range': 1, 'Variable Medium-Range': 1}
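The record above infills the whole even/odd interleaving loop (lines 43-46 of the snippet). As an alternative sketch, not the completion the record expects, the same interleaving can be done without a Python loop by overwriting every other row; the random arrays below are stand-ins for A_resized and B_resized:

import numpy as np

A_resized = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
B_resized = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)

# Start from A's rows, then overwrite the even indices (0, 2, 4, ...) with B's,
# which matches the loop: even rows come from B_resized, odd rows from A_resized.
D_vec = A_resized.copy()
D_vec[::2] = B_resized[::2]

# Cross-check against the loop-built version from the record.
D = B_resized[0:1, :, :]
for row in range(1, A_resized.shape[0]):
    src = B_resized if row % 2 == 0 else A_resized
    D = np.concatenate((D, src[row:row + 1, :, :]), axis=0)
print(np.array_equal(D, D_vec))  # True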
completion_python
Image_Transformation
83
84
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):']
[' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix']
['', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= 
'./desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 83 is imported at line 3 and has a Long-Range dependency. Variable 'dx' used at line 83 is defined at line 82 and has a Short-Range dependency. Variable 'dy' used at line 83 is defined at line 82 and has a Short-Range dependency. Variable 'translation_matrix' used at line 84 is defined at line 83 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
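The completion here is the body of translation(dx, dy), a 3x3 homogeneous translation matrix. A short standalone check of what that matrix does to a point in homogeneous coordinates; the sample point and offsets are only for illustration:

import numpy as np

def translation(dx, dy):
    # Homogeneous 2D translation: the last column carries the offsets.
    return np.array([[1, 0, dx],
                     [0, 1, dy],
                     [0, 0, 1]])

# Applying it to [x, y, 1] simply adds (dx, dy) to the point.
point = np.array([10, 20, 1])
print(translation(21, 25) @ point)  # [31 45  1]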
completion_python
Image_Transformation
88
88
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:']
[' angle = np.radians(angle)']
[' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = 
np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Body', 'usage_line': 88}]
Library 'np' used at line 88 is imported at line 3 and has a Long-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1}
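The single infilled line above converts the angle from degrees to radians when the radians flag is False. In the later context, rotation(30, False) passes False positionally as that flag, so 30 is interpreted as degrees and converted. A tiny illustration of why the conversion matters:

import numpy as np

angle_deg = 30
# Without conversion, np.cos treats 30 as 30 radians.
print(np.cos(angle_deg))              # ~0.1543, cosine of 30 radians
print(np.cos(np.radians(angle_deg)))  # ~0.8660, cosine of 30 degrees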
completion_python
Image_Transformation
87
92
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):']
[' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix']
['', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = 
I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 87}, {'reason_category': 'If Body', 'usage_line': 88}]
Variable 'radians' used at line 87 is defined at line 86 and has a Short-Range dependency. Library 'np' used at line 88 is imported at line 3 and has a Long-Range dependency. Library 'np' used at line 89 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 89 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 90 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 90 is defined at line 88 and has a Short-Range dependency. Library 'np' used at line 91 is imported at line 3 and has a Long-Range dependency. Variable 'costheta' used at line 91 is defined at line 89 and has a Short-Range dependency. Variable 'sintheta' used at line 91 is defined at line 90 and has a Short-Range dependency. Variable 'rotation_matrix' used at line 92 is defined at line 91 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1}
{'Variable Short-Range': 6, 'Library Long-Range': 4}
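This record completes the whole body of rotation(angle, radians=True). Below is a hedged sanity check on the matrix it builds, restated locally with the same sign convention ([[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]) so it runs standalone: the upper-left 2x2 block should be orthogonal with determinant 1, and composing the matrix with its inverse should return the original homogeneous point.

import numpy as np

def rotation(angle, radians=True):
    if not radians:
        angle = np.radians(angle)
    c, s = np.cos(angle), np.sin(angle)
    # Same row convention as the record's matrix.
    return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])

R = rotation(30, radians=False)
block = R[:2, :2]
print(np.allclose(block @ block.T, np.eye(2)))   # True: orthogonal block
print(np.isclose(np.linalg.det(block), 1.0))     # True: pure rotation, no scaling
point = np.array([1.0, 0.0, 1.0])
print(np.allclose(np.linalg.inv(R) @ (R @ point), point))  # True: invertible warp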
completion_python
Image_Transformation
96
96
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:']
[' angle = np.radians(angle)']
[' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = 
(I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Body', 'usage_line': 96}]
Library 'np' used at line 96 is imported at line 3 and has a Long-Range dependency.
{'If Body': 1}
{'Library Long-Range': 1}
completion_python
Image_Transformation
95
103
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):']
[' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix']
['', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 
'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 95}, {'reason_category': 'If Body', 'usage_line': 96}]
Variable 'radians' used at line 95 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 96 is imported at line 3 and has a Long-Range dependency. Library 'np' used at line 97 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 97 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 98 is imported at line 3 and has a Long-Range dependency. Variable 'angle' used at line 98 is defined at line 94 and has a Short-Range dependency. Library 'np' used at line 100 is imported at line 3 and has a Long-Range dependency. Variable 'scale_factor' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'costheta' used at line 100 is defined at line 97 and has a Short-Range dependency. Variable 'sintheta' used at line 100 is defined at line 98 and has a Short-Range dependency. Variable 'dx' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'scale_factor' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'sintheta' used at line 101 is defined at line 98 and has a Short-Range dependency. Variable 'costheta' used at line 101 is defined at line 97 and has a Short-Range dependency. Variable 'dy' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'similarity_matrix' used at line 103 is defined at line 100 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1}
{'Variable Short-Range': 12, 'Library Long-Range': 4}
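Similarly, reason_freq_analysis appears to be a plain count over the reason_category values listed in reason_categories_output. A minimal sketch of that mapping, again illustrative rather than part of the dataset, using the entries from the record above:

# Illustrative sketch only: tally 'reason_category' values into the kind of
# dict seen in reason_freq_analysis, e.g. {'If Condition': 1, 'If Body': 1}.
from collections import Counter

def tally_reason_categories(reason_cats: list) -> dict:
    return dict(Counter(item['reason_category'] for item in reason_cats))

print(tally_reason_categories([
    {'reason_category': 'If Condition', 'usage_line': 95},
    {'reason_category': 'If Body', 'usage_line': 96},
]))  # {'If Condition': 1, 'If Body': 1}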
completion_python
Image_Transformation
100
103
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '']
[' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix']
['', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 
'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 100 is imported at line 3 and has a Long-Range dependency. Variable 'scale_factor' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'costheta' used at line 100 is defined at line 97 and has a Short-Range dependency. Variable 'sintheta' used at line 100 is defined at line 98 and has a Short-Range dependency. Variable 'dx' used at line 100 is defined at line 94 and has a Short-Range dependency. Variable 'scale_factor' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'sintheta' used at line 101 is defined at line 98 and has a Short-Range dependency. Variable 'costheta' used at line 101 is defined at line 97 and has a Short-Range dependency. Variable 'dy' used at line 101 is defined at line 94 and has a Short-Range dependency. Variable 'similarity_matrix' used at line 103 is defined at line 100 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 9}
completion_python
Image_Transformation
106
112
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):']
[' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result']
['', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 106 is imported at line 3 and has a Long-Range dependency. Variable 'scale' used at line 106 is defined at line 105 and has a Short-Range dependency. Library 'np' used at line 107 is imported at line 3 and has a Long-Range dependency. Variable 'ax' used at line 107 is defined at line 105 and has a Short-Range dependency. Variable 'ay' used at line 107 is defined at line 105 and has a Short-Range dependency. Library 'np' used at line 108 is imported at line 3 and has a Long-Range dependency. Library 'np' used at line 109 is imported at line 3 and has a Long-Range dependency. Function 'translation' used at line 109 is defined at line 82 and has a Medium-Range dependency. Variable 'x' used at line 109 is defined at line 105 and has a Short-Range dependency. Variable 'y' used at line 109 is defined at line 105 and has a Short-Range dependency. Function 'rotation' used at line 109 is defined at line 86 and has a Medium-Range dependency. Variable 'angle' used at line 109 is defined at line 105 and has a Short-Range dependency. Library 'np' used at line 110 is imported at line 3 and has a Long-Range dependency. Variable 'scaling' used at line 110 is defined at line 106 and has a Short-Range dependency. Variable 'result' used at line 110 is defined at line 109 and has a Short-Range dependency. Library 'np' used at line 111 is imported at line 3 and has a Long-Range dependency. Variable 'shear' used at line 111 is defined at line 107 and has a Short-Range dependency. Variable 'result' used at line 111 is defined at line 110 and has a Short-Range dependency. Variable 'result' used at line 112 is defined at line 111 and has a Short-Range dependency.
{}
{'Library Long-Range': 6, 'Variable Short-Range': 11, 'Function Medium-Range': 2}
completion_python
Image_Transformation
109
112
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])']
[' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result']
['', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 109 is imported at line 3 and has a Long-Range dependency. Function 'translation' used at line 109 is defined at line 82 and has a Medium-Range dependency. Variable 'x' used at line 109 is defined at line 105 and has a Short-Range dependency. Variable 'y' used at line 109 is defined at line 105 and has a Short-Range dependency. Function 'rotation' used at line 109 is defined at line 86 and has a Medium-Range dependency. Variable 'angle' used at line 109 is defined at line 105 and has a Short-Range dependency. Library 'np' used at line 110 is imported at line 3 and has a Long-Range dependency. Variable 'scaling' used at line 110 is defined at line 106 and has a Short-Range dependency. Variable 'result' used at line 110 is defined at line 109 and has a Short-Range dependency. Library 'np' used at line 111 is imported at line 3 and has a Long-Range dependency. Variable 'shear' used at line 111 is defined at line 107 and has a Short-Range dependency. Variable 'result' used at line 111 is defined at line 110 and has a Short-Range dependency. Variable 'result' used at line 112 is defined at line 111 and has a Short-Range dependency.
{}
{'Library Long-Range': 3, 'Function Medium-Range': 2, 'Variable Short-Range': 8}
completion_python
Image_Transformation
110
112
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))']
[' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result']
['', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[]
Library 'np' used at line 110 is imported at line 3 and has a Long-Range dependency. Variable 'scaling' used at line 110 is defined at line 106 and has a Short-Range dependency. Variable 'result' used at line 110 is defined at line 109 and has a Short-Range dependency. Library 'np' used at line 111 is imported at line 3 and has a Long-Range dependency. Variable 'shear' used at line 111 is defined at line 107 and has a Short-Range dependency. Variable 'result' used at line 111 is defined at line 110 and has a Short-Range dependency. Variable 'result' used at line 112 is defined at line 111 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 5}
completion_python
Image_Transformation
121
121
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:']
[' return 0']
[' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Body', 'usage_line': 121}]
null
{'If Body': 1}
null
completion_python
Image_Transformation
123
131
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:']
[' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)']
['', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Else Reasoning', 'usage_line': 123}, {'reason_category': 'Else Reasoning', 'usage_line': 124}, {'reason_category': 'Else Reasoning', 'usage_line': 125}, {'reason_category': 'Else Reasoning', 'usage_line': 126}, {'reason_category': 'Else Reasoning', 'usage_line': 127}, {'reason_category': 'Else Reasoning', 'usage_line': 128}, {'reason_category': 'Else Reasoning', 'usage_line': 129}, {'reason_category': 'Else Reasoning', 'usage_line': 130}, {'reason_category': 'Else Reasoning', 'usage_line': 131}]
Variable 'image' used at line 123 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 123 is defined at line 117 and has a Short-Range dependency. Variable 'x1' used at line 123 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 124 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 124 is defined at line 117 and has a Short-Range dependency. Variable 'x2' used at line 124 is defined at line 116 and has a Short-Range dependency. Variable 'image' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 125 is defined at line 118 and has a Short-Range dependency. Variable 'x1' used at line 125 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 126 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 126 is defined at line 118 and has a Short-Range dependency. Variable 'x2' used at line 126 is defined at line 116 and has a Short-Range dependency. Variable 'x2' used at line 128 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 128 is defined at line 118 and has a Short-Range dependency. Variable 'y' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'x' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 129 is defined at line 115 and has a Medium-Range dependency. Variable 'y2' used at line 129 is defined at line 118 and has a Medium-Range dependency. Variable 'y' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x2' used at line 130 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 130 is defined at line 117 and has a Medium-Range dependency. Variable 'x' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 131 is defined at line 115 and has a Medium-Range dependency. Variable 'y' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 131 is defined at line 117 and has a Medium-Range dependency.
{'Else Reasoning': 9}
{'Variable Short-Range': 11, 'Variable Medium-Range': 17}
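The corner fetch and the four weights isolated in the 'between' field of the record above are the standard bilinear interpolation stencil. As a worked restatement, using the same indexing as the snippet (x_2 = x_1 + 1, y_2 = y_1 + 1):

f(x, y) \approx w_1 f_{11} + w_2 f_{12} + w_3 f_{21} + w_4 f_{22}

w_1 = (x_2 - x)(y_2 - y), \quad w_2 = (x - x_1)(y_2 - y), \quad w_3 = (x_2 - x)(y - y_1), \quad w_4 = (x - x_1)(y - y_1)

with f_{11} = I[y_1, x_1], f_{12} = I[y_1, x_2], f_{21} = I[y_2, x_1], f_{22} = I[y_2, x_2]. Because x_2 - x_1 = y_2 - y_1 = 1, the weights sum to 1, and at exact integer coordinates the expression collapses to the pixel value itself.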
completion_python
Image_Transformation
115
133
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):']
[' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)']
['', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 120}, {'reason_category': 'If Body', 'usage_line': 121}, {'reason_category': 'Else Reasoning', 'usage_line': 122}, {'reason_category': 'Else Reasoning', 'usage_line': 123}, {'reason_category': 'Else Reasoning', 'usage_line': 124}, {'reason_category': 'Else Reasoning', 'usage_line': 125}, {'reason_category': 'Else Reasoning', 'usage_line': 126}, {'reason_category': 'Else Reasoning', 'usage_line': 127}, {'reason_category': 'Else Reasoning', 'usage_line': 128}, {'reason_category': 'Else Reasoning', 'usage_line': 129}, {'reason_category': 'Else Reasoning', 'usage_line': 130}, {'reason_category': 'Else Reasoning', 'usage_line': 131}]
Variable 'x' used at line 115 is defined at line 114 and has a Short-Range dependency. Variable 'x1' used at line 116 is defined at line 115 and has a Short-Range dependency. Variable 'y' used at line 117 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 118 is defined at line 117 and has a Short-Range dependency. Variable 'x1' used at line 120 is defined at line 115 and has a Short-Range dependency. Variable 'y1' used at line 120 is defined at line 117 and has a Short-Range dependency. Variable 'x2' used at line 120 is defined at line 116 and has a Short-Range dependency. Variable 'image' used at line 120 is defined at line 114 and has a Short-Range dependency. Variable 'y2' used at line 120 is defined at line 118 and has a Short-Range dependency. Variable 'image' used at line 123 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 123 is defined at line 117 and has a Short-Range dependency. Variable 'x1' used at line 123 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 124 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 124 is defined at line 117 and has a Short-Range dependency. Variable 'x2' used at line 124 is defined at line 116 and has a Short-Range dependency. Variable 'image' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 125 is defined at line 118 and has a Short-Range dependency. Variable 'x1' used at line 125 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 126 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 126 is defined at line 118 and has a Short-Range dependency. Variable 'x2' used at line 126 is defined at line 116 and has a Short-Range dependency. Variable 'x2' used at line 128 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 128 is defined at line 118 and has a Short-Range dependency. Variable 'y' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'x' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 129 is defined at line 115 and has a Medium-Range dependency. Variable 'y2' used at line 129 is defined at line 118 and has a Medium-Range dependency. Variable 'y' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x2' used at line 130 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 130 is defined at line 117 and has a Medium-Range dependency. Variable 'x' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 131 is defined at line 115 and has a Medium-Range dependency. Variable 'y' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 131 is defined at line 117 and has a Medium-Range dependency. Variable 'w1' used at line 133 is defined at line 128 and has a Short-Range dependency. Variable 'f11' used at line 133 is defined at line 123 and has a Short-Range dependency. Variable 'w2' used at line 133 is defined at line 129 and has a Short-Range dependency. 
Variable 'f12' used at line 133 is defined at line 124 and has a Short-Range dependency. Variable 'w3' used at line 133 is defined at line 130 and has a Short-Range dependency. Variable 'f21' used at line 133 is defined at line 125 and has a Short-Range dependency. Variable 'w4' used at line 133 is defined at line 131 and has a Short-Range dependency. Variable 'f22' used at line 133 is defined at line 126 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1, 'Else Reasoning': 10}
{'Variable Short-Range': 28, 'Variable Medium-Range': 17}
completion_python
Image_Transformation
120
133
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '']
[' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)']
['', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'If Condition', 'usage_line': 120}, {'reason_category': 'If Body', 'usage_line': 121}, {'reason_category': 'Else Reasoning', 'usage_line': 122}, {'reason_category': 'Else Reasoning', 'usage_line': 123}, {'reason_category': 'Else Reasoning', 'usage_line': 124}, {'reason_category': 'Else Reasoning', 'usage_line': 125}, {'reason_category': 'Else Reasoning', 'usage_line': 126}, {'reason_category': 'Else Reasoning', 'usage_line': 127}, {'reason_category': 'Else Reasoning', 'usage_line': 128}, {'reason_category': 'Else Reasoning', 'usage_line': 129}, {'reason_category': 'Else Reasoning', 'usage_line': 130}, {'reason_category': 'Else Reasoning', 'usage_line': 131}]
Variable 'x1' used at line 120 is defined at line 115 and has a Short-Range dependency. Variable 'y1' used at line 120 is defined at line 117 and has a Short-Range dependency. Variable 'x2' used at line 120 is defined at line 116 and has a Short-Range dependency. Variable 'image' used at line 120 is defined at line 114 and has a Short-Range dependency. Variable 'y2' used at line 120 is defined at line 118 and has a Short-Range dependency. Variable 'image' used at line 123 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 123 is defined at line 117 and has a Short-Range dependency. Variable 'x1' used at line 123 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 124 is defined at line 114 and has a Short-Range dependency. Variable 'y1' used at line 124 is defined at line 117 and has a Short-Range dependency. Variable 'x2' used at line 124 is defined at line 116 and has a Short-Range dependency. Variable 'image' used at line 125 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 125 is defined at line 118 and has a Short-Range dependency. Variable 'x1' used at line 125 is defined at line 115 and has a Short-Range dependency. Variable 'image' used at line 126 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 126 is defined at line 118 and has a Short-Range dependency. Variable 'x2' used at line 126 is defined at line 116 and has a Short-Range dependency. Variable 'x2' used at line 128 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'y2' used at line 128 is defined at line 118 and has a Short-Range dependency. Variable 'y' used at line 128 is defined at line 114 and has a Medium-Range dependency. Variable 'x' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 129 is defined at line 115 and has a Medium-Range dependency. Variable 'y2' used at line 129 is defined at line 118 and has a Medium-Range dependency. Variable 'y' used at line 129 is defined at line 114 and has a Medium-Range dependency. Variable 'x2' used at line 130 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y' used at line 130 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 130 is defined at line 117 and has a Medium-Range dependency. Variable 'x' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'x1' used at line 131 is defined at line 115 and has a Medium-Range dependency. Variable 'y' used at line 131 is defined at line 114 and has a Medium-Range dependency. Variable 'y1' used at line 131 is defined at line 117 and has a Medium-Range dependency. Variable 'w1' used at line 133 is defined at line 128 and has a Short-Range dependency. Variable 'f11' used at line 133 is defined at line 123 and has a Short-Range dependency. Variable 'w2' used at line 133 is defined at line 129 and has a Short-Range dependency. Variable 'f12' used at line 133 is defined at line 124 and has a Short-Range dependency. Variable 'w3' used at line 133 is defined at line 130 and has a Short-Range dependency. Variable 'f21' used at line 133 is defined at line 125 and has a Short-Range dependency. Variable 'w4' used at line 133 is defined at line 131 and has a Short-Range dependency. 
Variable 'f22' used at line 133 is defined at line 126 and has a Short-Range dependency.
{'If Condition': 1, 'If Body': 1, 'Else Reasoning': 10}
{'Variable Short-Range': 24, 'Variable Medium-Range': 17}
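The 'between' span of the record above adds the out-of-bounds guard in front of the same weighting: any sample whose x2 or y2 falls outside the image returns 0. A small self-contained check of that behaviour follows; the helper restates the snippet's logic compactly so the check runs on its own and is not the dataset's code.

import numpy as np

def bilerp(image, x, y):
    # Same logic as the bilinear_interpolation in the record above.
    x1, y1 = int(x), int(y)
    x2, y2 = x1 + 1, y1 + 1
    if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:
        return 0
    w1 = (x2 - x) * (y2 - y)
    w2 = (x - x1) * (y2 - y)
    w3 = (x2 - x) * (y - y1)
    w4 = (x - x1) * (y - y1)
    return (w1 * image[y1][x1] + w2 * image[y1][x2]
            + w3 * image[y2][x1] + w4 * image[y2][x2])

img = np.arange(16, dtype=np.float64).reshape(4, 4)
print(bilerp(img, 1, 2))      # 9.0  -> integer coordinates reproduce the pixel value
print(bilerp(img, 1.5, 2.5))  # 11.5 -> midpoint averages the 2x2 neighbourhood
print(bilerp(img, 3.5, 0))    # 0    -> x2 == 4 falls outside the image, guard returns 0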
completion_python
Image_Transformation
143
146
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '', ' for i in range(rows):', ' for j in range(cols):']
[' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)']
[' output = np.array(output, np.uint8)', ' return output', '', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Loop Body', 'usage_line': 143}, {'reason_category': 'Loop Body', 'usage_line': 144}, {'reason_category': 'Loop Body', 'usage_line': 145}, {'reason_category': 'Loop Body', 'usage_line': 146}]
Library 'np' used at line 143 is imported at line 3 and has a Long-Range dependency. Variable 'j' used at line 143 is part of a Loop defined at line 142 and has a Short-Range dependency. Variable 'center' used at line 143 is defined at line 138 and has a Short-Range dependency. Variable 'i' used at line 143 is part of a Loop defined at line 141 and has a Short-Range dependency. Library 'np' used at line 144 is imported at line 3 and has a Long-Range dependency. Variable 'T_invert' used at line 144 is defined at line 139 and has a Short-Range dependency. Variable 'shift_center' used at line 144 is defined at line 143 and has a Short-Range dependency. Variable 'coordinates' used at line 145 is defined at line 144 and has a Short-Range dependency. Variable 'center' used at line 145 is defined at line 138 and has a Short-Range dependency. Variable 'output' used at line 146 is defined at line 137 and has a Short-Range dependency. Variable 'i' used at line 146 is part of a Loop defined at line 141 and has a Short-Range dependency. Variable 'j' used at line 146 is part of a Loop defined at line 142 and has a Short-Range dependency. Function 'bilinear_interpolation' used at line 146 is defined at line 114 and has a Long-Range dependency. Variable 'I' used at line 146 is defined at line 135 and has a Medium-Range dependency. Variable 'x' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'y' used at line 146 is defined at line 145 and has a Short-Range dependency.
{'Loop Body': 4}
{'Library Long-Range': 2, 'Variable Loop Short-Range': 4, 'Variable Short-Range': 8, 'Function Long-Range': 1, 'Variable Medium-Range': 1}
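The loop body isolated in the record above is a backward warp: for each destination pixel (i, j) it shifts to the image centre (c_x, c_y) = (cols/2, rows/2), applies the inverse transform, shifts back, and samples the source image with bilinear interpolation. In equation form:

\begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix} = T^{-1} \begin{pmatrix} j - c_x \\ i - c_y \\ 1 \end{pmatrix}, \qquad \text{output}(i, j) = \mathrm{bilinear\_interpolation}\left(I,\; x' + c_x,\; y' + c_y\right)

Every T built by translation, rotation, similarity_matrix and affine in this code has last row (0, 0, 1), so T^{-1} keeps the third homogeneous component equal to 1 and no perspective division is needed; destination pixels whose source coordinates land outside the image are left at 0 by the interpolation guard.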
completion_python
Image_Transformation
141
148
['from PIL import Image', 'import cv2', 'import numpy as np', 'import matplotlib.pyplot as plt', '', '#Task 1:', '#resize two imgs', '#resize imgA with cv2.resize', '#resize imgB via center cropping', '#ensure both resized images are resized to same end_size', '#concatenate the resized imgs via every other row imgA and every other row imgB', '', "A = Image.open('./imgA.jpg')", "B = Image.open('./imgB.jpg')", 'A_array = np.array(A)', 'B_array = np.array(B)', "print('Array shapes:',A_array.shape, B_array.shape)", 'print(A_array)', 'print(B_array)', '', 'end_size = 256', '', 'if end_size >= A_array.shape[0] or end_size>= B_array.shape[0]:', " print('choose end size less than: ', np.min(A_array.shape,B_array.shape))", '', 'A_resized = cv2.resize(A_array, (end_size, end_size))', '', 'def center_crop(img_array, end_size):', ' x_start = int((img_array.shape[0]-end_size)/2)', ' x_end = x_start + end_size', '', ' y_start = int((img_array.shape[1]-end_size)/2)', ' y_end = y_start + end_size', '', ' img_resized = img_array[x_start:x_end, y_start:y_end, :]', ' return img_resized', 'B_resized = center_crop(B_array, end_size)', 'print(B_resized.shape)', 'C = np.concatenate((A_resized[0:256,0:128,:],B_resized[0:256,128:256,:]),axis = 1)', '', 'D = B_resized[0:1,:,:]', 'for row in range(1,A_resized.shape[0]):', ' if row % 2 == 0:', ' D = np.concatenate((D,B_resized[row:row+1,:,:]), axis=0)', ' else:', ' D = np.concatenate((D,A_resized[row:row+1,:,:]), axis =0)', 'print(D)', '', '#Task 2:', '#upload picture of multiple peppers each different colors', '#create a mask without the yellow peppers by using range provided', '#do this in rgb and hsv -> note different ranges depending on rgb or hsv', "pepper_img = Image.open('./pepper.png')", 'pepper = np.array(pepper_img)', '', 'lower_yellow = np.array([150, 175, 0], dtype=np.uint8)', 'upper_yellow = np.array([255, 255, 150], dtype=np.uint8)', 'mask = np.all((pepper >= lower_yellow) & (pepper <= upper_yellow), axis=-1)', '', 'result = np.where(mask, 1, 0)', '', 'print(pepper)', 'print(result)', '', "img = cv2.imread('./pepper.png')", 'hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)', '', 'lower_yellow = np.array([19, 0, 0], dtype=np.uint8)', 'upper_yellow = np.array([24, 255, 255], dtype=np.uint8)', 'mask = np.all((hsv_img >= lower_yellow) & (hsv_img <= upper_yellow), axis=-1)', '', 'result_hsv = np.where(mask, 1, 0)', '', 'print(hsv_img)', 'print(result_hsv)', '', '#Task 3:', '#write transormation functions to translsate, rotate, and perform similarity and affine transformation', '#write bilinear interpolation function from scratch', '#apply series of transformations to an image', '', 'def translation(dx,dy):', ' translation_matrix = np.array([[1,0,dx],[0,1,dy],[0,0,1]])', ' return translation_matrix', '', 'def rotation(angle,radians = True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', ' rotation_matrix = np.array([[costheta, sintheta,0],[-1*sintheta,costheta,0],[0,0,1]])', ' return rotation_matrix', '', 'def similarity_matrix(angle, dx, dy, scale_factor,radians=True):', ' if radians == False:', ' angle = np.radians(angle)', ' costheta = np.cos(angle)', ' sintheta = np.sin(angle)', '', ' similarity_matrix = np.array([[scale_factor*costheta,scale_factor*sintheta,dx],', ' [-1*scale_factor*sintheta, scale_factor*costheta, dy],', ' [0,0,1]])', ' return similarity_matrix', '', 'def affine(angle, x, y, scale, ax, ay):', ' scaling = np.array([[scale, 0,0], [0, scale, 0], [0,0,1]])', ' shear = 
np.array([[1, ax, 0], [ay, 1,0], [0, 0,1]])', ' result = np.array([[0,0,0], [0,0,0], [0,0,0]])', ' result = np.dot(translation(x, y), rotation(angle))', ' result = np.dot(result, scaling)', ' result = np.dot(result, shear)', ' return result', '', 'def bilinear_interpolation(image,x,y):', ' x1 = int(x)', ' x2 = x1 + 1', ' y1 = int(y)', ' y2 = y1 + 1', '', ' if x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:', ' return 0', ' else:', ' f11 = image[y1][x1]', ' f12 = image[y1][x2]', ' f21 = image[y2][x1]', ' f22 = image[y2][x2]', '', ' w1 = (x2-x)*(y2-y)', ' w2 = (x-x1)*(y2-y)', ' w3 = (x2-x)*(y-y1)', ' w4 = (x-x1)*(y-y1)', '', ' return (w1*f11) + (w2*f12) + (w3*f21) + (w4*f22)', '', 'def image_warp(I,T):', ' rows,cols = I.shape[:2]', ' output = np.zeros((rows,cols,3))', ' center = (cols/2, rows/2)', ' T_invert = np.linalg.inv(T)', '']
[' for i in range(rows):', ' for j in range(cols):', ' shift_center = np.array([j-center[0],i -center[1],1])', ' coordinates = np.dot(T_invert,shift_center)', ' x,y = coordinates[0] + center[0], coordinates [1] + center[1]', ' output[i][j] = bilinear_interpolation(I,x,y)', ' output = np.array(output, np.uint8)', ' return output']
['', "path= './arabella.jpg'", 'arabella = cv2.imread(path)', 'arabella_smol = cv2.resize(arabella, dsize=(256, 192), interpolation=cv2.INTER_AREA)', 'arabella_smol = np.array(arabella_smol)', 'arabella_smol = arabella_smol[:, :, [2, 1, 0]]', '', '#translate images keep params as shown', 't1 = translation(21,25)', 'warped_arabella1 = image_warp(arabella_smol,t1)', 't2 = translation(-21,25)', 'warped_arabella2 = image_warp(arabella_smol,t2)', 't3 = translation(21,-25)', 'warped_arabella3 = image_warp(arabella_smol,t3)', 't4 = translation(-21,25)', 'warped_arabella4 = image_warp(arabella_smol,t4)', 'print(warped_arabella1)', 'print(warped_arabella2)', 'print(warped_arabella3)', 'print(warped_arabella4)', '', '# rotate image 30 degrees clockwise and 30 degrees counterclockwise', 'r1 = rotation(30, False)', 'r2 = rotation(-30, False)', '', 'warped_arabella5 = image_warp(arabella_smol,r1)', 'warped_arabella6 = image_warp(arabella_smol,r2)', 'print(warped_arabella5)', 'print(warped_arabella6)', '', '#apply similarity transformation to image, keep params as shown below', 's1 = similarity_matrix(60, 0, 0, 0.5,radians=False)', 'warped_arabella7 = image_warp(arabella_smol,s1)', 'print(warped_arabella7)', '', '#apply affine transformation to image, keep params as shown below', 'a1 = affine(90, 2, 3, .5, 5, 2)', 'warped_arabella8 = image_warp(arabella_smol,a1)', 'print(warped_arabella8)', '', '#Task 4:', '#artificially replicate overhead and desklight scene via addition of overhead lit only and desklight lit only scenes', '#make sure to scale properly ', "path1= './desklight.jpg'", "path2= './overheadlight.jpg'", "path3 = './bothlight.jpg'", 'I1 = np.array(cv2.imread(path1))[:,:,[2,1,0]]', 'I2 = np.array(cv2.imread(path2))[:,:,[2,1,0]]', 'I12 = np.array(cv2.imread(path3))[:,:,[2,1,0]]', '', 'type(I12[0,0,0])', 'I1_float = I1/255.0', 'I2_float = I2/255.0', 'I12_float = I1_float + I2_float', 'type(I12_float[0,0,0]),np.min(I12_float),np.max(I12_float)', 'I12_uint8 = (I12_float * 255.0).astype(np.uint8)', 'type(I12_uint8[0,0,0]),np.min(I12_uint8),np.max(I12_uint8)', '', 'synthI12 = I1+I2', 'diffI = synthI12 - I12', 'diffI_scaled = (diffI - np.min(diffI))/(np.max(diffI)-np.min(diffI))', '', 'print(I12)', 'print(synthI12)', 'print(diffI_scaled)']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 141}, {'reason_category': 'Loop Body', 'usage_line': 142}, {'reason_category': 'Define Stop Criteria', 'usage_line': 142}, {'reason_category': 'Loop Body', 'usage_line': 143}, {'reason_category': 'Loop Body', 'usage_line': 144}, {'reason_category': 'Loop Body', 'usage_line': 145}, {'reason_category': 'Loop Body', 'usage_line': 146}]
Variable 'rows' used at line 141 is defined at line 136 and has a Short-Range dependency. Variable 'cols' used at line 142 is defined at line 136 and has a Short-Range dependency. Library 'np' used at line 143 is imported at line 3 and has a Long-Range dependency. Variable 'j' used at line 143 is part of a Loop defined at line 142 and has a Short-Range dependency. Variable 'center' used at line 143 is defined at line 138 and has a Short-Range dependency. Variable 'i' used at line 143 is part of a Loop defined at line 141 and has a Short-Range dependency. Library 'np' used at line 144 is imported at line 3 and has a Long-Range dependency. Variable 'T_invert' used at line 144 is defined at line 139 and has a Short-Range dependency. Variable 'shift_center' used at line 144 is defined at line 143 and has a Short-Range dependency. Variable 'coordinates' used at line 145 is defined at line 144 and has a Short-Range dependency. Variable 'center' used at line 145 is defined at line 138 and has a Short-Range dependency. Variable 'output' used at line 146 is defined at line 137 and has a Short-Range dependency. Variable 'i' used at line 146 is part of a Loop defined at line 141 and has a Short-Range dependency. Variable 'j' used at line 146 is part of a Loop defined at line 142 and has a Short-Range dependency. Function 'bilinear_interpolation' used at line 146 is defined at line 114 and has a Long-Range dependency. Variable 'I' used at line 146 is defined at line 135 and has a Medium-Range dependency. Variable 'x' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'y' used at line 146 is defined at line 145 and has a Short-Range dependency. Library 'np' used at line 147 is imported at line 3 and has a Long-Range dependency. Variable 'output' used at line 147 is defined at line 137 and has a Short-Range dependency. Variable 'output' used at line 148 is defined at line 147 and has a Short-Range dependency.
{'Define Stop Criteria': 2, 'Loop Body': 5}
{'Variable Short-Range': 12, 'Library Long-Range': 3, 'Variable Loop Short-Range': 4, 'Function Long-Range': 1, 'Variable Medium-Range': 1}
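The per-pixel Python loop in the record above is easy to read but slow for large images. Below is a hedged, vectorised sketch of the same centre-shifted backward mapping in pure NumPy; it swaps bilinear sampling for nearest-neighbour to stay short, so it illustrates the speed/smoothness trade-off rather than reproducing the dataset's reference implementation.

import numpy as np

def image_warp_nn(I, T):
    # Vectorised backward warp with nearest-neighbour sampling (illustrative sketch).
    rows, cols = I.shape[:2]
    cx, cy = cols / 2, rows / 2
    T_inv = np.linalg.inv(T)

    # Homogeneous grid of destination coordinates, shifted to the image centre.
    jj, ii = np.meshgrid(np.arange(cols), np.arange(rows))
    dst = np.stack([jj - cx, ii - cy, np.ones_like(jj, dtype=float)], axis=0).reshape(3, -1)

    # Backward-map all pixels at once, then shift back from the centre.
    src = T_inv @ dst
    x = np.rint(src[0] + cx).astype(int).reshape(rows, cols)
    y = np.rint(src[1] + cy).astype(int).reshape(rows, cols)

    # Anything that maps outside the source image stays 0 (black), as in the loop version.
    valid = (x >= 0) & (x < cols) & (y >= 0) & (y < rows)
    out = np.zeros_like(I)
    out[ii[valid], jj[valid]] = I[y[valid], x[valid]]
    return out

For the integer translations used in these records (e.g. translation(21, 25)) this matches the loop version up to one-pixel differences at the border and the rounding introduced by nearest-neighbour sampling.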
completion_python
Credit_Scoring_Fairness
51
51
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()']
['gnb_pred = gnb.fit(x_train, y_train).predict(x_test)']
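The single line in this record's 'between' field chains GaussianNB's fit and predict; the surrounding 'after' field then recomputes accuracy from the confusion matrix for each marital-status subset. A minimal, self-contained sketch of that fit/predict + confusion-matrix pattern on synthetic data follows (the toy arrays are illustrative only, not the bank dataset):

import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix

# Synthetic stand-in for the dummy-encoded features and binary target.
rng = np.random.default_rng(1234)
X = rng.normal(size=(200, 4))
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)

pred = GaussianNB().fit(X, y).predict(X)
cm = confusion_matrix(y, pred)
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()   # same diagonal-over-total formula used in the record
print(cm)
print('accuracy:', accuracy)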
['', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', 
"print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, 
y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = 
x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', 
"print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', 
confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 51 is defined at line 50 and has a Short-Range dependency. Variable 'x_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'y_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'x_test' used at line 51 is defined at line 39 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Medium-Range': 3}
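The dependency and frequency fields of this record classify each variable use by the line distance between its use and its definition (a gap of 1 line is labeled Short-Range, a gap of 12 lines Medium-Range) and then tally the categories. The sketch below only illustrates that bookkeeping: the SHORT_MAX and MEDIUM_MAX cut-offs are assumptions chosen to be consistent with the two gap sizes visible here, not the pipeline's actual thresholds, and classify_dependency is an illustrative name.

# Hedged sketch: bucket def-use line gaps and tally them, mirroring the
# analysis and frequency fields above. Thresholds are assumed for illustration.
from collections import Counter

SHORT_MAX = 5    # hypothetical: gaps of 1-5 lines -> Short-Range
MEDIUM_MAX = 30  # hypothetical: gaps of 6-30 lines -> Medium-Range

def classify_dependency(use_line: int, def_line: int) -> str:
    gap = use_line - def_line
    if gap <= SHORT_MAX:
        return 'Variable Short-Range'
    if gap <= MEDIUM_MAX:
        return 'Variable Medium-Range'
    return 'Variable Long-Range'

# The four dependencies listed above: gnb (51 <- 50), x_train/y_train/x_test (51 <- 39).
deps = [(51, 50), (51, 39), (51, 39), (51, 39)]
freq = Counter(classify_dependency(u, d) for u, d in deps)
print(dict(freq))  # with these assumed cut-offs: {'Variable Short-Range': 1, 'Variable Medium-Range': 3}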