repo (stringlengths 3-91) | file (stringlengths 16-152) | code (stringlengths 0-3.77M) | file_length (int64, 0-3.77M) | avg_line_length (float64, 0-16k) | max_line_length (int64, 0-273k) | extension_type (stringclasses, 1 value: py)
---|---|---|---|---|---|---
Beholder-GAN | Beholder-GAN-master/metrics/sliced_wasserstein.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#
#Attribution-NonCommercial 4.0 International
#
#=======================================================================
#
#Creative Commons Corporation ("Creative Commons") is not a law firm and
#does not provide legal services or legal advice. Distribution of
#Creative Commons public licenses does not create a lawyer-client or
#other relationship. Creative Commons makes its licenses and related
#information available on an "as-is" basis. Creative Commons gives no
#warranties regarding its licenses, any material licensed under their
#terms and conditions, or any related information. Creative Commons
#disclaims all liability for damages resulting from their use to the
#fullest extent possible.
#
#Using Creative Commons Public Licenses
#
#Creative Commons public licenses provide a standard set of terms and
#conditions that creators and other rights holders may use to share
#original works of authorship and other material subject to copyright
#and certain other rights specified in the public license below. The
#following considerations are for informational purposes only, are not
#exhaustive, and do not form part of our licenses.
#
# Considerations for licensors: Our public licenses are
# intended for use by those authorized to give the public
# permission to use material in ways otherwise restricted by
# copyright and certain other rights. Our licenses are
# irrevocable. Licensors should read and understand the terms
# and conditions of the license they choose before applying it.
# Licensors should also secure all rights necessary before
# applying our licenses so that the public can reuse the
# material as expected. Licensors should clearly mark any
# material not subject to the license. This includes other CC-
# licensed material, or material used under an exception or
# limitation to copyright. More considerations for licensors:
# wiki.creativecommons.org/Considerations_for_licensors
#
# Considerations for the public: By using one of our public
# licenses, a licensor grants the public permission to use the
# licensed material under specified terms and conditions. If
# the licensor's permission is not necessary for any reason--for
# example, because of any applicable exception or limitation to
# copyright--then that use is not regulated by the license. Our
# licenses grant only permissions under copyright and certain
# other rights that a licensor has authority to grant. Use of
# the licensed material may still be restricted for other
# reasons, including because others have copyright or other
# rights in the material. A licensor may make special requests,
# such as asking that all changes be marked or described.
#          Although not required by our licenses, you are encouraged to
#          respect those requests where reasonable. More considerations
#          for the public:
#          wiki.creativecommons.org/Considerations_for_licensees
#
#=======================================================================
#
#Creative Commons Attribution-NonCommercial 4.0 International Public
#License
#
#By exercising the Licensed Rights (defined below), You accept and agree
#to be bound by the terms and conditions of this Creative Commons
#Attribution-NonCommercial 4.0 International Public License ("Public
#License"). To the extent this Public License may be interpreted as a
#contract, You are granted the Licensed Rights in consideration of Your
#acceptance of these terms and conditions, and the Licensor grants You
#such rights in consideration of benefits the Licensor receives from
#making the Licensed Material available under these terms and
#conditions.
#
#
#Section 1 -- Definitions.
#
# a. Adapted Material means material subject to Copyright and Similar
# Rights that is derived from or based upon the Licensed Material
# and in which the Licensed Material is translated, altered,
# arranged, transformed, or otherwise modified in a manner requiring
# permission under the Copyright and Similar Rights held by the
# Licensor. For purposes of this Public License, where the Licensed
# Material is a musical work, performance, or sound recording,
# Adapted Material is always produced where the Licensed Material is
# synched in timed relation with a moving image.
#
# b. Adapter's License means the license You apply to Your Copyright
# and Similar Rights in Your contributions to Adapted Material in
# accordance with the terms and conditions of this Public License.
#
# c. Copyright and Similar Rights means copyright and/or similar rights
# closely related to copyright including, without limitation,
# performance, broadcast, sound recording, and Sui Generis Database
# Rights, without regard to how the rights are labeled or
# categorized. For purposes of this Public License, the rights
# specified in Section 2(b)(1)-(2) are not Copyright and Similar
# Rights.
# d. Effective Technological Measures means those measures that, in the
# absence of proper authority, may not be circumvented under laws
# fulfilling obligations under Article 11 of the WIPO Copyright
# Treaty adopted on December 20, 1996, and/or similar international
# agreements.
#
# e. Exceptions and Limitations means fair use, fair dealing, and/or
# any other exception or limitation to Copyright and Similar Rights
# that applies to Your use of the Licensed Material.
#
# f. Licensed Material means the artistic or literary work, database,
# or other material to which the Licensor applied this Public
# License.
#
# g. Licensed Rights means the rights granted to You subject to the
# terms and conditions of this Public License, which are limited to
# all Copyright and Similar Rights that apply to Your use of the
# Licensed Material and that the Licensor has authority to license.
#
# h. Licensor means the individual(s) or entity(ies) granting rights
# under this Public License.
#
# i. NonCommercial means not primarily intended for or directed towards
# commercial advantage or monetary compensation. For purposes of
# this Public License, the exchange of the Licensed Material for
# other material subject to Copyright and Similar Rights by digital
# file-sharing or similar means is NonCommercial provided there is
# no payment of monetary compensation in connection with the
# exchange.
#
# j. Share means to provide material to the public by any means or
# process that requires permission under the Licensed Rights, such
# as reproduction, public display, public performance, distribution,
# dissemination, communication, or importation, and to make material
# available to the public including in ways that members of the
# public may access the material from a place and at a time
# individually chosen by them.
#
# k. Sui Generis Database Rights means rights other than copyright
# resulting from Directive 96/9/EC of the European Parliament and of
# the Council of 11 March 1996 on the legal protection of databases,
# as amended and/or succeeded, as well as other essentially
# equivalent rights anywhere in the world.
#
# l. You means the individual or entity exercising the Licensed Rights
# under this Public License. Your has a corresponding meaning.
#
#
#Section 2 -- Scope.
#
# a. License grant.
#
# 1. Subject to the terms and conditions of this Public License,
# the Licensor hereby grants You a worldwide, royalty-free,
# non-sublicensable, non-exclusive, irrevocable license to
# exercise the Licensed Rights in the Licensed Material to:
#
# a. reproduce and Share the Licensed Material, in whole or
# in part, for NonCommercial purposes only; and
#
# b. produce, reproduce, and Share Adapted Material for
# NonCommercial purposes only.
#
# 2. Exceptions and Limitations. For the avoidance of doubt, where
# Exceptions and Limitations apply to Your use, this Public
# License does not apply, and You do not need to comply with
# its terms and conditions.
#
# 3. Term. The term of this Public License is specified in Section
# 6(a).
#
# 4. Media and formats; technical modifications allowed. The
# Licensor authorizes You to exercise the Licensed Rights in
# all media and formats whether now known or hereafter created,
# and to make technical modifications necessary to do so. The
# Licensor waives and/or agrees not to assert any right or
# authority to forbid You from making technical modifications
# necessary to exercise the Licensed Rights, including
# technical modifications necessary to circumvent Effective
# Technological Measures. For purposes of this Public License,
# simply making modifications authorized by this Section 2(a)
# (4) never produces Adapted Material.
#
# 5. Downstream recipients.
#
# a. Offer from the Licensor -- Licensed Material. Every
# recipient of the Licensed Material automatically
# receives an offer from the Licensor to exercise the
# Licensed Rights under the terms and conditions of this
# Public License.
#
# b. No downstream restrictions. You may not offer or impose
# any additional or different terms or conditions on, or
# apply any Effective Technological Measures to, the
# Licensed Material if doing so restricts exercise of the
# Licensed Rights by any recipient of the Licensed
# Material.
#
# 6. No endorsement. Nothing in this Public License constitutes or
# may be construed as permission to assert or imply that You
# are, or that Your use of the Licensed Material is, connected
# with, or sponsored, endorsed, or granted official status by,
# the Licensor or others designated to receive attribution as
# provided in Section 3(a)(1)(A)(i).
#
# b. Other rights.
#
# 1. Moral rights, such as the right of integrity, are not
# licensed under this Public License, nor are publicity,
# privacy, and/or other similar personality rights; however, to
# the extent possible, the Licensor waives and/or agrees not to
# assert any such rights held by the Licensor to the limited
# extent necessary to allow You to exercise the Licensed
# Rights, but not otherwise.
#
# 2. Patent and trademark rights are not licensed under this
# Public License.
#
# 3. To the extent possible, the Licensor waives any right to
# collect royalties from You for the exercise of the Licensed
# Rights, whether directly or through a collecting society
# under any voluntary or waivable statutory or compulsory
# licensing scheme. In all other cases the Licensor expressly
# reserves any right to collect such royalties, including when
# the Licensed Material is used other than for NonCommercial
# purposes.
#
#
#Section 3 -- License Conditions.
#
#Your exercise of the Licensed Rights is expressly made subject to the
#following conditions.
#
# a. Attribution.
#
# 1. If You Share the Licensed Material (including in modified
# form), You must:
#
# a. retain the following if it is supplied by the Licensor
# with the Licensed Material:
#
# i. identification of the creator(s) of the Licensed
# Material and any others designated to receive
# attribution, in any reasonable manner requested by
# the Licensor (including by pseudonym if
# designated);
#
# ii. a copyright notice;
#
# iii. a notice that refers to this Public License;
#
# iv. a notice that refers to the disclaimer of
# warranties;
#
# v. a URI or hyperlink to the Licensed Material to the
# extent reasonably practicable;
#
# b. indicate if You modified the Licensed Material and
# retain an indication of any previous modifications; and
#
# c. indicate the Licensed Material is licensed under this
# Public License, and include the text of, or the URI or
# hyperlink to, this Public License.
#
# 2. You may satisfy the conditions in Section 3(a)(1) in any
# reasonable manner based on the medium, means, and context in
# which You Share the Licensed Material. For example, it may be
# reasonable to satisfy the conditions by providing a URI or
# hyperlink to a resource that includes the required
# information.
#
# 3. If requested by the Licensor, You must remove any of the
# information required by Section 3(a)(1)(A) to the extent
# reasonably practicable.
#
# 4. If You Share Adapted Material You produce, the Adapter's
# License You apply must not prevent recipients of the Adapted
# Material from complying with this Public License.
#
#
#Section 4 -- Sui Generis Database Rights.
#
#Where the Licensed Rights include Sui Generis Database Rights that
#apply to Your use of the Licensed Material:
#
# a. for the avoidance of doubt, Section 2(a)(1) grants You the right
# to extract, reuse, reproduce, and Share all or a substantial
# portion of the contents of the database for NonCommercial purposes
# only;
#
# b. if You include all or a substantial portion of the database
# contents in a database in which You have Sui Generis Database
# Rights, then the database in which You have Sui Generis Database
# Rights (but not its individual contents) is Adapted Material; and
#
# c. You must comply with the conditions in Section 3(a) if You Share
# all or a substantial portion of the contents of the database.
#
#For the avoidance of doubt, this Section 4 supplements and does not
#replace Your obligations under this Public License where the Licensed
#Rights include other Copyright and Similar Rights.
#
#
#Section 5 -- Disclaimer of Warranties and Limitation of Liability.
#
# a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
# EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
# AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
# ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
# IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
# WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
# ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
# KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
# ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
#
# b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
# TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
# NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
# COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
# USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
# DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
# IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
#
# c. The disclaimer of warranties and limitation of liability provided
# above shall be interpreted in a manner that, to the extent
# possible, most closely approximates an absolute disclaimer and
# waiver of all liability.
#
#
#Section 6 -- Term and Termination.
#
# a. This Public License applies for the term of the Copyright and
# Similar Rights licensed here. However, if You fail to comply with
# this Public License, then Your rights under this Public License
# terminate automatically.
#
# b. Where Your right to use the Licensed Material has terminated under
# Section 6(a), it reinstates:
#
# 1. automatically as of the date the violation is cured, provided
# it is cured within 30 days of Your discovery of the
# violation; or
#
# 2. upon express reinstatement by the Licensor.
#
# For the avoidance of doubt, this Section 6(b) does not affect any
# right the Licensor may have to seek remedies for Your violations
# of this Public License.
#
# c. For the avoidance of doubt, the Licensor may also offer the
# Licensed Material under separate terms or conditions or stop
# distributing the Licensed Material at any time; however, doing so
# will not terminate this Public License.
#
# d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
# License.
#
#
#Section 7 -- Other Terms and Conditions.
#
# a. The Licensor shall not be bound by any additional or different
# terms or conditions communicated by You unless expressly agreed.
#
# b. Any arrangements, understandings, or agreements regarding the
# Licensed Material not stated herein are separate from and
# independent of the terms and conditions of this Public License.
#
#
#Section 8 -- Interpretation.
#
# a. For the avoidance of doubt, this Public License does not, and
# shall not be interpreted to, reduce, limit, restrict, or impose
# conditions on any use of the Licensed Material that could lawfully
# be made without permission under this Public License.
#
# b. To the extent possible, if any provision of this Public License is
# deemed unenforceable, it shall be automatically reformed to the
# minimum extent necessary to make it enforceable. If the provision
# cannot be reformed, it shall be severed from this Public License
# without affecting the enforceability of the remaining terms and
# conditions.
#
# c. No term or condition of this Public License will be waived and no
# failure to comply consented to unless expressly agreed to by the
# Licensor.
#
# d. Nothing in this Public License constitutes or may be interpreted
# as a limitation upon, or waiver of, any privileges and immunities
# that apply to the Licensor or You, including from the legal
# processes of any jurisdiction or authority.
#
#=======================================================================
#
#Creative Commons is not a party to its public
#licenses. Notwithstanding, Creative Commons may elect to apply one of
#its public licenses to material it publishes and in those instances
#will be considered the "Licensor." The text of the Creative Commons
#public licenses is dedicated to the public domain under the CC0 Public
#Domain Dedication. Except for the limited purpose of indicating that
#material is shared under a Creative Commons public license or as
#otherwise permitted by the Creative Commons policies published at
#creativecommons.org/policies, Creative Commons does not authorize the
#use of the trademark "Creative Commons" or any other trademark or logo
#of Creative Commons without its prior written consent including,
#without limitation, in connection with any unauthorized modifications
#to any of its public licenses or any other arrangements,
#understandings, or agreements concerning use of licensed material. For
#the avoidance of doubt, this paragraph does not form part of the
#public licenses.
#
#Creative Commons may be contacted at creativecommons.org.
import numpy as np
import scipy.ndimage
#----------------------------------------------------------------------------
def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image):
S = minibatch.shape # (minibatch, channel, height, width)
assert len(S) == 4 and S[1] == 3
N = nhoods_per_image * S[0]
H = nhood_size // 2
nhood, chan, x, y = np.ogrid[0:N, 0:3, -H:H+1, -H:H+1]
img = nhood // nhoods_per_image
x = x + np.random.randint(H, S[3] - H, size=(N, 1, 1, 1))
y = y + np.random.randint(H, S[2] - H, size=(N, 1, 1, 1))
idx = ((img * S[1] + chan) * S[2] + y) * S[3] + x
return minibatch.flat[idx]
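# Worked shape example (an illustrative assumption, not from the original file):
# for a minibatch of shape (16, 3, 128, 128) with nhood_size=7 and
# nhoods_per_image=128, N = 128 * 16 = 2048 random patch locations are drawn,
# and the fancy index above returns descriptors of shape (2048, 3, 7, 7).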
#----------------------------------------------------------------------------
def finalize_descriptors(desc):
if isinstance(desc, list):
desc = np.concatenate(desc, axis=0)
assert desc.ndim == 4 # (neighborhood, channel, height, width)
desc -= np.mean(desc, axis=(0, 2, 3), keepdims=True)
desc /= np.std(desc, axis=(0, 2, 3), keepdims=True)
desc = desc.reshape(desc.shape[0], -1)
return desc
#----------------------------------------------------------------------------
def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat):
assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component)
results = []
for repeat in range(dir_repeats):
dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (descriptor_component, direction)
dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # normalize descriptor components for each direction
dirs = dirs.astype(np.float32)
projA = np.matmul(A, dirs) # (neighborhood, direction)
projB = np.matmul(B, dirs)
projA = np.sort(projA, axis=0) # sort neighborhood projections for each direction
projB = np.sort(projB, axis=0)
dists = np.abs(projA - projB) # pointwise wasserstein distances
results.append(np.mean(dists)) # average over neighborhoods and directions
return np.mean(results) # average over repeats
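# Minimal usage sketch (an assumption, not part of the original file): computing
# the SWD between two synthetic descriptor matrices. A 7x7x3 neighborhood
# flattens to a 147-dimensional descriptor.
#
#   A = np.random.randn(2048, 147).astype(np.float32)
#   B = np.random.randn(2048, 147).astype(np.float32)
#   swd = sliced_wasserstein(A, B, dir_repeats=4, dirs_per_repeat=128)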
#----------------------------------------------------------------------------
def downscale_minibatch(minibatch, lod):
if lod == 0:
return minibatch
t = minibatch.astype(np.float32)
for i in range(lod):
t = (t[:, :, 0::2, 0::2] + t[:, :, 0::2, 1::2] + t[:, :, 1::2, 0::2] + t[:, :, 1::2, 1::2]) * 0.25
return np.round(t).clip(0, 255).astype(np.uint8)
#----------------------------------------------------------------------------
gaussian_filter = np.float32([
[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, 36, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]]) / 256.0
def pyr_down(minibatch): # matches cv2.pyrDown()
assert minibatch.ndim == 4
return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2]
def pyr_up(minibatch): # matches cv2.pyrUp()
assert minibatch.ndim == 4
S = minibatch.shape
res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype)
res[:, :, ::2, ::2] = minibatch
return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror')
def generate_laplacian_pyramid(minibatch, num_levels):
pyramid = [np.float32(minibatch)]
for i in range(1, num_levels):
pyramid.append(pyr_down(pyramid[-1]))
pyramid[-2] -= pyr_up(pyramid[-1])
return pyramid
def reconstruct_laplacian_pyramid(pyramid):
minibatch = pyramid[-1]
for level in pyramid[-2::-1]:
minibatch = pyr_up(minibatch) + level
return minibatch
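# Round-trip sketch (an assumption, not part of the original file): decomposing
# into a Laplacian pyramid and reconstructing should recover the input up to
# floating-point error, since each residual level stores exactly what pyr_up
# of the next level misses.
#
#   batch = np.random.rand(4, 3, 64, 64).astype(np.float32)
#   pyramid = generate_laplacian_pyramid(batch, num_levels=3)
#   recon = reconstruct_laplacian_pyramid(pyramid)
#   assert np.allclose(batch, recon, atol=1e-4)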
#----------------------------------------------------------------------------
class API:
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
self.nhood_size = 7
self.nhoods_per_image = 128
self.dir_repeats = 4
self.dirs_per_repeat = 128
self.resolutions = []
res = image_shape[1]
while res >= 16:
self.resolutions.append(res)
res //= 2
def get_metric_names(self):
return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg']
def get_metric_formatting(self):
return ['%-13.4f'] * len(self.get_metric_names())
def begin(self, mode):
assert mode in ['warmup', 'reals', 'fakes']
self.descriptors = [[] for res in self.resolutions]
def feed(self, mode, minibatch):
for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))):
desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image)
self.descriptors[lod].append(desc)
def end(self, mode):
desc = [finalize_descriptors(d) for d in self.descriptors]
del self.descriptors
if mode in ['warmup', 'reals']:
self.desc_real = desc
dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)]
del desc
dist = [d * 1e3 for d in dist] # multiply by 10^3
return dist + [np.mean(dist)]
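# Intended call sequence (a sketch based on how the metric classes are shaped,
# not code from the original file): feed real minibatches first so end() caches
# self.desc_real, then feed fakes to get one SWDx1e3 score per resolution plus
# the average.
#
#   api = API(num_images=8192, image_shape=(3, 256, 256),
#             image_dtype=np.uint8, minibatch_size=64)
#   api.begin('reals'); [api.feed('reals', mb) for mb in real_minibatches]
#   api.end('reals')
#   api.begin('fakes'); [api.feed('fakes', mb) for mb in fake_minibatches]
#   swd_scores = api.end('fakes')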
#----------------------------------------------------------------------------
| 25,263 | 45.698706 | 135 | py |
Beholder-GAN | Beholder-GAN-master/metrics/frechet_inception_distance.py | #!/usr/bin/env python3
#
# Copyright 2017 Martin Heusel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from the original implementation by Martin Heusel.
# Source https://github.com/bioinf-jku/TTUR/blob/master/fid.py
''' Calculates the Frechet Inception Distance (FID) to evaluate GANs.
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
'''
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy as sp
import os
import gzip, pickle
import tensorflow as tf
from scipy.misc import imread
import pathlib
import urllib
class InvalidFIDException(Exception):
pass
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile( pth, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString( f.read())
_ = tf.import_graph_def( graph_def, name='FID_Inception_Net')
#-------------------------------------------------------------------------------
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims is not None:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
try:
o._shape = tf.TensorShape(new_shape)
except ValueError:
o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
return pool3
#-------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
                     must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the disposable hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = images.shape[0]
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
n_batches = d0//batch_size
n_used_imgs = n_batches*batch_size
pred_arr = np.empty((n_used_imgs,2048))
for i in range(n_batches):
if verbose:
print("\rPropagating batch %d/%d" % (i+1, n_batches), end="", flush=True)
start = i*batch_size
end = start + batch_size
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size,-1)
if verbose:
print(" done")
return pred_arr
#-------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
    -- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- dist : The Frechet Distance.
Raises:
-- InvalidFIDException if nan occures.
"""
m = np.square(mu1 - mu2).sum()
#s = sp.linalg.sqrtm(np.dot(sigma1, sigma2)) # EDIT: commented out
s, _ = sp.linalg.sqrtm(np.dot(sigma1, sigma2), disp=False) # EDIT: added
dist = m + np.trace(sigma1+sigma2 - 2*s)
#if np.isnan(dist): # EDIT: commented out
# raise InvalidFIDException("nan occured in distance calculation.") # EDIT: commented out
#return dist # EDIT: commented out
return np.real(dist) # EDIT: added
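# Sanity-check sketch (an assumption, not part of the original file): two
# identical Gaussians are at distance zero, since the mean term vanishes and
# the trace term collapses to Tr(C + C - 2*sqrt(C*C)) = 0.
#
#   mu, sigma = np.zeros(4), np.eye(4)
#   assert abs(calculate_frechet_distance(mu, sigma, mu, sigma)) < 1e-6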
#-------------------------------------------------------------------------------
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
#-------------------------------------------------------------------------------
def check_or_download_inception(inception_path):
''' Checks if the path to the inception file is valid, or downloads
the file if it is not present. '''
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def _handle_path(path, sess):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
m, s = calculate_activation_statistics(x, sess)
return m, s
def calculate_fid_given_paths(paths, inception_path):
''' Calculates the FID of two paths. '''
inception_path = check_or_download_inception(inception_path)
for p in paths:
if not os.path.exists(p):
raise RuntimeError("Invalid path: %s" % p)
create_inception_graph(str(inception_path))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
m1, s1 = _handle_path(paths[0], sess)
m2, s2 = _handle_path(paths[1], sess)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
if __name__ == "__main__":
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("path", type=str, nargs=2,
help='Path to the generated images or to .npz statistic files')
parser.add_argument("-i", "--inception", type=str, default=None,
help='Path to Inception model (will be downloaded if not provided)')
parser.add_argument("--gpu", default="", type=str,
help='GPU to use (leave blank for CPU only)')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
fid_value = calculate_fid_given_paths(args.path, args.inception)
print("FID: ", fid_value)
#----------------------------------------------------------------------------
# EDIT: added
class API:
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
import config
self.network_dir = os.path.join(config.result_dir, '_inception_fid')
self.network_file = check_or_download_inception(self.network_dir)
self.sess = tf.get_default_session()
create_inception_graph(self.network_file)
def get_metric_names(self):
return ['FID']
def get_metric_formatting(self):
return ['%-10.4f']
def begin(self, mode):
assert mode in ['warmup', 'reals', 'fakes']
self.activations = []
def feed(self, mode, minibatch):
act = get_activations(minibatch.transpose(0,2,3,1), self.sess, batch_size=minibatch.shape[0])
self.activations.append(act)
def end(self, mode):
act = np.concatenate(self.activations)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
if mode in ['warmup', 'reals']:
self.mu_real = mu
self.sigma_real = sigma
fid = calculate_frechet_distance(mu, sigma, self.mu_real, self.sigma_real)
return [fid]
#----------------------------------------------------------------------------
| 11,441 | 39.574468 | 110 | py |
Beholder-GAN | Beholder-GAN-master/metrics/ms_ssim.py | #!/usr/bin/python
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Adapted from the original implementation by The TensorFlow Authors.
# Source: https://github.com/tensorflow/models/blob/master/research/compression/image_encoder/msssim.py
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
return g / g.sum()
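# Illustrative check (an assumption, not from the original file): the kernel is
# normalized by construction, so e.g. _FSpecialGauss(11, 1.5) is an 11x11 array
# whose entries sum to 1.0, the behavior the MATLAB fspecial('gaussian', 11, 1.5)
# call mimicked by the docstring above would produce.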
def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`.
This function attempts to match the functionality of ssim_index_new.m by
Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
    max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
Pair containing the mean SSIM and contrast sensitivity between `img1` and
`img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
img1 = img1.astype(np.float32)
img2 = img2.astype(np.float32)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)), axis=(1, 2, 3)) # Return for each image individually.
cs = np.mean(v1 / v2, axis=(1, 2, 3))
return ssim, cs
def _HoxDownsample(img):
return (img[:, 0::2, 0::2, :] + img[:, 1::2, 0::2, :] + img[:, 0::2, 1::2, :] + img[:, 1::2, 1::2, :]) * 0.25
def msssim(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, weights=None):
"""Return the MS-SSIM score between `img1` and `img2`.
This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
similarity for image quality assessment" (2003).
Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
Author's MATLAB implementation:
http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
    max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
weights: List of weights for each level; if none, use five levels and the
weights from the original paper.
Returns:
MS-SSIM score between `img1` and `img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
# Note: default weights don't sum to 1.0 but do match the paper / matlab code.
weights = np.array(weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
levels = weights.size
downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
im1, im2 = [x.astype(np.float32) for x in [img1, img2]]
mssim = []
mcs = []
for _ in range(levels):
ssim, cs = _SSIMForMultiScale(
im1, im2, max_val=max_val, filter_size=filter_size,
filter_sigma=filter_sigma, k1=k1, k2=k2)
mssim.append(ssim)
mcs.append(cs)
im1, im2 = [_HoxDownsample(x) for x in [im1, im2]]
# Clip to zero. Otherwise we get NaNs.
mssim = np.clip(np.asarray(mssim), 0.0, np.inf)
mcs = np.clip(np.asarray(mcs), 0.0, np.inf)
# Average over images only at the end.
return np.mean(np.prod(mcs[:-1, :] ** weights[:-1, np.newaxis], axis=0) * (mssim[-1, :] ** weights[-1]))
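# Quick sketch (an assumption, not part of the original file): the MS-SSIM of an
# image batch against itself is exactly 1.0, since ssim and cs both reduce to 1
# at every level.
#
#   imgs = np.random.randint(0, 256, size=(2, 256, 256, 3)).astype(np.float32)
#   assert abs(msssim(imgs, imgs) - 1.0) < 1e-4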
#----------------------------------------------------------------------------
# EDIT: added
class API:
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
assert num_images % 2 == 0 and minibatch_size % 2 == 0
self.num_pairs = num_images // 2
def get_metric_names(self):
return ['MS-SSIM']
def get_metric_formatting(self):
return ['%-10.4f']
def begin(self, mode):
assert mode in ['warmup', 'reals', 'fakes']
self.sum = 0.0
def feed(self, mode, minibatch):
images = minibatch.transpose(0, 2, 3, 1)
score = msssim(images[0::2], images[1::2])
self.sum += score * (images.shape[0] // 2)
def end(self, mode):
avg = self.sum / self.num_pairs
return [avg]
#----------------------------------------------------------------------------
| 8,160 | 39.60199 | 128 | py |
Beholder-GAN | Beholder-GAN-master/metrics/inception_score.py | # Copyright 2016 Wojciech Zaremba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from the original implementation by Wojciech Zaremba.
# Source: https://github.com/openai/improved-gan/blob/master/inception_score/model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert(type(images) == list)
assert(type(images[0]) == np.ndarray)
assert(len(images[0].shape) == 3)
#assert(np.max(images[0]) > 10) # EDIT: commented out
#assert(np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = 100
with tf.Session() as sess:
preds = []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
for i in range(n_batches):
#sys.stdout.write(".") # EDIT: commented out
#sys.stdout.flush()
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred = sess.run(softmax, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
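# Degenerate-case note (an assumption, not from the original file): if every
# image yields the same softmax prediction, each KL term is zero and the score
# is exp(0) = 1; confident, diverse predictions push the score higher, up to
# the number of classes at most.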
# This function is called automatically.
def _init_inception():
global softmax
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
      print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR) # EDIT: increased indent
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session() as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
try:
o._shape = tf.TensorShape(new_shape)
except ValueError:
o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3), w)
softmax = tf.nn.softmax(logits)
#if softmax is None: # EDIT: commented out
# _init_inception() # EDIT: commented out
#----------------------------------------------------------------------------
# EDIT: added
class API:
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
import config
globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception')
self.sess = tf.get_default_session()
_init_inception()
def get_metric_names(self):
return ['IS_mean', 'IS_std']
def get_metric_formatting(self):
return ['%-10.4f', '%-10.4f']
def begin(self, mode):
assert mode in ['warmup', 'reals', 'fakes']
self.images = []
def feed(self, mode, minibatch):
self.images.append(minibatch.transpose(0, 2, 3, 1))
def end(self, mode):
images = list(np.concatenate(self.images))
with self.sess.as_default():
mean, std = get_inception_score(images)
return [mean, std]
#----------------------------------------------------------------------------
| 5,305 | 34.851351 | 110 | py |
Beholder-GAN | Beholder-GAN-master/metrics/__init__.py | # empty
| 8 | 3.5 | 7 | py |
Beholder-GAN | Beholder-GAN-master/utils/plot_beauty_distribution.py | import os
import csv
import numpy as np
import argparse
import matplotlib.pyplot as plt
# initialize parser arguments
parser = argparse.ArgumentParser()
parser.add_argument('--csv', '-csv', help='path to csv file', default='../All_Ratings.csv', type=str)
parser.add_argument('--density', '-density', help='configure plot density', default=0.05, type=float)
args = parser.parse_args()
# initiate list of beauty rates means
beauty_rates_mean = []
# read raters csv file
with open(args.csv, 'r') as csvfile:
# Dictionary to load images from csv
# key: image name
# value: list of 60 beauty rates from raters
csv_dict = {}
raw_dataset = csv.reader(csvfile, delimiter=',', quotechar='|')
# fill the dictionary
for i, row in enumerate(raw_dataset):
row = ','.join(row)
row = row.split(',')
# create list of rates for each image
if row[1] in csv_dict:
csv_dict[row[1]][0].append(float(row[2]))
else:
csv_dict[row[1]] = [[float(row[2])]]
# move dict values to a list of per-image mean beauty rates (ratings span [0, 5])
for key, value in csv_dict.items():
beauty_rates_mean.append(np.mean(np.asarray(value, dtype=np.float32)))
# create a x axis with the given density and zeros as y axis to be filled next
x_values = np.arange(0.0, 5.0, args.density)
y_values = [0]*len(x_values)
# for each mean, increase the counter in the correct location
for val in beauty_rates_mean:
y_values[int(round(val/args.density))] += 1
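# worked example (an illustrative assumption): with the default density of 0.05,
# a mean rating of 2.37 lands in bin int(round(2.37 / 0.05)) = 47, i.e. the
# bucket whose x value is 47 * 0.05 = 2.35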
# plot the results
plt.plot(x_values, y_values)
plt.xlabel('beauty rates')
plt.ylabel('number of subjects')
plt.title('Beauty Rates Distribution')
plt.grid(True)
plt.savefig(os.path.basename(args.csv).split(".")[-2]+ ".png")
| 1,767 | 30.017544 | 101 | py |
Beholder-GAN | Beholder-GAN-master/utils/transform_images.py | import os
from PIL import Image
# select dataset folder to check and destination folder to put output images in
path = '../datasets/beauty_dataset/img/beauty_dataset'
dest_path = '../datasets/beauty_dataset/img/beauty_dataset_scaled'
# destination resolution
dest_res = 2 ** 8
for i, file in enumerate(os.listdir(path)):
# open image using PIL to detect resolution.
img = Image.open(os.path.join(path,file))
width, height = img.size
# pad image if necessary
if width != height:
# create a new black picture in size of (max(height,width), max(height,width))
padded_size = (max(height,width), max(height,width))
        black_img = Image.new("RGB", padded_size)
# define origin to paste the image on the newly created image
location_x = int((padded_size[0] - width) / 2)
location_y = int((padded_size[1] - height) / 2)
# paste the image
black_img.paste(img, (location_x,location_y))
img = black_img
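        # worked example (an illustrative assumption): a 300x200 (width x height)
        # image yields a 300x300 black canvas with the original pasted at
        # location_x = (300 - 300) // 2 = 0, location_y = (300 - 200) // 2 = 50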
# resize image to destination resolution and save in dest folder
img = img.resize((dest_res,dest_res),Image.ANTIALIAS)
img.save(os.path.join(dest_path,file),quality=95)
if i % 100 == 0:
print("saved {}/{} images".format(i,len(os.listdir(path))))
| 1,274 | 35.428571 | 86 | py |
Themis | Themis-master/Themis1.0/main.py | import math
import random
import sys
import xml.etree.ElementTree as ET
import Themis
def load_soft_from_settings():
names=[]
types=[]
values=[]
num_values=[]
tree = ET.parse('settings.xml')
root = tree.getroot()
software_name = root.find("name").text
command = root.find("command").text
inputs = root.find("inputs")
random.seed(int(root.find("seed").text))
    for uid, obj in enumerate(inputs.findall("input")):
        # append rather than index-assign: the lists start out empty, so
        # names[uid] = ... would raise an IndexError
        names.append(obj.find("name").text)
        types.append(obj.find("type").text)
        if types[uid] == "categorical":
            values.append([elt.text for elt in obj.find("values").findall("value")])
        elif types[uid] == "continuousInt":
            values.append(range(int(obj.find("bounds").find("lowerbound").text),
                                int(obj.find("bounds").find("upperbound").text)))
        else:
            assert False  # unknown input type
        num_values.append(len(values[uid]))
print names
print values
print num_values
print command
print types
return Themis.soft(names, values, num_values, command, types)
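# A settings.xml of the shape this parser expects might look like the following
# (an illustrative assumption; the attribute names and bounds are made up, and
# only the element layout is derived from the parsing code above):
#
#   <settings>
#     <name>software</name>
#     <command>python software.py</command>
#     <seed>42</seed>
#     <inputs>
#       <input>
#         <name>sex</name>
#         <type>categorical</type>
#         <values><value>Male</value><value>Female</value></values>
#       </input>
#       <input>
#         <name>age</name>
#         <type>continuousInt</type>
#         <bounds><lowerbound>18</lowerbound><upperbound>100</upperbound></bounds>
#       </input>
#     </inputs>
#   </settings>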
if __name__ == '__main__':
soft = load_soft_from_settings()
soft.printSoftwareDetails()
#D = soft.discriminationSearch(0.2,0.99,0.1,"groupandcausal")
#print "\n\n\nThemis has completed \n"
#print "Software discriminates against ", D
#X=[0,2]
#print soft.groupDiscrimination(X,99,0.1)
#print soft.causalDiscrimination(X,99,0.1)
#print soft.getTestSuite()
| 1,506 | 27.433962 | 83 | py |
Themis | Themis-master/Themis1.0/Themis.py | import sys
import itertools
import commands
import random
import math
import copy
class soft:
conf_zValue = {80:1.28,90:1.645,95:1.96, 98:2.33, 99:2.58}
MaxSamples=50
SamplingThreshold = 10
cache = {}
def __init__(self, names, values, num, command, type):
self.attr_names = copy.deepcopy(names)
self.values = copy.deepcopy(values)
self.num = copy.deepcopy(num)
self.type = copy.deepcopy(type)
self.command = copy.deepcopy(command)
    def getAllValues(self):
        # renamed from getValues(): the original name was shadowed by the
        # attribute-specific getValues(attr_name) defined below
        return self.values
    def getCommand(self):
        return self.command
    def getAttributeNames(self):
        return self.attr_names
    def getRange(self, attr_name):
        # attr_names is a list, so enumerate() replaces the original
        # dict-style iteritems() call, which lists do not support
        for index, att_name in enumerate(self.attr_names):
            if att_name == attr_name:
                return self.num[index]
    def getValues(self, attr_name):
        for index, att_name in enumerate(self.attr_names):
            if att_name == attr_name:
                return self.values[index]
def printSoftwareDetails(self):
print "Number of attributes are ", len(self.attr_names),"\n"
for attr_name in self.attr_names:
print "Attribute name is ",attr_name
print "Number of values taken by this attribute =",self.getRange(attr_name)
print "The different values taken are ",self.getValues(attr_name),"\n"
    def randomInput(self, I, X, attr):
        # leftover debug statements (print I; print X; exit()) removed -- they
        # terminated the process before any input could be generated
        i = 0
inp = []
while i < len(I):
if i in X:
inp.append(attr[X.index(i)])
else:
inp.append(random.randint(0,I[i]-1))
i+=1
return inp
def SoftwareTest(self, inp,num, values):
i=0
actual_inp = []
running_command = self.command
while i<len(inp):
actual_inp.append(values[i][inp[i]])
running_command += " "
running_command += str(values[i][inp[i]])
i+=1
return (commands.getstatusoutput(running_command)[1] == "1")
def decodeValues(self, index, num, X):
attr=[]
copy = index
for x in X:
a = num[x]
attr.append(copy%a)
copy -= (copy%a)
copy = copy/a
return attr
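    # worked example (an illustrative assumption): with num = [3, 4] and
    # X = [0, 1], decodeValues(7, num, X) peels off 7 % 3 = 1, then
    # (7 - 1) / 3 = 2 and 2 % 4 = 2, returning [1, 2] -- the mixed-radix
    # digits selecting value 1 of attribute 0 and value 2 of attribute 1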
def discriminationSearch(self,theta, confidence, epsilon, type):
Scausal=[]
if("causal" in type and "group" in type):
Scausal = self.discriminationSearch(theta, confidence, epsilon, "causal")
i=0
lst = []
while i<len(self.attr_names):
lst.append(i)
i+=1
i=1
D = []
while i<= len(self.attr_names):
subsets = list(itertools.combinations(tuple(lst), i))
for X in subsets:
found = False
for d in D:
if(set(d)< set(list(X))):
found = True
break
if(found):
continue
if("group" in type):
score = self.groupDiscrimination(list(X),confidence,epsilon)
elif("causal" in type):
score = self.causalDiscrimination(list(X),confidence,epsilon)
if(score > theta):
D.append(list(X))
#print self.group_discrimination(list(X),90,0.1)
i+=1
S=[]
for d in D:
s = []
for att in d:
s.append(self.attr_names[att])
S.append(s)
if("group" in type and "causal" in type):
dict={"group":S, "causal":Scausal["causal"]}
elif("group" in type):
dict={"group":S}
else:
dict={"causal":S}
return dict
def getTestSuite(self):
inp_lst = []
for inp,out in self.cache.iteritems():
curr=[]
i=0
while i<len(inp):
curr.append(self.values[i][inp[i]])
i+=1
inp_lst.append(curr)
return inp_lst
def ProcessCacheCausal (self,inp,X):
tr=0
fl=0
for cache_tuple in self.cache.items():
#print cache_tuple
i=0
match = True
j=0
while j<len(inp):
if(j in X):
j+=1
continue
if(not (cache_tuple[0][j] == inp[j])):
match = False
break
i+=1
j+=1
if(match==True and not cache_tuple[1]==self.cache[tuple(inp)]):
return True
return False
def merge(self, inp, attr, X):
#print inp, attr, X
i=0
while i<len(inp):
if(i in X):
inp[i] = attr[X.index(i)]
i+=1
return inp
def causalDiscrimination (self, X, confidence, epsilon):
count = 0
r = 0
numValues = 1
for x in X:
numValues *= self.num[x]
while r < self.MaxSamples :
inp = self.randomInput(self.num,[],[])
if(tuple(inp) in self.cache.keys()):
out = self.cache[tuple(inp)]
else:
out = self.SoftwareTest( inp,self.num, self.values)
self.cache[tuple(inp)] = out
found_in = self.ProcessCacheCausal(inp, X)
#Process cache to find atleast one with not out as output
if r > self.SamplingThreshold:
p = count*1.0/r
if (self.conf_zValue[int(100*confidence)] * math.sqrt(p*(1-p)*1.0/r) < epsilon):
break
if(found_in):
count+=1
r+=1
continue
i=0
found = False
while i < numValues:
#print "here",i, numValues
attr=self.decodeValues(i,self.num,X)
new_inp = self.merge(inp,attr,X)
if(tuple(new_inp) in self.cache.keys()):
tmpout = self.cache[tuple(new_inp)]
else:
tmpout = self.SoftwareTest(new_inp,self.num, self.values)
self.cache[tuple(new_inp)] = tmpout
if(not tmpout==out):
found = True
break
i+=1
if(found):
count+=1
r += 1
return p
def ProcessCache (self, X, attr):
tr=0
fl=0
for cache_tuple in self.cache.items():
#print cache_tuple
i=0
match = True
for x in X:
if(not (cache_tuple[0][x] == attr[i])):
match = False
break
i+=1
if(match==True and int(cache_tuple[1])==1):
tr+=1
elif (match):
fl+=1
return (tr,fl)
def groupDiscrimination (self, X, confidence, epsilon):
minGroup = float("inf")
maxGroup = 0
numValues = 1
for x in X:
numValues *= self.num[x]
maxPossible =1
i=0
while i<len(self.num):
if i in X:
i+=1
continue
maxPossible*=self.num[i]
i+=1
i=0
while i < numValues:
attr = self.decodeValues(i,self.num,X)
tr = 0
fal=0
r = tr+fal
count = tr
p=1
added_now=[]
while r <= self.MaxSamples:
inp = self.randomInput(self.num,X,attr)
if(tuple(inp) in self.cache.keys()):
out = self.cache[tuple(inp)]
if(r==maxPossible):
p = count*1.0/r
break
if(inp in added_now):
continue
else:
out = self.SoftwareTest(inp,self.num, self.values)
self.cache[tuple(inp)] = out
added_now.append(inp)
r+=1
if out:
count += 1
p = count*1.0/r
if r > self.SamplingThreshold:
if (self.conf_zValue[int(100*confidence)] * math.sqrt(p*(1-p)*1.0/r) < epsilon):
break
if(maxGroup < p):
maxGroup = p
if(minGroup > p):
minGroup = p
i+=1
return maxGroup - minGroup
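    # Stopping-rule sketch (an assumption, not part of the original file): with
    # confidence 0.99 (z = 2.58 from conf_zValue) and epsilon = 0.1, sampling
    # stops once the margin z * sqrt(p*(1-p)/r) drops below epsilon; at p = 0.5
    # that needs r > (2.58/0.1)**2 * 0.25, roughly 167 samples, so with
    # MaxSamples = 50 the loops above are usually capped by MaxSamples instead.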
| 8,590 | 27.44702 | 100 | py |
Themis | Themis-master/Themis1.0/software.py | import sys
sex = sys.argv[1]
race = sys.argv[3]
if(sex=="Male" and race=="Red"):
print "1"
else:
print "0"
| 116 | 12 | 32 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/wrapper.py | '''
Wrapper script to call each of the subject systems depending on the input arguments
'''
import sys
import commands
'''
Usage :
argv[1] : Name of the subjecct system
argv[2] : The dataset to train the classifier
argv[3] : Type of discrimination (Group/Causal)
argv[4] : The sensitive argument to train the classfier
argv[5] : Tyep of attribute to test discrimination against
'''
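# Example invocation (an illustrative assumption, built from the argument list
# documented above): test subject system A on the census data for group
# discrimination, training on sex and testing against race:
#
#   python wrapper.py A census group sex race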
subjectSystem = sys.argv[1]
data = sys.argv[2].strip()
discType = (sys.argv[3])
row = (sys.argv[4])
column = (sys.argv[5])
if discType == "group":
group=1
else:
group = 0
if data=="credit":
dataset = "Suites/Credit"
else:
dataset = "Suites/Census"
if row=="race":
r=8
else:
r=9
if column=="race":
c=8
else:
c=9
if subjectSystem=='A':
if "Credit" in dataset:
if group==1:
cmd1="cd A/"
cmd2 = "python AGroupCredit.py ../"+dataset
else:
cmd1="cd A/"
cmd2 = "python ACausalCredit.py ../"+dataset
else:
if group==1:
cmd1="cd A/"
cmd2="python AGroup.py ../"+dataset+" "+str(r)+" "+str(c)
else:
if c==8:
cmd1="cd A/"
cmd2="python ACausalRace.py ../"+dataset+" "+str(r)+" "+str(c)
else:
cmd1="cd A/"
cmd2="python ACausalGender.py ../"+dataset+" "+str(r)+" "+str(c)
print cmd1+";"+cmd2
out = commands.getoutput(cmd1+";"+cmd2)
print out
if subjectSystem=='B':
if "Credit" in dataset:
if group==1:
cmd1="cd B/BGroupCredit; make clean; make"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
cmd1="cd B/BCausalCredit; make clean; make"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
if group==1 and r==8:
cmd1="cd B/Brace; make clean; make"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif group==1 and r==9:
cmd1="cd B/Bgender; make clean; make"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif r==8:
cmd1="cd B/Bracecausal; make clean; make"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif r==9:
cmd1="cd B/Bgendercausal; make clean; make"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
print cmd1+";"+cmd2+";"+cmd3
out = commands.getoutput(cmd1+";"+cmd2+";"+cmd3)
print out
if subjectSystem =='C':
if "Credit" in dataset:
dataset = "Suites/CCreditdata"
cmd1="cd C/Ccredit"
if group==1:
cmd2="python Ccreditgroup.py ../../"+dataset
else:
cmd2="python Ccreditcausal.py ../../"+dataset
else:
if r==8:
dataset = "Suites/Cracedata"
if group==1:
cmd1="cd C/Crace"
cmd2="python Cgroup.py ../../"+dataset+" "+str(c)
else:
cmd1="cd C/Crace"
cmd2="python Ccausal.py ../../"+dataset+" "+str(c)
elif r==9:
dataset = "Suites/Cgenderdata"
if group==1:
cmd1="cd C/Cgender"
cmd2="python Cgroup.py ../../"+dataset+" "+str(c)
else:
cmd1="cd C/Cgender"
cmd2="python Ccausal.py ../../"+dataset+" "+str(c)
print cmd1+";"+cmd2
out = commands.getoutput(cmd1+";"+cmd2)
print out
if subjectSystem=='D':
if "Credit" in dataset:
dataset = "Suites/DCreditdata"
if group==1:
cmd1="cd D/DGroupCredit; make clean; make;"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
cmd1="cd D/DCausalCredit; make clean; make;"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
if group==1 and r==8:
dataset = "Suites/Dracedata"
cmd1="cd D/Drace; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif group==1 and r==9:
dataset = "Suites/Dgenderdata"
cmd1="cd D/Dgender; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif r==8:
dataset = "Suites/Dracedata"
cmd1="cd D/Dracecausal; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
elif r==9:
dataset = "Suites/Dgenderdata"
cmd1="cd D/Dgendercausal; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
print cmd1+";"+cmd2+";"+cmd3
out = commands.getoutput(cmd1+cmd2+";"+cmd3)
print out
if subjectSystem =='E':
if "Credit" in dataset:
cmd1="cd E/"
if group==1:
cmd2="python Ecreditgroup.py ../"+dataset
else:
cmd2="python Ecreditcausal.py ./../"+dataset
else:
if group==1:
cmd1="cd E/"
cmd2="python Egroup.py ./../"+dataset+" "+str(r)+" "+str(c)
else:
cmd1="cd E/"
cmd2="python Ecausal.py ./../"+dataset+" "+str(r)+" "+str(c)
print cmd1+";"+cmd2
out = commands.getoutput(cmd1+";"+cmd2)
print out
if subjectSystem=='F':
if "Credit" in dataset:
if group==1:
cmd1="cd F/FGroupCredit; make clean; make;"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
cmd1="cd F/FCausalCredit; make clean; make;"
cmd2="./dtree ../../"+dataset
cmd3="cd ../.."
else:
if group==1 :
cmd1="cd F/Fgroup; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
else:
cmd1="cd F/Fcausal; make clean; make;"
cmd2="./dtree ../../"+dataset+" "+str(c)
cmd3="cd ../.."
print cmd1+";"+cmd2+";"+cmd3
out = commands.getoutput(cmd1+cmd2+";"+cmd3)
print out
if subjectSystem =='G':
if "Credit" in dataset:
cmd1="cd G/"
if group==1:
cmd2="python Gcreditgroup.py ./../"+dataset
else:
cmd2="python Gcreditcausal.py ./../"+dataset
else:
if group==1:
cmd1="cd G/"
cmd2="python Ggroup.py ./../"+dataset+" "+str(r)+" "+str(c)
else:
cmd1="cd G/"
cmd2="python Gcausal.py ./../"+dataset+" "+str(r)+" "+str(c)
print cmd1+";"+cmd2
out = commands.getoutput(cmd1+";"+cmd2)
print out
if subjectSystem =='H':
if "Credit" in dataset:
cmd1="cd H/"
if group==1:
cmd2="python Hcreditgroup.py ./../"+dataset
else:
cmd2="python Hcreditcausal.py ./../"+dataset
else:
if group==1:
cmd1="cd H/"
cmd2="python Hgroup.py ./../"+dataset+" "+str(r)+" "+str(c)
else:
cmd1="cd H/"
cmd2="python Hcausal.py ./../"+dataset+" "+str(r)+" "+str(c)
print cmd1+";"+cmd2
out = commands.getoutput(cmd1+";"+cmd2)
print out
| 5,896 | 23.26749 | 82 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test-suite file with respect to race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
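# Rows in the suite come in blocks: the same input repeated once per value of
# the tested attribute (5 rows for race, 2 for gender). A block counts as
# causally discriminated against when it contains both a positive and a
# non-positive output.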
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==5 and type==0):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
if(rowNum==2 and type==1):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 1,106 | 17.762712 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/Table1GroupScore.py | '''
This script calculates the group discrimination score of the given test-suite file with respect to race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
             0 for race
1 for gender
'''
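# For each value of the tested attribute (5 race values, 2 gender values) the
# script tallies positive vs. non-positive outputs; the reported score is
# 100 * (max - min) over the per-group positive-output fractions.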
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
if(type==0):
pos = [0,0,0,0,0]
neg = [0,0,0,0,0]
else:
pos =[0,0]
neg =[0,0]
for line in f:
line = line.strip()
line =line.split(',')
if(float(line[-1])>0):
if(type==1):
if(int(line[8])<len(pos)):
pos[int(line[8])]+=1
else:
pos[int(line[7])]+=1
if(float(line[-1])<=0):
if(type==1):
if(int(line[8])<len(neg)):
neg[int(line[8])]+=1
else:
neg[int(line[7])]+=1
i =0
max = 0
min = 1
while i<len(pos):
ratio = pos[i]*1.0/(pos[i]+neg[i])
if(ratio >= max):
max = ratio
if(ratio < min):
min = ratio
i+=1
val = 100*(max-min)
if(val < 0.01):
val=0.01
print("%.2f" %val)
#print 100*(max-min)
| 1,077 | 17.912281 | 107 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/Atestcredit.py | '''
Test the Subject System A on Credit dataset to generate the output for the input given as argv arguments.
All the inputs are assumed to be space separated.
'''
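# Hypothetical invocation: the training file is followed by the 20
# space-separated attribute values of the single input to classify, e.g.
#   python Atestcredit.py ../Suites/Credit <v1> <v2> ... <v20>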
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
sens_arg =9# int(sys.argv[2])
minInp = 50000
max_inp=5
trainfile = sys.argv[1]
random.seed(12)
#fixed seed to get the same test suite each time
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {};
sens = []
option =1
with open(trainfile, "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
print np.sign(np.dot(w, inp))
| 1,801 | 20.97561 | 128 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/ACausalGender.py | '''
Causal discrimination testing for Subject System A
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
8 means race and 9 means gender
'''
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
import itertools  # needed by findsubsets below
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
minInp=50000
#set printout to 0 to not print the test suite
printout = 1
sens_arg = int(sys.argv[2])
fixval=9
#Output file for the test suite
outputfile = "../Suites/freshAcausal"+str(sens_arg)+str(fixval)
random.seed(11)
#print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
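# option selects how outputs are obtained below: 3/4 replay the inputs
# recorded in the `already` suite (4 shells out to Atestcensus.py as a black
# box, 3 queries the trained weights directly); other values sample fresh
# random inputs instead.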
option =4
f = open(outputfile,"w")
already = "../Suites/A"+str(sens_arg)+str(fixval)+"causal"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
inpstr=''
for a in line:
inp.append(int(a))
inpstr+=a+' '
if option ==4 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den,num,den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
out = np.sign(np.dot(w, inp))
str1=""
inpstr=""
for x in inp:
str1 += str(x)+","
inpstr +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg);
            # stop once the 2.5-sigma margin of error on the estimate drops below 0.05
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
num_inp+=1
def findsubsets(S,m):
return set(itertools.combinations(S, m))
#fixed_atr = 9
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
#fixed[fixed_str-1]=1
check_ratio(fixed)
print "output is in ",outputfile
f.close()
| 6,388 | 25.620833 | 127 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/ACausalRace.py | '''
Causal discrimination testing for Subject System A
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
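# The replay below mirrors ACausalGender.py but perturbs race (fixval = 8,
# five attribute values) instead of gender when re-querying the model on each
# recorded input.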
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
import itertools  # needed by findsubsets below
random.seed(1996)
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
#set printout to 0 to not print the test suite
printout=1
sens_arg = int(sys.argv[2])
fixval=8
outputfile = "../Suites/freshAcausal"+str(sens_arg)+str(fixval)
#print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
option =4
f = open(outputfile,"w")
already = "../Suites/A"+str(sens_arg)+str(fixval)+"causal"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed):
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
inpstr=''
for a in line:
inp.append(int(a))
inpstr+=a+' '
if option ==4 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, 10000) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
#out = np.sign(np.dot(w, inp))
str1=""
inpstr=""
for x in inp:
str1 += str(x)+","
inpstr+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
                if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
num_inp+=1
#print pos*100.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
#fixed_atr = 9
fixed = [0,0,0,0,0,0,0,1,0,0,0,0,0]
#fixed[fixed_str-1]=1
check_ratio(fixed)
f.close()
print "output is in ",outputfile
| 6,473 | 25.752066 | 127 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/ACausalCredit.py | '''
Causal discrimination testing for Subject System A on Credit dataset
Inputs :
argv[1] : Train file
'''
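# Sketch of the replayed causal test: for every input recorded in
# ../Suites/Acausalcredit, toggle the sex attribute (zero-based index 8)
# between its two values and re-query the model; the score is the percentage
# of inputs whose output flips.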
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
import itertools  # needed by findsubsets below
random.seed(22)
max_inp = 4000
minInp=4000
printout=1
sens_arg = 9
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
fixval=9
outputfile = "../Suites/freshAcausalcredit"+str(sens_arg)+str(fixval)
option=4
sensitive = {}
sens = []
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
already= "../Suites/Acausalcredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
f = open(outputfile,"w")
def check_ratio(fixed):
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
while i<2:
line[8] = str(i)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
inpstr=''
for a in line:
inp.append(int(a))
inpstr+=a+' '
if option ==4 :
out = commands.getoutput("python Atestcredit.py "+sys.argv[1]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout==1:
f.write(strinp+","+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
den+=1
print "Score is ",num*100.0/den
return
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, 10000) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
out = np.sign(np.dot(w, inp))
str1=""
inpstr=""
for x in inp:
str1 += str(x)+","
inpstr += str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcredit.py "+sys.argv[1]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
                if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
num_inp+=1
def findsubsets(S,m):
return set(itertools.combinations(S, m))
#fixed_atr = 9
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
#fixed[fixed_str-1]=1
check_ratio(fixed)
f.close()
print "output is in ",outputfile
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
| 6,127 | 25.643478 | 127 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/AGroupCredit.py | '''
Group discrimination testing for Subject System A for Credit dataset
Inputs :
argv[1] : Train file
'''
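# Sketch: train the fairness-constrained classifier on argv[1], replay the
# inputs recorded in ../Suites/Acredit (option 4 shells out to
# Atestcredit.py), and report 100 * (P(+ | sex=1) - P(+ | sex=0)).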
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
#Minimum number of inputs to start applying the confidence check optimization
minInp = 15000
#Maximum number of inputs to test against
max_inp=15000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Training file
trainfile = sys.argv[1]
random.seed(2)
#fixed seed to get the same test suite each time
sens_arg=9
fixval=9
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {};
sens = []
outputfile = "../Suites/freshAcredit"+str(sens_arg)+str(fixval)+".txt"
option=4
already = "../Suites/Acredit"
with open(trainfile, "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);
#print "software has been trained"
f = open(already,"r")
done = {}
for line in f:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
f = open(outputfile,"w")
def check_ratio(fixed):
if option==3 or option==4:
pos0=0
pos1=0
neg0=0
neg1=0
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr +=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = np.sign(np.dot(w, inp))
else:
out = commands.getoutput("python Atestcredit.py "+sys.argv[1]+" "+inpstr)
if(out>0):
if((inp[8]) ==1):
pos1+=1
else:
pos0+=1
else:
if((inp[8]) ==1):
neg1+=1
else:
neg0+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
print "score is ",100*(pos1*1.0/(pos1+neg1) - pos0*1.0/(pos0+neg0))
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
inpstr=""
str1=""
for x in inp:
str1 += str(x)+","
inpstr+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcredit.py "+sys.argv[1]+" "+inpstr)
elif option==2:
out = np.sign(np.dot(w, inp))
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
        if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
# print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed)
f.close()
print "output is in ",outputfile
| 5,298 | 23.307339 | 128 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/Atestcensus.py | '''
Test the Subject System A on Census dataset to generate the output for the input given as argv arguments.
All the inputs are assumed to be space separated.
'''
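# Hypothetical invocation: training file, sensitive-attribute index (8 race /
# 9 gender), then the 13 space-separated attribute values to classify, e.g.
#   python Atestcensus.py ../Suites/Census 9 <v1> <v2> ... <v13>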
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
sens_arg = int(sys.argv[2])
trainfile = sys.argv[1]
random.seed(12)
#fixed seed to get the same test suite each time
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {};
sens = []
option =1
with open(trainfile, "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print np.sign(np.dot(w, inp))
| 1,768 | 21.679487 | 128 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/AGroup.py | '''
Group discrimination testing for Subject System A
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
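# Sketch: replay the recorded suite, bucket outputs by the value of the
# tested attribute (two buckets for gender, five for race), and report
# 100 * (max - min) over the per-bucket positive-classification rates.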
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
sens_arg = int(sys.argv[2])
fixval = int(sys.argv[3])
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
#Maximum number of inputs to test against
max_inp=50000
#Training file
trainfile = sys.argv[1]
#Output file for the test suite
outputfile = "../Suites/freshA"+str(sens_arg)+str(fixval)
random.seed(12)
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {};
sens = []
option=4
with open(trainfile, "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
f = open(outputfile,"w")
already = "../Suites/A"+str(sens_arg)+str(fixval)
def check_ratio(fixed):
fix_atr = []
if option==3 or option==4:
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = np.sign(np.dot(w, inp))
else:
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = np.sign(np.dot(w, inp))
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
#print (max-min)*100
def findsubsets(S,m):
return set(itertools.combinations(S, m))
if(sens_arg==9 and fixval==8):
fixed = [0,0,0,0,0,0,0,1,0,0,0,0,0]
elif (sens_arg==9 and fixval==9):
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
elif (sens_arg==8 and fixval==9):
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
else:
fixed = [0,0,0,0,0,0,0,1,0,0,0,0,0]
check_ratio(fixed)
print "output written in ", outputfile
f.close()
| 6,049 | 24.854701 | 128 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/fair_classification/loss_funcs.py | import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
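    # hinge loss: sum_i max(0, 1 - y_i * (w . x_i))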
yz = y * np.dot(X,w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1-yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam)/2.0) * np.sum([elem*elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
idx = X>0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
| 2,268 | 22.884211 | 82 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/A/fair_classification/utils.py | import numpy as np
from random import seed, shuffle
import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
#print x[0],y[0],x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w,x,y): # dont confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified to negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
        if sep_constraint == True: # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
return w.x
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma=None):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
perm = range(0,n_samples) # shuffle the data before creating each fold
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
    # Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print "Accuracy: %0.2f" % (np.mean(acc_arr))
print "Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos)
print "P-rule achieved: %0.0f%%" % (p_rule)
print "Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr]))
print
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print
print "Total data points: %d" % (len(x_control))
print "# non-protected examples: %d" % (non_prot_all)
print "# protected examples: %d" % (prot_all)
print "Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all)
print "Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all)
print "P-rule is: %0.0f%%" % ( p_rule )
return p_rule
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if str(type(k)) != "<type 'numpy.float64'>" and type(k) != int and type(k) != np.int64:
print str(type(k))
print "************* ERROR: Input arr does not have integer types"
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print "Either the model (w) or the predicted labels should be None"
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
    If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
    If the model is not None, we just compute the dot product of the model and x_arr
    for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
    and we have to compute the distance using the project function
    this function will return -1 if the constraint specified by the thresh parameter is not satisfied
    otherwise it will return +1
if the return value is >=0, then the constraint is satisfied
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print "Covariance is", cov
print "Diff is:", ans
print
return ans
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
    returns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
        arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
        if bin_attr == False: # if it's a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from intgers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
#print attr, attr_name, sensitive_attrs_to_cov_thresh[attr]
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
    # vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print "LOG: testing for multiplicative factor: %0.2f" % c
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
        # plot the correlations only for the first sensitive attribute; the plotting can be extended to the other values, but as a proof of concept we just show one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
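    # p% rule: ratio of the two groups' positive-classification rates (keys 0
    # and 1 of the sensitive attribute), expressed as a percentage.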
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
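    # The separating line satisfies w[0] + w[1]*x + w[2]*y = 0 (intercept
    # first); solving for y at the two given x values yields the endpoints.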
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
| 27,188 | 41.350467 | 357 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Htest.py | from __future__ import division
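# Prediction helper for the H drivers: trains an SVM on argv[1] and prints the
# class predicted for the 13 feature values in argv[3:] (argv[2] carries the
# sensitive-attribute index and is not used here).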
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import itertools
import sys
random.seed(1)
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
out = clf.predict([inp])[0]
print out | 747 | 14.914894 | 54 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Hcreditgroup.py | '''
Group discrimination testing for Subject System H on the credit dataset
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
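# Usage sketch (hypothetical file name for the comma-separated train file whose
# last column is the class label):
#   python Hcreditgroup.py credit_train.csv
# The sensitive-attribute index is hard-coded to 9 (gender) below.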
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(1)
outputfile = "../Suites/freshHcredit9.txt"
option =4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
already = "../Suites/Hcredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=9
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr =''
while i<len(inp):
inpstr +=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Hcredittest.py "+sys.argv[1]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Hcredittest.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac = pos*1.0/(pos+neg)
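			# Stop early once the z=2.5 margin of error z*sqrt(p*(1-p)/n) of
			# the positive fraction falls below 0.05 and enough inputs have
			# been drawn.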
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
#print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
f.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,206 | 25.840206 | 94 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Hcreditcausal.py | '''
Causal discrimination testing for Subject System H on the credit dataset
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
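# Usage sketch (hypothetical file name):
#   python Hcreditcausal.py credit_train.csv
# The sensitive-attribute index is hard-coded to 9 (gender) below.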
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(1)
num_test=0
outputfile = "../Suites/freshHcreditcausal9.txt"
option =4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
already = "../Suites/Hcausalcredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=9
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Hcredittest.py "+sys.argv[1]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if out>0:
out = 1
else:
out = 0
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Hcredittest.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,103 | 25.889868 | 102 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Hcredittest.py | from __future__ import division
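# Prediction helper for the H credit drivers: trains an SVM on argv[1] and
# prints the class predicted for the 20 feature values in argv[2:].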
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import itertools
import sys
random.seed(1)
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
out = clf.predict([inp])[0]
print out | 747 | 14.914894 | 54 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Hgroup.py | '''
Group discrimination testing for Subject System H
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
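# Usage sketch (hypothetical file name; argv[2] selects the sensitive
# attribute, 8 for race or 9 for gender):
#   python Hgroup.py census_train.csv 9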
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(1)
outputfile = "../Suites/freshH"+sys.argv[2]+".txt"
option =4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../Suites/H"+sys.argv[2]
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=int(sys.argv[2])
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr = ''
while i<len(inp):
inpstr +=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Htest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Htest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
frac = pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
break
#print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
sense_arg = int(sys.argv[2])
fixed[sense_arg-1]=1
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,202 | 25.545918 | 104 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/H/Hcausal.py | '''
Causal discrimination testing for Subject System H
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
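# Usage sketch (hypothetical file name):
#   python Hcausal.py census_train.csv 9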
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import commands
import itertools
import sys
#Maximum number of inputs to test against
maxinp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(1)
outputfile = "../Suites/freshHcausal"+sys.argv[2]+".txt"
option =4
f = open(outputfile,"w")
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
already = "../Suites/H"+sys.argv[2]+"causal"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=int(sys.argv[2])
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
inpstr=''
for a in line:
inpstr +=a+' '
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Htest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if out>0:
out = 1
else:
out = 0
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, maxinp) :
#print inp_fix
if(num_inp>=maxinp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Htest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
check_ratio(fixed,clf)
f.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,006 | 26.180995 | 112 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Cgender/Cgroup.py | '''
Group discrimination testing for Subject System C
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
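# Usage sketch (hypothetical file name):
#   python Cgroup.py census_train.csv 9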
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite=1
#Minimum number of inputs to start applying the confidence check optimization
minInp =50000
random.seed(13)
#Output file for the test suite
outputfile = "../../Suites/freshC9"+sys.argv[2]+".txt"
option=4
fout = open(outputfile,"w")
sens_arg = int(sys.argv[2])
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/C9"+sys.argv[2]
num_atr=[ 10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=int(sys.argv[2])
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
fout.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num_test=0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp = ""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
print 100*(max-min)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[sens_arg-1]=1
check_ratio(fixed,clf)
fout.close()
print "output is in",outputfile | 5,039 | 25.526316 | 104 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Cgender/Ctest.py | from __future__ import division
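# Prediction helper for the C drivers: trains a naive Bayes model on argv[1]
# and prints the class predicted for the 13 feature values in argv[3:].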
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
map={}
option =1
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Cgender/Ccausal.py | '''
Causal discrimination testing for Subject System C
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
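# Usage sketch (hypothetical file name):
#   python Ccausal.py census_train.csv 9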
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
random.seed(2)
#Output file for the test suite
outputfile = "../../Suites/freshCcausal9"+sys.argv[2]+".txt"
option=4
num_test=0
fout = open(outputfile,"w")
#set printsuite to 0 to not print the test suite
printsuite=1
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/C9"+sys.argv[2]+"causal"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=int(sys.argv[2])
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr =' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, 20000) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
fout.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile
| 5,925 | 25.936364 | 112 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Ccredit/Ccreditcausal.py | '''
Causal discrimination testing for Subject System C (credit dataset)
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
	argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
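# Usage sketch (hypothetical file name; only argv[1] is read below, and the
# sensitive-attribute index is hard-coded to 9, i.e. gender):
#   python Ccreditcausal.py credit_train.csv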
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
#Output file for the test suite
outputfile = "../../Suites/freshCcreditcausal99.txt"
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/Ccausalcredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
fout = open(outputfile,"w")
option =4
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=9
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr=' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = int(tmp_val%num_atr[fix_atr[i]])
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
out=""
str1=""
strinp=""
for ax in inp:
str1 += str(ax)+","
strinp+=str(ax)+" "
fout.write(str(ax)+",")
if option ==1 :
out = commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
fout.close()
print "output is in",outputfile | 5,841 | 26.556604 | 102 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Ccredit/Ctestcredit.py | from __future__ import division
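# Prediction helper for the C credit drivers: trains a naive Bayes model on
# argv[1] and prints the class predicted for the 20 feature values in argv[2:].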
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
max_inp = 50000
printsuite = 1
minInp = 50000
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
option =1
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 821 | 16.869565 | 54 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Ccredit/Ccreditgroup.py | '''
Group discrimination testing for Subject System C (credit dataset)
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
	argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
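# Usage sketch (hypothetical file name; only argv[1] is read below, and the
# sensitive-attribute index is hard-coded to 9, i.e. gender):
#   python Ccreditgroup.py credit_train.csv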
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite=1
#Minimum number of inputs to start applying the confidence check optimization
minInp =50000
random.seed(1998)
trainfile = sys.argv[1]
#Output file for the test suite
outputfile = "../../Suites/freshCcredit99.txt"
option = 4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
already = "../../Suites/Ccredit"
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=9
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num_test=0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
num_inp+=1
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
fixed[9-1]=1
check_ratio(fixed,clf)
f.close()
print "output is in",outputfile | 5,005 | 25.209424 | 94 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Crace/Cgroup.py | '''
Group discrimination testing for Subject System C
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
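# Usage sketch (hypothetical file name; 8 selects race):
#   python Cgroup.py census_train.csv 8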
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite=1
#Minimum number of inputs to start applying the confidence check optimization
minInp =50000
random.seed(13)
#Output file for the test suite
outputfile = "../../Suites/freshC8"+sys.argv[2]+".txt"
option=4
fout = open(outputfile,"w")
sens_arg = int(sys.argv[2])
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/C8"+sys.argv[2]
num_atr=[ 10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test=0
fix_atr = []
if option==3 or option==4:
fixval=int(sys.argv[2])
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
fout.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
strinp=""
str1=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
#print pos,neg
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[sens_arg-1]=1
check_ratio(fixed,clf)
fout.close()
print "output is in",outputfile | 5,027 | 25.1875 | 104 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Crace/Ctest.py | from __future__ import division
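# Prediction helper for the C race drivers: trains a naive Bayes model on
# argv[1] and prints the class predicted for the 13 feature values in argv[3:].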
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
map={}
option =1
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/C/Crace/Ccausal.py | '''
Causal discrimination testing for Subject System C
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
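# Usage sketch (hypothetical file name; 8 selects race):
#   python Ccausal.py census_train.csv 8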
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
random.seed(2)
#Output file for the test suite
outputfile = "../../Suites/freshCcausal8"+sys.argv[2]+".txt"
fout = open(outputfile,"w")
option=4
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../../Suites/C8"+sys.argv[2]+"causal"
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=int(sys.argv[2])
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr=' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
#print pos,neg
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
fout.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,991 | 25.990991 | 112 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Ecreditgroup.py | '''
Group discrimination testing for Subject System E on the credit dataset
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
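# Usage sketch (hypothetical file name; only argv[1] is read below, and the
# sensitive-attribute index is hard-coded to 9, i.e. gender):
#   python Ecreditgroup.py credit_train.csv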
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite=1
#Minimum number of inputs to start applying the confidence check optimization
minInp =50000
random.seed(13)
outputfile = "../Suites/freshEcredit9.txt"
f = open(outputfile,"w")
option =4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../Suites/Ecredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=9
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Etestcredit.py "+sys.argv[1]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num_test=0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Etestcredit.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
fixed[9-1]=1
check_ratio(fixed,clf)
f.close()
print "output is in",outputfile | 4,915 | 25.717391 | 94 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Ecausal.py | '''
Causal discrimination testing for Subject System E
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
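# Usage sketch (hypothetical file name):
#   python Ecausal.py census_train.csv 9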
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(2)
num_test=0
outputfile = "../Suites/freshEcausal9"+sys.argv[2]
f = open(outputfile,"w")
option =4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../Suites/E"+sys.argv[2]+"causal"
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=int(sys.argv[2])
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Etest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, 10000) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Etest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
#print pos,neg
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,919 | 25.428571 | 112 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Ecreditcausal.py | '''
Causal discrimination testing for Subject System E on the credit dataset
Inputs :
argv[1] : Train file
	argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
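# Usage sketch (hypothetical file name; only argv[1] is read below, and the
# sensitive-attribute index is hard-coded to 9, i.e. gender):
#   python Ecreditcausal.py credit_train.csv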
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(13)
num_test=0
outputfile = "../Suites/freshEcreditcausal9.txt"
f = open(outputfile,"w")
option =4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../Suites/Ecausalcredit"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=9
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Etestcredit.py "+sys.argv[1]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
		inp=['','','','','','','','','','','','','','','','','','','','']
		num_inp = 0
		pos = 0
		neg = 0
		curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Etestcredit.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
			if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg > mininp:
#print pos,neg
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,911 | 25.995434 | 102 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Etestcredit.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import sys
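# Helper invoked by the causal-testing driver above: trains a Gaussian naive
# Bayes model on the train file (argv[1]) and prints its prediction for the 20
# space-separated feature values passed as argv[2..21].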
max_inp = 50000
printsuite=0
minInp =5
random.seed(2)
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 692 | 15.902439 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Egroup.py | '''
Group discrimination testing for Subject System E
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
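# Example invocation (a minimal sketch; the train-file path is hypothetical):
#   python Egroup.py path/to/census_train.csv 9    # 9 = gender, 8 = race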
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite=1
#Minimum number of inputs to start applying the confidence check optimization
minInp =50000
random.seed(2)
sens_arg = int(sys.argv[2])
outputfile = "../Suites/freshE"+sys.argv[2]+".txt"
option=4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[ 10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../Suites/E"+sys.argv[2]
def check_ratio(fixed,clf):
num_test=0
if option==3 or option==4:
fixval=int(sys.argv[2])
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = int(commands.getoutput("python Etest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Etest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>minInp:
#print pos,neg
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[sens_arg-1]=1
check_ratio(fixed,clf)
print "output is in",outputfile | 4,900 | 25.491892 | 104 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/E/Etest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import sys
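# Helper invoked by Egroup.py: trains a Gaussian naive Bayes model on the train
# file (argv[1]); argv[2] is the sensitive-attribute index (read but not used
# beyond that here), and the prediction is printed for the 13 feature values in
# argv[3..15].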
max_inp = 50000
printsuite=0
minInp =5
random.seed(2)
sens_arg = int(sys.argv[2])
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Gtest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import sys
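# Helper invoked by the G census drivers: fits a linear regression on the train
# file (argv[1]), skips argv[2] (the sensitive-attribute index the callers pass
# through), and prints 1/0 for the sign of the prediction on the 13 feature
# values in argv[3..15].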
max_inp = 30000
printout = 0
minInp=30000
random.seed(1997)
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
out = clf.predict([inp])[0]
if out > 0:
print "1"
else:
print "0" | 703 | 16.170732 | 37 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Gcreditcausal.py | '''
Causal discrimination testing for Subject System G for credit dataset
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
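# Example invocation (a minimal sketch; the path is hypothetical -- fixval is
# hard-coded to 9 below, so the second argument from the docstring is unused):
#   python Gcreditcausal.py path/to/credit_train.csv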
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(12)
num_test=0
outputfile = "../Suites/freshGcreditcausal9.txt"
f = open(outputfile,"w")
option=4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
already = "../Suites/Gcausalcredit"
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=9
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
inpstr = ' '.join(line)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Gcredittest.py "+sys.argv[1]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if out>0:
out = 1
else:
out = 0
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x) +" "
#print inp,num_inp,max_inp
num_test+=1
if option ==1 :
#print "python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp
out = commands.getoutput("python Gcredittest.py "+sys.argv[1]+" "+strinp)
out = int(out)
else:
out = clf.predict([inp])[0]
if out>0:
out=1
else:
out=0
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>mininp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,129 | 26.244444 | 102 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Gcausal.py | '''
Causal discrimination testing for Subject System G
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
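# Example invocation (a minimal sketch; the train-file path is hypothetical):
#   python Gcausal.py path/to/census_train.csv 9    # 9 = gender, 8 = race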
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
mininp=30000
#set printsuite to 0 to not print the test suite
printsuite=1
random.seed(12)
outputfile = "../Suites/freshGcausal"+sys.argv[2]+".txt"
num_test=0
option =4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
already = "../Suites/G"+sys.argv[2]+"causal"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=int(sys.argv[2])
if fixval ==8:
numval=5
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
inpstr=' '.join(line)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if out>0:
out = 1
else:
out = 0
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
#print "python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp
out = commands.getoutput("python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
out = int(out)
else:
out = clf.predict([inp])[0]
if out>0:
out=1
else:
out=0
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>mininp:
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,227 | 25.615385 | 112 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Gcreditgroup.py | '''
Group discrimination testing for Subject System G for Credit dataset
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
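# Example invocation (a minimal sketch; the path is hypothetical -- fixval is
# hard-coded to 9 inside check_ratio below, so no second argument is read):
#   python Gcreditgroup.py path/to/credit_train.csv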
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 30000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
minInp=30000
random.seed(1997)
trainfile = sys.argv[1]
outputfile = "../Suites/freshGcredit9.txt"
option=4
f=open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
already = "../Suites/Gcredit"
def check_ratio(fixed,clf):
if option==3 or option==4:
fixval=9
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr +=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = commands.getoutput("python Gcredittest.py "+sys.argv[1]+" "+inpstr)
out = int(out)
if out>0:
out=1
else:
out=0
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Gcredittest.py "+sys.argv[1]+" "+strinp)
out = int(out)
else:
out = clf.predict([inp])[0]
if out>0:
out=1
else:
out=0
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
#if(','.join(str(inp)) in map.keys()):
# continue
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac = pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
#print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
f.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,476 | 25.980296 | 89 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Gcredittest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import sys
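# Helper invoked by the G credit drivers: fits a linear regression on the train
# file (argv[1]) and prints 1/0 for the sign of the prediction on the 20
# feature values passed as argv[2..21].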
max_inp = 30000
printout = 0
minInp=3
random.seed(1997)
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
out = clf.predict([inp])[0]
if out > 0:
out=1
else:
out=0
print out | 701 | 15.714286 | 37 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure1/G/Ggroup.py | '''
Group discrimination testing for Subject System G
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
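# Example invocation (a minimal sketch; the train-file path is hypothetical):
#   python Ggroup.py path/to/census_train.csv 9    # 9 = gender, 8 = race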
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 30000
#Minimum number of inputs to start applying the confidence check optimization
minInp=30000
#set printsuite to 0 to not print the test suite
printsuite = 1
sense_arg = int(sys.argv[2])
random.seed(15)
outputfile = "../Suites/freshG"+str(sense_arg)+".txt"
f = open(outputfile,"w")
option=4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../Suites/G"+sys.argv[2]
def check_ratio(fixed,clf):
fix_atr = []
if option==3 or option==4:
fixval=int(sys.argv[2])
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inpstr=''
inp = inp.split(',')
inp=inp[:-1]
i=0
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = clf.predict([inp])[0]
else:
out = commands.getoutput("python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
out = int(out)
if out>0:
out=1
else:
out=0
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
strinp=""
str1=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
#print "python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp
out = commands.getoutput("python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
out = int(out)
else:
out = clf.predict([inp])[0]
if out>0:
out=1
else:
out=0
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
#if(','.join(str(inp)) in map.keys()):
# continue
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac = pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
#print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[sense_arg-1] = 1
check_ratio(fixed,clf)
f.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,525 | 25.825243 | 99 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
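# Example invocation (a minimal sketch; the suite path is hypothetical):
#   python Table1CausalScore.py path/to/suite.txt 1
# Rows are consumed in blocks of 5 for race (argv[2]=0) or 2 for gender
# (argv[2]=1), one block per base input.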
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==5 and type==0):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
if(rowNum==2 and type==1):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 1,111 | 17.533333 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/D/Dgenderm/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
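# Example invocation (a minimal sketch; the suite path is hypothetical). In
# this copy rows are consumed in fixed blocks of 7, regardless of argv[2]:
#   python Table1CausalScore.py path/to/suite.txt 1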
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==7):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/D/Dgenderc/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
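# Example invocation (a minimal sketch; the suite path is hypothetical). In
# this copy rows are consumed in fixed blocks of 40, regardless of argv[2]:
#   python Table1CausalScore.py path/to/suite.txt 1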
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==40):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/D/Dracemr/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
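# Example invocation (a minimal sketch; the suite path is hypothetical). In
# this copy rows are consumed in fixed blocks of 35, regardless of argv[2]:
#   python Table1CausalScore.py path/to/suite.txt 0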
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==35):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/D/Dracem/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
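# Example invocation (a minimal sketch; the suite path is hypothetical). In
# this copy rows are consumed in fixed blocks of 7, regardless of argv[2]:
#   python Table1CausalScore.py path/to/suite.txt 0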
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==7):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/A/ACausal.py | '''
Causal discrimination testing for Subject System A
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
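# Example invocation (a minimal sketch; the train-file path is hypothetical --
# sens_arg and fixval are both hard-coded to 8 (race) below, so the extra
# arguments from the docstring are not required on the default option=4 path):
#   python ACausal.py path/to/census_train.csv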
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
random.seed(1996)
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
#set printout to 0 to not print the test suite
printout=1
sens_arg = 8
fixval=8
outputfile = "../Suites/freshAcausal"+str(sens_arg)+str(fixval)
#print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
option =4
f = open(outputfile,"w")
already = "../Suites/Acausal"+str(sens_arg)+str(fixval)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed):
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
outval = float(line[-1])
line = line[:-1]
i=0
pos=0
neg=0
if fixval ==8:
numval=10
else:
numval=2
while i<numval:
#print i,den
line[7] = str(int(i%5))
line[8] = str(int(i/5))
#line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = float(commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+str(sens_arg)+" "+inpstr))
else:
out = np.sign(np.dot(w, inp))
if printout==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = int(tmp_val%num_atr[fix_atr[i]])
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
#out = np.sign(np.dot(w, inp))
str1=""
inpstr=""
for x in inp:
str1 += str(x)+","
inpstr+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
else:
out = np.sign(np.dot(w, inp))
if printout:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg))<0.05 and pos+neg>minInp:
break
num_inp+=1
print pos*100.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
#fixed_atr = 9
fixed = [0,0,0,0,0,0,0,1,1,0,0,0,0]
#fixed[fixed_str-1]=1
check_ratio(fixed)
f.close()
print "output is in ",outputfile
| 6,556 | 25.872951 | 127 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/A/Table1CausalScore.py | '''
This script calculates the Causal discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for race
1 for gender
'''
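# Example invocation (a minimal sketch; the suite path is hypothetical). In
# this copy rows are consumed in fixed blocks of 10, matching the 5 x 2
# race/gender grid that ACausal.py enumerates, regardless of argv[2]:
#   python Table1CausalScore.py path/to/suite.txt 0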
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==10):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/A/Atestcensus.py | '''
Test the Subject System A on Census dataset to generate the output for the input given as argv arguments.
All the inputs are assumed to be space separated.
'''
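# Example invocation (a minimal sketch; the path and the 13 feature values are
# hypothetical placeholders -- argv[2] selects the sensitive attribute):
#   python Atestcensus.py path/to/census_train.csv 9 3 5 20 10 4 7 2 3 1 50 20 60 35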
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
sens_arg = int(sys.argv[2])
trainfile = sys.argv[1]
random.seed(12)
#fixed seed to get the same test suite each time
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0.2,0.2,0.2,0.2,0.2,0.2]
X=[]
Y=[]
i=0
sensitive = {};
sens = []
option =1
with open(trainfile, "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print np.sign(np.dot(w, inp))
| 1,768 | 21.679487 | 128 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/A/fair_classification/loss_funcs.py | import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X,w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1-yz)) # hinge function
return sum(yz)
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam)/2.0) * np.sum([elem*elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
idx = X>0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
| 2,268 | 22.884211 | 82 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/A/fair_classification/utils.py | import numpy as np
from random import seed, shuffle
import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
#print x[0],y[0],x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #it's not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
def constraint_protected_people(w,x,y): # don't confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified to negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
if sep_constraint == True: # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
return w.x
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma=None):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the remaining 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
perm = range(0,n_samples) # shuffle the data before creating each fold
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
# Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print "Accuracy: %0.2f" % (np.mean(acc_arr))
print "Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos)
print "P-rule achieved: %0.0f%%" % (p_rule)
print "Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr]))
print
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print
print "Total data points: %d" % (len(x_control))
print "# non-protected examples: %d" % (non_prot_all)
print "# protected examples: %d" % (prot_all)
print "Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all)
print "Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all)
print "P-rule is: %0.0f%%" % ( p_rule )
return p_rule
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
input: 1-D arr with int vals -- if not int vals, will raise an error
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if str(type(k)) != "<type 'numpy.float64'>" and type(k) != int and type(k) != np.int64:
print str(type(k))
print "************* ERROR: Input arr does not have integer types"
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print "Either the model (w) or the predicted labels should be None"
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
The covariance is computed b/w the sensitive attr val and the distance from the boundary
If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary
If the model is not None, we just compute the dot product of model and x_arr
for the case of SVM, we pass the distance from the boundary because the intercept is internalized for the class
and we have to compute the distance using the project function
this function will return a negative value if the constraint specified by the thresh parameter is not satisfied
otherwise it will return a non-negative value
if the return value is >=0, then the constraint is satisfied
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print "Covariance is", cov
print "Diff is:", ans
print
return ans
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
returns the covariance between sensitive features and distance from decision boundary
"""
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if it's a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
# val = attr_val_int_mapping_dict_reversed[val] # change values from intgers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, it's a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
#print attr, attr_name, sensitive_attrs_to_cov_thresh[attr]
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
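def _get_avg_correlation_dict_example():
    # Hypothetical example: averaging the per-fold correlation dicts produced
    # by get_correlations for two cross-validation folds.
    fold1 = {"s": {0: {1: 40.0}}}
    fold2 = {"s": {0: {1: 60.0}}}
    return get_avg_correlation_dict([fold1, fold2])  # {"s": {0: {1: 50.0}}}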
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
    # vary the covariance threshold using a range of decreasing multiplicative factors and observe the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print "LOG: testing for multiplicative factor: %0.2f" % c
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
        # just plot the correlations for the first sensitive attr; the plotting can be extended to the other values, but as a proof of concept we show only one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
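def _plot_decision_boundary_sketch(w, x1, x2):
    # Sketch: for a 2-D classifier with weight vector w = [w0, w1, w2]
    # (intercept first), get_line_coordinates solves w0 + w1*x + w2*y = 0 at
    # the two given x-coordinates, so the boundary can be drawn as a segment
    # (plt is matplotlib.pyplot, as used elsewhere in this module).
    y1, y2 = get_line_coordinates(w, x1, x2)
    plt.plot([x1, x2], [y1, y2], "k--")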
| 27,188 | 41.350467 | 357 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cgender/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==7):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cgender/Ctest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
map={}
option =1
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
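# Invocation sketch (hypothetical file name): argv[2] is a placeholder for the
# sensitive-attribute index and is not read here; the 13 trailing arguments
# are the feature values, e.g.
#   python Ctest.py cleaned_train 9 5 1 10 3 0 4 2 0 1 0 0 45 0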
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cgender/Ccausalm.py | '''
Causal discrimination testing for Subject System C
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
random.seed(2)
#Output file for the test suite
outputfile = "../../Suites/freshCcausalmarital.txt"
option=4
num_test=0
fout = open(outputfile,"w")
#set printsuite to 0 to not print the test suite
printsuite=1
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/Ccausalmarital"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
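# check_ratio performs the causal discrimination test: for each input it
# enumerates every value of the attributes marked in `fixed` and counts the
# input as discriminated against when the classifier returns both a positive
# and a non-positive output across those variants. As these scripts use the
# `option` flag, 1 queries the model through the Ctest.py subprocess on fresh
# random inputs, while 3 and 4 replay the recorded suite in `already`
# (4 through the subprocess, 3 with the in-process model).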
def check_ratio(fixed,clf):
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=5
if fixval ==5:
numval=7
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
inpstr = ' '.join(line)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = float(commands.getoutput("python Ctest.py "+sys.argv[1]+" 9 "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>minInp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
break
num_inp+=1
#print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = 5#int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
fout.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,883 | 25.86758 | 100 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Ccredit/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==75):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Ccredit/Ccreditcausal.py | '''
Causal discrimination testing for Subject System C (Credit dataset)
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
minInp = 50000
#Output file for the test suite
outputfile = "../../Suites/freshCcreditcausal99.txt"
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/Ccreditcausal99.txt"
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
fout = open(outputfile,"w")
option =4
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=13
if fixval ==13:
numval=75
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
inpstr = ' '.join(line)
strinp = ','.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = float(commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print inp_fix
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = int(tmp_val%num_atr[fix_atr[i]])
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
out=""
str1=""
strinp=""
for ax in inp:
str1 += str(ax)+","
strinp+=str(ax)+" "
fout.write(str(ax)+",")
if option ==1 :
out = commands.getoutput("python Ctestcredit.py "+sys.argv[1]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>minInp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
                break
            num_inp+=1
    print "score is ", pos*1.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
check_ratio(fixed,clf)
fout.close()
print "output is in",outputfile
| 5,838 | 26.542453 | 104 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Ccredit/Ctestcredit.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
max_inp = 50000
printsuite = 1
minInp = 50000
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[4,80,5,11,200,5,5,4,2,3,4,4,75,3,3,4,4,3,2,2]
map={}
option =1
i=2
inp=[]
while len(inp)<20:
inp.append(int(sys.argv[i]))
i+=1
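# Invocation sketch (hypothetical file name): the 20 trailing arguments are
# the credit-dataset feature values, e.g.
#   python Ctestcredit.py cleaned_train 1 10 2 3 50 1 2 3 1 2 3 0 20 1 2 3 0 1 1 0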
print str(clf.predict([inp])[0]) | 821 | 16.869565 | 54 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cracerg/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==10):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cracerg/Ctest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
map={}
option =1
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Cracerg/Ccausal.py | '''
Causal discrimination testing for Subject System C
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
random.seed(2)
#Output file for the test suite
outputfile = "../../Suites/freshCcausalrg.txt"
fout = open(outputfile,"w")
option=4
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../../Suites/Ccausalrg.txt"
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=9#int(sys.argv[2])
if fixval ==9:
numval=10
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(int(i/5))
line[8-1] = str(int(i%5))
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = float(commands.getoutput("python Ctest.py "+sys.argv[1]+" 8 "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = int(tmp_val%num_atr[fix_atr[i]])
tmp_val = int((tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]])
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = int(tmp_val%num_atr[fix_atr[i]])
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
out = clf.predict([inp])[0]
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>mininp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
#print pos,neg
break
num_inp+=1
print "output is in ",outputfile
print pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = 9#int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
fixed[8-1]=1
check_ratio(fixed,clf)
fout.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,061 | 25.823009 | 100 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Crace/Ccausala.py | '''
Causal discrimination testing for Subject System C
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#set printsuite to 0 to not print the test suite
printsuite = 1
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
random.seed(2)
#Output file for the test suite
outputfile = "../../Suites/freshCcausalage.txt"
fout = open(outputfile,"w")
option=4
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
already = "../../Suites/Ccausalage.txt"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=1#int(sys.argv[2])
if fixval ==1:
numval=10
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = float(commands.getoutput("python Ctest.py "+sys.argv[1]+" 8 "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
num_test+=1
out = clf.predict([inp])[0]
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Ctest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
fout.write(str1+" "+str(out)+"\n")
#f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>mininp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
#print pos,neg
break
num_inp+=1
print "score is ",pos*1.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = 1
fixed = [1,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
fout.close()
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,932 | 25.846154 | 100 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Crace/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==10):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/C/Crace/Ctest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import commands
import sys
random.seed(1991)
trainfile = sys.argv[1]
num_test=0
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
map={}
option =1
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/F/Fcountryrace/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==200):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 936 | 16.679245 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/F/Frelationrace/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==30):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/B/Bracecausal/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==2):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/B/Bgendercausalm/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==7):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/B/Bgendercausalmrg/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==70):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/B/Bgendercausalrg/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==10):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 935 | 16.660377 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/E/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==7):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/E/Ecausalmarital.py | '''
Causal discrimination testing for Subject System E
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import commands
import itertools
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
#set printsuite to 0 to not print the test suite
printsuite = 1
random.seed(2)
num_test=0
outputfile = "../Suites/freshEcausalmarital"
f = open(outputfile,"w")
option =4
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
already = "../Suites/Ecausalmarital"
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=5
if fixval ==5:
numval=7
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Etest.py "+sys.argv[1]+" 9 "+inpstr))
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
out = clf.predict([inp])[0]
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp +=str(x)+" "
if option ==1 :
out = commands.getoutput("python Etest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = clf.predict([inp])[0]
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac=pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>mininp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
#print pos,neg
break
num_inp+=1
print "score is ",pos*1.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = 5#int(sys.argv[2])
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 5,866 | 25.191964 | 99 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/E/Etest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import sys
max_inp = 50000
printsuite=0
minInp =5
random.seed(2)
sens_arg = int(sys.argv[2])
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
print str(clf.predict([inp])[0]) | 720 | 16.166667 | 42 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/G/Gtest.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import sys
max_inp = 30000
printout = 0
minInp=30000
random.seed(1997)
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
i=3
inp=[]
while len(inp)<13:
inp.append(int(sys.argv[i]))
i+=1
out = clf.predict([inp])[0]
if out > 0:
print "1"
else:
print "0" | 703 | 16.170732 | 37 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/G/Gcausale.py | '''
Causal discrimination testing for Subject System G
Inputs :
argv[1] : Train file
argv[2] : Argument to test discrimination against
8 means race and 9 means gender
'''
from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import commands
import sys
#Maximum number of inputs to test against
max_inp = 50000
#Minimum number of inputs to start applying the confidence check optimization
mininp=50000
#set printsuite to 0 to not print the test suite
printsuite=1
random.seed(12)
outputfile = "../Suites/freshGcausalrelation.txt"
num_test=0
option =4
f = open(outputfile,"w")
X=[]
Y=[]
i=0
with open(sys.argv[1], "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
already = "../Suites/Gcausalrelation.txt"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
if option==3 or option==4:
fin = open(already,"r")
requeried={}
num=0
den=0
for line in fin:
line = line.strip()
line = line.split(',')
line = line[:-1]
i=0
pos=0
neg=0
fixval=7
if fixval ==7:
numval=6
else:
numval=2
while i<numval:
#print i,den
line[fixval-1] = str(i)
strinp = ','.join(line)
inpstr = ' '.join(line)
if(strinp in requeried.keys()):
i+=1
continue
else:
#query here
inp=[]
for a in line:
inp.append(int(a))
if option ==4 :
out = int(commands.getoutput("python Gtest.py "+sys.argv[1]+" 8 "+inpstr))
else:
out = clf.predict([inp])[0]
#print out, clf.predict([inp])[0]
if out>0:
out = 1
else:
out = 0
if printsuite==1:
f.write(strinp+", "+str(out)+"\n")
#print strinp+", "+str(out)
if out>0:
pos+=1
else:
neg+=1
requeried[strinp] = 1
i+=1
if pos>0 and neg > 0:
num+=1
if pos>0 or neg>0:
#print pos,neg,line,num
den+=1
print "Score is ",num*100.0/den
return
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
#print i3
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
#print inp,num_inp,max_inp
num_test+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp += str(x)+" "
if option ==1 :
#print "python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp
out = commands.getoutput("python Gtest.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
out = int(out)
else:
out = clf.predict([inp])[0]
if out>0:
out=1
else:
out=0
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
frac = pos*1.0/(pos+neg)
            if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)) < 0.05 and pos+neg>mininp:  # stop once the 2.5-sigma half-width of the score estimate drops below 0.05
break
num_inp+=1
print "score is ", pos*1.0/(neg+pos)
def findsubsets(S,m):
return set(itertools.combinations(S, m))
fixedatr = 7
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
fixed[fixedatr-1]=1
check_ratio(fixed,clf)
'''
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
'''
print "output is in",outputfile | 6,268 | 25.563559 | 99 | py |
Themis | Themis-master/ESEC.FSE.2017.Experimental.Replication/Figure2/G/Table1CausalScore.py | '''
This script calculates the causal discrimination score of the given test suite with respect to race or gender.
USAGE:
argv[1] : Input test suite
argv[2] : 0/1
    0 for race
    1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
#type = 0 means race
#type = 1 means gender
pos = 0
neg = 1
rowNum = 0
num=0
den=0
posFound = 0
negFound = 0
iter = 0
lst = []
lines =[]
for line in f:
line = line.strip()
l1 = line
lines.append(l1)
line =line.split(',')
if(float(line[-1])>0):
posFound=1
if(float(line[-1])<=0):
negFound=1
rowNum+=1
if(rowNum==6):
rowNum=0
if(posFound==1 and negFound==1):
num+=1
lst.append(iter/5*5)
#print l1,iter
den+=1
posFound = 0
negFound = 0
iter +=1
val = num*100.0/den
if(val < 0.01):
val=0.01
print("%.2f"%val)
| 934 | 16.641509 | 108 | py |
Themis | Themis-master/subjectSystems/A/svm.py | from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
if(len(sys.argv)!=4):
print "USAGE python svm.py 8/9 SEX AGE"
print "Curently working with 9 only as it doesnt converge with 8"
exit(-1)
sens_arg = int(sys.argv[1])
print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0,0.6,0.4,0,0,0]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
x = [ 5,1,10,3,0,4,2,0,1,0,0,45,0]
predicted_labels = np.sign(np.dot(w, x))
print predicted_labels
print "DONE"
sex = sys.argv[2]
race = sys.argv[3]
inp=[0,0,0,0,0,0,0,0,0,0,0,0,0]
inp = np.array(inp, dtype = float)
for i3 in range(0, 70) :
for i12 in range(0,100):
for i10 in range(0,100):
for i11 in range(0,40):
for i6 in range(0,14):
for i5 in range(0,7):
for i7 in range(0,6):
for i4 in range(0,16):
for i13 in range(0,40):
for i1 in range(0,10):
for i2 in range(0,8):
for i9 in range(0,1):
for i8 in range(0,1):
i9 = sex
i8 = race
inp[0]=(random.randint(0,9));
inp[1]=(random.randint(0,7));
inp[2]=(random.randint(0,39));
inp[3]=(random.randint(0,15));
inp[4]=(random.randint(0,6));
inp[5]=(random.randint(0,13));
inp[6]=(random.randint(0,5));
inp[7]=(i8);
inp[8]=(i9);
inp[9]=(random.randint(0,99));
inp[10]=(random.randint(0,39));
inp[11]=(random.randint(0,99));
inp[12]=(random.randint(0,39));
out = np.sign(np.dot(w, inp))
if(out>0):
print inp, 1
else:
print inp, 0
| 3,863 | 32.894737 | 127 | py |
Themis | Themis-master/subjectSystems/A/A_causal.py | from __future__ import division
from random import seed, shuffle
import random
import math
import os
import itertools
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
if(len(sys.argv)!=4):
print "USAGE python svm.py 8/9 SEX AGE"
print "Curently working with 9 only as it doesnt converge with 8"
exit(-1)
random.seed()
#Maximum number of inputs to test against (assumed cap, matching the other subject systems)
max_inp = 50000
sens_arg = int(sys.argv[1])
print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0,0.6,0.4,0,0,0]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
x = [ 5,1,10,3,0,4,2,0,1,0,0,45,0]
predicted_labels = np.sign(np.dot(w, x))
print predicted_labels
print "DONE"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
if(','.join(str(inp)) in curr_map.keys()):
pos_found=-1
print "here",inp
break
#print inp,num_inp,max_inp
num_test+=1
out = np.sign(np.dot(w, inp))
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
break
if(pos_found==-1):
continue
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
num_inp+=1
print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
        check_ratio(fixed, w)
| 11,777 | 45.007813 | 133 | py |
Themis | Themis-master/subjectSystems/A/A_group.py | from __future__ import division
from random import seed, shuffle
import random
import math
import os
import itertools
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
if(len(sys.argv)!=4):
print "USAGE python svm.py 8/9 SEX AGE"
print "Curently working with 9 only as it doesnt converge with 8"
exit(-1)
random.seed()
#Maximum number of inputs to test against (assumed cap, matching the other subject systems)
max_inp = 50000
sens_arg = int(sys.argv[1])
print sens_arg
if(sens_arg== 9):
name = 'sex'
cov=0
else:
name = 'race'
cov = [0,0.6,0.4,0,0,0]
X=[]
Y=[]
i=0
sensitive = {}
sens = []
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
L = map(int,line1[:-1])
sens.append(L[sens_arg-1])
#L[sens_arg-1]=-1
X.append(L)
if(int(line1[-1])==0):
Y.append(-1)
else:
Y.append(1)
X = np.array(X, dtype=float)
Y = np.array(Y, dtype = float)
sensitive[name] = np.array(sens, dtype = float)
loss_function = lf._logistic_loss
sep_constraint = 0
sensitive_attrs = [name]
sensitive_attrs_to_cov_thresh = {name:cov}
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
x = [ 5,1,10,3,0,4,2,0,1,0,0,45,0]
predicted_labels = np.sign(np.dot(w, x))
print predicted_labels
print "DONE"
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
out = np.sign(np.dot(w, inp))
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
        check_ratio(fixed, w)
| 10,117 | 42.055319 | 127 | py |
Themis | Themis-master/subjectSystems/A/fair_classification/loss_funcs.py | import sys
import os
import numpy as np
import scipy.special
from collections import defaultdict
import traceback
from copy import deepcopy
def _hinge_loss(w, X, y):
yz = y * np.dot(X,w) # y * (x.w)
yz = np.maximum(np.zeros_like(yz), (1-yz)) # hinge function
return sum(yz)
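# Quick check (illustrative numbers, not from the original file): for a single
# example with y = +1 and x.w = 0.3, yz = 0.3 and the hinge term is
# max(0, 1 - 0.3) = 0.7; once the margin yz reaches 1, the example
# contributes nothing to the loss.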
def _logistic_loss(w, X, y, return_arr=None):
"""Computes the logistic loss.
This function is used from scikit-learn source code
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
if return_arr == True:
out = -(log_logistic(yz))
else:
out = -np.sum(log_logistic(yz))
return out
def _logistic_loss_l2_reg(w, X, y, lam=None):
if lam is None:
lam = 1.0
yz = y * np.dot(X,w)
# Logistic loss is the negative of the log of the logistic function.
logistic_loss = -np.sum(log_logistic(yz))
l2_reg = (float(lam)/2.0) * np.sum([elem*elem for elem in w])
out = logistic_loss + l2_reg
return out
def log_logistic(X):
""" This function is used from scikit-learn source code. Source link below """
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
Source code at:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
if X.ndim > 1: raise Exception("Array of samples cannot be more than 1-D!")
out = np.empty_like(X) # same dimensions and data types
idx = X>0
out[idx] = -np.log(1.0 + np.exp(-X[idx]))
out[~idx] = X[~idx] - np.log(1.0 + np.exp(X[~idx]))
return out
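# Illustrative example (added for clarity): the stable split above matches the
# naive formula where the latter is safe, and avoids overflow for
# large-magnitude arguments, e.g.
#   log_logistic(np.array([-1000.0, 0.0, 1000.0]))  # ~ [-1000., -0.6931, 0.]
# whereas the naive np.log(1.0 / (1.0 + np.exp(1000.0))) overflows and yields
# -inf for the first entry.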
| 2,268 | 22.884211 | 82 | py |
Themis | Themis-master/subjectSystems/A/fair_classification/utils.py | import numpy as np
from random import seed, shuffle
import loss_funcs as lf # our implementation of loss funcs
from scipy.optimize import minimize # for loss func minimization
from multiprocessing import Pool, Process, Queue
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
import sys
SEED = 1122334455
seed(SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(SEED)
def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None):
print x[0],y[0],x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh
"""
Function that trains the model subject to various fairness constraints.
If no constraints are given, then simply trains an unaltered classifier.
Example usage in: "synthetic_data_demo/decision_boundary_demo.py"
----
Inputs:
X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept
y: 1-d numpy array (n entries)
x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values
loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added
apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values)
apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values)
sep_constraint: apply the fine grained accuracy constraint
for details, see Section 3.3 of arxiv.org/abs/1507.05259v3
For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py"
Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time
sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control
sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints)
gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint
----
Outputs:
w: the learned weight vector for the classifier
"""
assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time
max_iter = 100000 # maximum number of iterations for the minimization algorithm
if apply_fairness_constraints == 0:
constraints = []
else:
constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh)
if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints
f_args=(x, y)
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = f_args,
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = constraints
)
else:
# train on just the loss function
w = minimize(fun = loss_function,
x0 = np.random.rand(x.shape[1],),
args = (x, y),
method = 'SLSQP',
options = {"maxiter":max_iter},
constraints = []
)
old_w = deepcopy(w.x)
def constraint_gamma_all(w, x, y, initial_loss_arr):
gamma_arr = np.ones_like(y) * gamma # set gamma for everyone
new_loss = loss_function(w, x, y)
old_loss = sum(initial_loss_arr)
return ((1.0 + gamma) * old_loss) - new_loss
        def constraint_protected_people(w,x,y): # don't confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified into the negative class
return np.dot(w, x.T) # if this is positive, the constraint is satisfied
def constraint_unprotected_people(w,ind,old_loss,x,y):
new_loss = loss_function(w, np.array([x]), np.array(y))
return ((1.0 + gamma) * old_loss) - new_loss
constraints = []
predicted_labels = np.sign(np.dot(w.x, x.T))
unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True)
        if sep_constraint == True: # separate gamma for different people
for i in range(0, len(predicted_labels)):
if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs
c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier
constraints.append(c)
else:
c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])})
constraints.append(c)
else: # same gamma for everyone
c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)})
constraints.append(c)
def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr):
cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T)
return float(abs(sum(cross_cov))) / float(x_in.shape[0])
w = minimize(fun = cross_cov_abs_optm_func,
x0 = old_w,
args = (x, x_control[sensitive_attrs[0]]),
method = 'SLSQP',
options = {"maxiter":100000},
constraints = constraints
)
try:
assert(w.success == True)
except:
print "Optimization problem did not converge.. Check the solution returned by the optimizer."
print "Returned solution is:"
print w
return w.x
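# Minimal usage sketch (the array names and threshold below are hypothetical,
# added only for illustration -- see the docstring above for the full contract):
#
#   import loss_funcs as lf
#   w = train_model(x, y, {"sex": sex_arr}, lf._logistic_loss,
#                   apply_fairness_constraints=1, apply_accuracy_constraint=0,
#                   sep_constraint=0, sensitive_attrs=["sex"],
#                   sensitive_attrs_to_cov_thresh={"sex": 0.0})
#   predicted_labels = np.sign(np.dot(x, w))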
def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma=None):
"""
Computes the cross validation error for the classifier subject to various fairness constraints
This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info.
Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds).
"""
train_folds = []
test_folds = []
n_samples = len(y_all)
train_fold_size = 0.7 # the rest of 0.3 is for testing
# split the data into folds for cross-validation
for i in range(0,num_folds):
perm = range(0,n_samples) # shuffle the data before creating each fold
shuffle(perm)
x_all_perm = x_all[perm]
y_all_perm = y_all[perm]
x_control_all_perm = {}
for k in x_control_all.keys():
x_control_all_perm[k] = np.array(x_control_all[k])[perm]
x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size)
train_folds.append([x_all_train, y_all_train, x_control_all_train])
test_folds.append([x_all_test, y_all_test, x_control_all_test])
def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh):
x_train, y_train, x_control_train = train_data
x_test, y_test, x_control_test = test_data
w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None)
distances_boundary_test = (np.dot(x_test, w)).tolist()
all_class_labels_assigned_test = np.sign(distances_boundary_test)
correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs)
cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs)
distances_boundary_train = (np.dot(x_train, w)).tolist()
all_class_labels_assigned_train = np.sign(distances_boundary_train)
correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs)
cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs)
output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train])
return
output_folds = Queue()
processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)]
# Run processes
for p in processes:
p.start()
    # Get the results
results = [output_folds.get() for p in processes]
for p in processes:
p.join()
test_acc_arr = []
train_acc_arr = []
correlation_dict_test_arr = []
correlation_dict_train_arr = []
cov_dict_test_arr = []
cov_dict_train_arr = []
results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num
for res in results:
fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res
test_acc_arr.append(test_score)
train_acc_arr.append(train_score)
correlation_dict_test_arr.append(correlation_dict_test)
correlation_dict_train_arr.append(correlation_dict_train)
cov_dict_test_arr.append(cov_dict_test)
cov_dict_train_arr.append(cov_dict_train)
return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr
def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name):
correlation_dict = get_avg_correlation_dict(correlation_dict_arr)
non_prot_pos = correlation_dict[s_attr_name][1][1]
prot_pos = correlation_dict[s_attr_name][0][1]
p_rule = (prot_pos / non_prot_pos) * 100.0
print "Accuracy: %0.2f" % (np.mean(acc_arr))
print "Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos)
print "P-rule achieved: %0.0f%%" % (p_rule)
print "Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr]))
print
return p_rule
def compute_p_rule(x_control, class_labels):
""" Compute the p-rule based on Doctrine of disparate impact """
non_prot_all = sum(x_control == 1.0) # non-protected group
prot_all = sum(x_control == 0.0) # protected group
non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0
print
print "Total data points: %d" % (len(x_control))
print "# non-protected examples: %d" % (non_prot_all)
print "# protected examples: %d" % (prot_all)
print "Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all)
print "Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all)
print "P-rule is: %0.0f%%" % ( p_rule )
return p_rule
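# Worked example (hypothetical counts, for illustration): with 100
# non-protected examples of which 60 are in the positive class, and 100
# protected examples of which 30 are, frac_prot_pos = 0.30 and
# frac_non_prot_pos = 0.60, so p_rule = (0.30 / 0.60) * 100 = 50%. Under the
# "80% rule" reading of disparate impact, this classifier would need
# p_rule >= 80 to be considered fair by this measure.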
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def check_binary(arr):
"give an array of values, see if the values are only 0 and 1"
s = sorted(set(arr))
if s[0] == 0 and s[1] == 1:
return True
else:
return False
def get_one_hot_encoding(in_arr):
"""
    input: 1-D arr with int vals -- if the vals are not ints, prints an error and returns None
output: m (ndarray): one-hot encoded matrix
d (dict): also returns a dictionary original_val -> column in encoded matrix
"""
for k in in_arr:
if str(type(k)) != "<type 'numpy.float64'>" and type(k) != int and type(k) != np.int64:
print str(type(k))
print "************* ERROR: Input arr does not have integer types"
return None
in_arr = np.array(in_arr, dtype=int)
assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr
attr_vals_uniq_sorted = sorted(list(set(in_arr)))
num_uniq_vals = len(attr_vals_uniq_sorted)
if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1):
return in_arr, None
index_dict = {} # value to the column number
for i in range(0,len(attr_vals_uniq_sorted)):
val = attr_vals_uniq_sorted[i]
index_dict[val] = i
out_arr = []
for i in range(0,len(in_arr)):
tup = np.zeros(num_uniq_vals)
val = in_arr[i]
ind = index_dict[val]
tup[ind] = 1 # set that value of tuple to 1
out_arr.append(tup)
return np.array(out_arr), index_dict
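# Example (illustrative): a three-valued attribute expands to three columns,
# while a 0/1 attribute is returned unchanged with index_dict = None.
#
#   get_one_hot_encoding(np.array([0, 1, 2, 1]))
#   # -> (array([[1., 0., 0.],
#   #            [0., 1., 0.],
#   #            [0., 0., 1.],
#   #            [0., 1., 0.]]), {0: 0, 1: 1, 2: 2})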
def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted):
"""
returns the train/test accuracy of the model
we either pass the model (w)
else we pass y_predicted
"""
if model is not None and y_test_predicted is not None:
print "Either the model (w) or the predicted labels should be None"
raise Exception("Either the model (w) or the predicted labels should be None")
if model is not None:
y_test_predicted = np.sign(np.dot(x_test, model))
y_train_predicted = np.sign(np.dot(x_train, model))
def get_accuracy(y, Y_predicted):
correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy, sum(correct_answers)
train_score, correct_answers_train = get_accuracy(y_train, y_train_predicted)
test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted)
return train_score, test_score, correct_answers_train, correct_answers_test
def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose):
"""
    The covariance is computed between the sensitive attribute value and the distance from the boundary.
    If the model is None, we assume that y_arr_dist_boundary contains the distance from the decision boundary.
    If the model is not None, we just compute the dot product of the model and x_arr.
    For the case of SVM, we pass the distance from the boundary because the intercept is internalized by the class,
    and we have to compute the distance using the project function.
    This function returns a negative value if the constraint specified by the thresh parameter is not satisfied,
    and a non-negative value otherwise:
    if the return value is >= 0, then the constraint is satisfied.
"""
assert(x_arr.shape[0] == x_control.shape[0])
if len(x_control.shape) > 1: # make sure we just have one column in the array
assert(x_control.shape[1] == 1)
arr = []
if model is None:
arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
arr = np.array(arr, dtype=np.float64)
cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control))
ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
# ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied
if verbose is True:
print "Covariance is", cov
print "Diff is:", ans
print
return ans
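# Numeric sketch (hypothetical values, for illustration): with
# x_control = [0, 0, 1, 1] (mean 0.5) and signed distances arr = [-1, -3, 2, 4],
#   cov = dot(x_control - 0.5, arr) / 4 = (0.5 + 1.5 + 1.0 + 2.0) / 4 = 1.25,
# so with thresh = 0 the function returns 0 - |1.25| = -1.25 < 0 and the
# fairness constraint is violated; the optimizer has to shrink |cov| below
# thresh to satisfy it.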
def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs):
"""
    returns the covariance between sensitive features and distance from the decision boundary
"""
arr = []
if model is None:
        arr = y_arr_dist_boundary # simply the output labels
else:
arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label
sensitive_attrs_to_cov_original = {}
for attr in sensitive_attrs:
attr_arr = x_control[attr]
bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals
if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
thresh = 0
if bin_attr:
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False)
sensitive_attrs_to_cov_original[attr] = cov
else: # sensitive feature has more than 2 categorical values
cov_arr = []
sensitive_attrs_to_cov_original[attr] = {}
for attr_val, ind in index_dict.items():
t = attr_arr_transformed[:,ind]
cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False)
sensitive_attrs_to_cov_original[attr][attr_val] = cov
cov_arr.append(abs(cov))
cov = max(cov_arr)
return sensitive_attrs_to_cov_original
def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs):
"""
returns the fraction in positive class for sensitive feature values
"""
if model is not None:
y_predicted = np.sign(np.dot(x_test, model))
y_predicted = np.array(y_predicted)
out_dict = {}
for attr in sensitive_attrs:
attr_val = []
for v in x_control_test[attr]: attr_val.append(v)
assert(len(attr_val) == len(y_predicted))
total_per_val = defaultdict(int)
attr_to_class_labels_dict = defaultdict(lambda: defaultdict(int))
for i in range(0, len(y_predicted)):
val = attr_val[i]
label = y_predicted[i]
        # val = attr_val_int_mapping_dict_reversed[val] # change values from integers to actual names
total_per_val[val] += 1
attr_to_class_labels_dict[val][label] += 1
class_labels = set(y_predicted.tolist())
local_dict_1 = {}
for k1,v1 in attr_to_class_labels_dict.items():
total_this_val = total_per_val[k1]
local_dict_2 = {}
for k2 in class_labels: # the order should be the same for printing
v2 = v1[k2]
f = float(v2) * 100.0 / float(total_this_val)
local_dict_2[k2] = f
local_dict_1[k1] = local_dict_2
out_dict[attr] = local_dict_1
return out_dict
def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh):
"""
get the list of constraints to be fed to the minimizer
"""
constraints = []
for attr in sensitive_attrs:
attr_arr = x_control_train[attr]
attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)
if index_dict is None: # binary attribute
thresh = sensitive_attrs_to_cov_thresh[attr]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)})
constraints.append(c)
else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately
for attr_val, ind in index_dict.items():
attr_name = attr_val
print attr, attr_name, sensitive_attrs_to_cov_thresh[attr]
thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
t = attr_arr_transformed[:,ind]
c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)})
constraints.append(c)
return constraints
def split_into_train_test(x_all, y_all, x_control_all, train_fold_size):
split_point = int(round(float(x_all.shape[0]) * train_fold_size))
x_all_train = x_all[:split_point]
x_all_test = x_all[split_point:]
y_all_train = y_all[:split_point]
y_all_test = y_all[split_point:]
x_control_all_train = {}
x_control_all_test = {}
for k in x_control_all.keys():
x_control_all_train[k] = x_control_all[k][:split_point]
x_control_all_test[k] = x_control_all[k][split_point:]
return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test
def get_avg_correlation_dict(correlation_dict_arr):
# make the structure for the correlation dict
correlation_dict_avg = {}
# print correlation_dict_arr
for k,v in correlation_dict_arr[0].items():
correlation_dict_avg[k] = {}
for feature_val, feature_dict in v.items():
correlation_dict_avg[k][feature_val] = {}
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = []
# populate the correlation dict
for correlation_dict in correlation_dict_arr:
for k,v in correlation_dict.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label].append(frac_class)
# now take the averages
for k,v in correlation_dict_avg.items():
for feature_val, feature_dict in v.items():
for class_label, frac_class_arr in feature_dict.items():
correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr)
return correlation_dict_avg
def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs):
    # vary the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness
it = 0.05
cov_range = np.arange(1.0, 0.0-it, -it).tolist()
if apply_accuracy_constraint == True:
if sep_constraint == False:
it = 0.1
cov_range = np.arange(0.0, 1.0 + it, it).tolist()
if sep_constraint == True:
cov_range = [0,1,5,10,20,50,100,500,1000]
positive_class_label = 1 # positive class is +1
train_acc = []
test_acc = []
positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive
# first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0)
for c in cov_range:
print "LOG: testing for multiplicative factor: %0.2f" % c
sensitive_attrs_to_cov_original_arr_multiplied = []
for sensitive_attrs_to_cov_original in cov_dict_train_arr:
sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original)
for k in sensitive_attrs_to_cov_thresh.keys():
v = sensitive_attrs_to_cov_thresh[k]
if type(v) == type({}):
for k1 in v.keys():
v[k1] = v[k1] * c
else:
sensitive_attrs_to_cov_thresh[k] = v * c
sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh)
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c)
test_acc.append(np.mean(test_acc_arr))
correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr)
correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr)
        # just plot the correlations for the first sensitive attr, the plotting can be extended for the other values, but as a proof of concept, we will just show for one
s = sensitive_attrs[0]
for k,v in correlation_dict_test[s].items():
if v.get(positive_class_label) is None:
positive_per_category[k].append(0.0)
else:
positive_per_category[k].append(v[positive_class_label])
positive_per_category = dict(positive_per_category)
p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0
ax = plt.subplot(2,1,1)
plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected")
plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected")
ax.set_xlim([min(cov_range), max(cov_range)])
plt.xlabel('Multiplicative loss factor')
plt.ylabel('Perc. in positive class')
if apply_accuracy_constraint == False:
plt.gca().invert_xaxis()
plt.xlabel('Multiplicative covariance factor (c)')
ax.legend()
ax = plt.subplot(2,1,2)
plt.scatter(p_rule_arr, test_acc, color="red")
ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)])
plt.xlabel('P% rule')
plt.ylabel('Accuracy')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.show()
def get_line_coordinates(w, x1, x2):
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2 | 27,528 | 41.352308 | 357 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/nb_group.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import sys
max_inp = 1000
random.seed()
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test=0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
out = clf.predict([inp])[0]
num_test+=1
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
print fixed,max,min, max-min, num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 9,066 | 47.228723 | 95 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/nb_causal.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
import itertools
import sys
max_inp = 1000
random.seed()
num_test=0
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = GaussianNB()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
if(','.join(str(inp)) in curr_map.keys()):
pos_found=-1
print "here",inp
break
#print inp,num_inp,max_inp
num_test+=1
out = clf.predict([inp])[0]
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
break
if(pos_found==-1):
continue
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
num_inp+=1
print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 10,452 | 47.845794 | 133 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/svm_group.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import itertools
import sys
max_inp = 1000
random.seed()
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
out = clf.predict([inp])[0]
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 9,067 | 46.726316 | 95 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/lr_group.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import sys
max_inp = 1000
random.seed()
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
#print fix_atr, num
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
out = clf.predict([inp])[0]
num_inp+=1
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
print fixed,max,min, max-min
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 3,976 | 25.691275 | 81 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/lr_causal.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
import itertools
import sys
max_inp = 1000
random.seed()
num_test=0
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = linear_model.LinearRegression()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, max_inp) :
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
if(','.join(str(inp)) in curr_map.keys()):
pos_found=-1
break
num_test+=1
out = clf.predict([inp])[0]
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
break
if(pos_found==-1):
continue
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
num_inp+=1
print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 4,436 | 24.796512 | 85 | py |
Themis | Themis-master/subjectSystems/fairness_unaware/svm_causal.py | from __future__ import division
import random
import math
import os
from collections import defaultdict
from sklearn import linear_model
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import itertools
import sys
max_inp = 1000
random.seed()
num_test=0
X=[]
Y=[]
i=0
with open("cleaned_train", "r") as ins:
for line in ins:
line = line.strip()
line1 = line.split(',')
if(i==0):
i+=1
continue
X.append(map(int,line1[:-1]))
Y.append(int(line1[-1]))
clf = svm.SVC()
clf.fit(X, Y)
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
map={}
def check_ratio(fixed,clf):
num_test = 0
fix_atr = []
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
#print fix_atr, num
max = -1
min = 100
#print num
val = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#if(val%10000==0):
#print val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
curr_map={}
for i3 in range(0, 70) :
if(num_inp>=max_inp):
break;
for i12 in range(0,100):
if(num_inp>=max_inp):
break;
for i10 in range(0,100):
if(num_inp>=max_inp):
break;
for i11 in range(0,40):
if(num_inp>=max_inp):
break;
for i6 in range(0,14):
if(num_inp>=max_inp):
break;
for i5 in range(0,7):
if(num_inp>=max_inp):
break;
for i7 in range(0,6):
if(num_inp>=max_inp):
break;
for i4 in range(0,16):
if(num_inp>=max_inp):
break;
for i13 in range(0,40):
if(num_inp>=max_inp):
break;
for i1 in range(0,10):
if(num_inp>=max_inp):
break;
for i2 in range(0,8):
if(num_inp>=max_inp):
break;
for i9 in range(0,2):
if(num_inp>=max_inp):
break;
for i8 in range(0,5):
if(num_inp>=max_inp):
break;
if(inp_fix[0]==''):
inp[0]=(random.randint(0,9));
else:
inp[0]=inp_fix[0]
if(inp_fix[1]==''):
inp[1]=(random.randint(0,7));
else:
inp[1]=inp_fix[1]
if(inp_fix[2]==''):
inp[2]=(random.randint(0,39));
else:
inp[2]=inp_fix[2]
if(inp_fix[3]==''):
inp[3]=(random.randint(0,15));
else:
inp[3]=inp_fix[3]
if(inp_fix[4]==''):
inp[4]=(random.randint(0,6));
else:
inp[4]=inp_fix[4]
if(inp_fix[5]==''):
inp[5]=(random.randint(0,13));
else:
inp[5]=inp_fix[5]
if(inp_fix[6]==''):
inp[6]=(random.randint(0,5));
else:
inp[6]=inp_fix[6]
if(inp_fix[7]==''):
inp[7]=(random.randint(0,5));
else:
inp[7]=inp_fix[7]
if(inp_fix[8]==''):
inp[8]=(random.randint(0,5));
else:
inp[8]=inp_fix[8]
if(inp_fix[9]==''):
inp[9]=(random.randint(0,99));
else:
inp[9]=inp_fix[9]
if(inp_fix[10]==''):
inp[10]=(random.randint(0,39));
else:
inp[10]=inp_fix[10]
if(inp_fix[11]==''):
inp[11]=(random.randint(0,99));
else:
inp[11]=inp_fix[11]
if(inp_fix[12]==''):
inp[12]=(random.randint(0,39));
else:
inp[12]=inp_fix[12]
val = 0
pos_found = 0
neg_found = 0
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
#print val
while i>=0:
inp[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
val+=1
if(','.join(str(inp)) in curr_map.keys()):
pos_found=-1
print "here",inp
break
#print inp,num_inp,max_inp
num_test+=1
out = clf.predict([inp])[0]
if(out>0):
pos_found=1
curr_map[','.join(str(inp))] = 1
else:
neg_found = 1
curr_map[','.join(str(inp))] = 0
if(pos_found==1 and neg_found==1):
break
if(pos_found==-1):
continue
if(pos_found==1 and neg_found==1):
pos+=1
else:
neg+=1
num_inp+=1
print fixed,pos,neg, pos*1.0/(neg+pos),num_test
def findsubsets(S,m):
return set(itertools.combinations(S, m))
for i in range(0,13):
out = findsubsets([0,1,2,3,4,5,6,7,8,9,10,11,12],i+1)
for a in out:
fixed = [0,0,0,0,0,0,0,0,0,0,0,0,0]
for j in range(0,13):
if j in a:
fixed[j]=1
#print fixed
check_ratio(fixed,clf)
| 10,548 | 47.837963 | 133 | py |
Themis | Themis-master/Themis2.0/themis2.py | # Themis 2.0
#
# By: Rico Angell
from __future__ import division
import argparse
import subprocess
from itertools import chain, combinations, product
import math
import random
import scipy.stats as st
import xml.etree.ElementTree as ET
import copy
class Input:
"""
Class to define an input characteristic to the software.
Attributes
----------
name : str
Name of the input.
values : list
List of the possible values for this input.
Methods
-------
get_random_input()
Returns a random element from the values list.
"""
def __init__(self, name="", values=[], kind="", ub=None, lb=None):
try:
self.name = name
self.values = [str(v) for v in values]
self.kind = kind
if (ub != None and lb != None):
self.lb = lb
self.ub = ub
except:
print("Themis input initialization corrupted!")
def get_random_input(self):
"""
Return a random value from self.values
"""
try:
return random.choice(self.values)
except:
print("Error in get_random_input")
def __str__(self):
try:
s = "\nInput\n"
s += "-----\n"
s += "Name: " + self.name + "\n"
s += "Values: " + ", ".join(self.values)
return s
except:
print("Issue with returning a string representation of the input")
__repr__ = __str__
class Test:
"""
Data structure for storing tests.
Attributes
----------
function : str
Name of the function to call
i_fields : list of `Input.name`
The inputs of interest, i.e. compute the casual discrimination wrt
these fields.
threshold : float in [0,1]
At least this level of discrimination to be considered.
conf : float in [0, 1]
        The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
group : bool
Search for group discrimination if `True`.
causal : bool
Search for causal discrimination if `True`.
"""
def __init__(self, function="", i_fields=[], conf=0.999, margin=0.0001,
group=False, causal=False, threshold=0.15):
try:
self.function = function
self.i_fields = i_fields
self.conf = conf
self.margin = margin
self.group = group
self.causal = causal
self.threshold = threshold
except:
print("Themis test initialization input")
def __str__(self):
try:
s = "\n\n"
            # Alters output based on the test that was run
if self.function == "discrimination_search":
print( "Ran discrimination search for: \n")
if (self.group == True):
s += "Group Discrimination\n"
if (self.causal == True):
s += "Causal Discrimination\n\n"
elif self.function == "causal_discrimination":
s += "Calculated causal discrimination for the following sets of inputs: \n (" + ", ".join(self.i_fields) + ") \n\n"
elif self.function == "group_discrimination":
s += "Calculated group discrimination for the following sets of inputs: \n (" + ", ".join(self.i_fields) + ") \n\n"
return s
except:
print("Issue with returning a string version of the test details")
__repr__ = __str__
class Themis:
"""
Compute discrimination for a piece of software.
Attributes
----------
Methods
-------
"""
def __init__(self, xml_fname=""):
"""
Initialize Themis from xml file.
Parameters
----------
xml_fname : string
name of the xml file we want to import settings from.
"""
if xml_fname != "":
try:
tree = ET.parse(xml_fname)
root = tree.getroot()
self.max_samples = int(root.find("max_samples").text)
self.min_samples = int(root.find("min_samples").text)
self.rand_seed = int(root.find("seed").text)
self.software_name = root.find("name").text
self.command = root.find("command").text.strip()
self._build_input_space(args=root.find("inputs"))
self._load_tests(args=root.find("tests"))
self._cache = {}
except:
print("issue with reading xml file and initializing Themis")
else:
self._cache = {}
self.tests = []
def run(self):
"""
Run Themis tests specified in the configuration file.
"""
## try:
#key = inputs tuple
# value = percentage from test execution
self.group_tests = {}
self.causal_tests = {}
self.group_search_results = {}
self.causal_search_results = {}
self.group_measurement_results = []
self.causal_measurement_results = []
self.simple_discrim_output = ""
self.detailed_discrim_output = ""
for test in self.tests:
print (test)
random.seed(self.rand_seed)
#print ("--------------------------------------------------")
if test.function == "causal_discrimination":
suite, p, self.causal_pairs = self.causal_discrimination(i_fields=test.i_fields,
conf=test.conf,
margin=test.margin)
# store tests for output strings
causal_key = tuple(test.i_fields)
                self.causal_tests[causal_key] = "{:.1%}".format(p)
## self.output += str(test)
## op = 'Your software discriminates on the above inputs ' + "{:.1%}".format(p) + ' of the time.'
## self.output += op
elif test.function == "group_discrimination":
                suite, p, _ = self.group_discrimination(i_fields=test.i_fields,
                                                        conf=test.conf,
                                                        margin=test.margin)
# store tests for output strings
group_key = tuple(test.i_fields)
                self.group_tests[group_key] = "{:.1%}".format(p)
#save min_group and max_group
elif test.function == "discrimination_search":
print ("running discrim search")
print (test.conf)
print (test.margin)
print(test.group)
print(test.causal)
print(test.threshold)
g, c = self.discrimination_search(threshold=test.threshold,
conf=test.conf,
margin=test.margin,
group=test.group,
causal=test.causal)
if g:
for key, value in g.items():
values = ", ".join(key) + " --> " + "{:.1%}".format(value) + "\n"
self.group_search_results[tuple(key)] = "{:.1%}".format(value)
if c:
for key, value in c.items():
values = ", ".join(key) + " --> " + "{:.1%}".format(value) + "\n"
self.causal_search_results[tuple(key)] = "{:.1%}".format(value)
##
## print ("Group Discrimination Tests: \n")
## for key,value in self.group_tests.items():
## print ('Input(s): ' + str(key) + '--->' + str(value) + "\n")
##
## print ("Causal Discrimination Tests: \n")
## for key, value in self.causal_tests.items():
## print ('Input(s): ' + str(key) + '--->' + str(value) + "\n")
self.short_output = ""
self.extended_output = ""
## except:
## print ("Issue in main Themis run")
def group_discrimination(self, i_fields=None, conf=0.999, margin=0.0001):
"""
Compute the group discrimination for characteristics `i_fields`.
Parameters
----------
i_fields : list of `Input.name`
            The inputs of interest, i.e. compute the group discrimination wrt
these fields.
conf : float in [0, 1]
            The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
Returns
-------
        tuple
            * list of dict
                The test suite used to compute group discrimination.
            * float
                The percentage of group discrimination (max - min over the
                fixed assignments of `i_fields`).
            * tuple
                (min score, min assignment, max score, max assignment).
"""
assert i_fields != None
## try:
min_group_score, max_group_score, test_suite, p = float("inf"), 0, [], 0
min_group_assign, max_group_assign = "",""
rand_fields = self._all_other_fields(i_fields)
for fixed_sub_assign in self._gen_all_sub_inputs(args=i_fields):
count = 0
for num_sampled in range(1, self.max_samples):
assign = self._new_random_sub_input(args=rand_fields)
assign.update(fixed_sub_assign)
self._add_assignment(test_suite, assign)
count += self._get_test_result(assign=assign)
p, end = self._end_condition(count, num_sampled, conf, margin)
if end:
break
print (fixed_sub_assign[i_fields[0]] + "--> " + str(p))
if p < min_group_score:
min_group_score = p
min_group_assign = fixed_sub_assign[i_fields[0]]
if p > max_group_score:
max_group_score = p
max_group_assign = fixed_sub_assign[i_fields[0]]
return test_suite, (max_group_score - min_group_score), (min_group_score,min_group_assign,max_group_score,max_group_assign)
## except:
## print("Issue in group_discrimination")
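    # Illustrative reading of the score (hypothetical numbers): if
    # i_fields = ["gender"] and the measured fraction of positive outputs is
    # 0.40 with gender fixed to one value and 0.55 with it fixed to the other,
    # the method returns max - min = 0.15, i.e. 15% group discrimination,
    # together with the extreme assignments themselves.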
def causal_discrimination(self, i_fields=None, conf=0.999, margin=0.0001):
"""
Compute the causal discrimination for characteristics `i_fields`.
Parameters
----------
i_fields : list of `Input.name`
            The inputs of interest, i.e. compute the causal discrimination wrt
these fields.
conf : float in [0, 1]
            The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
Returns
-------
        tuple
            * list of dict
                The test suite used to compute causal discrimination.
            * float
                The percentage of causal discrimination.
            * list of tuple
                The pairs of inputs whose outputs differed (the causal pairs).
"""
## try:
assert i_fields != None
count, test_suite, p = 0, [], 0
f_fields = self._all_other_fields(i_fields) # fixed fields
causal_pairs = []
for num_sampled in range(1, self.max_samples):
fixed_assign = self._new_random_sub_input(args=f_fields)
singular_assign = self._new_random_sub_input(args=i_fields)
assign = self._merge_assignments(fixed_assign, singular_assign)
print ("Assign --> " , assign)
causal_assign1 = copy.deepcopy(assign)
self._add_assignment(test_suite, assign)
result = self._get_test_result(assign=assign)
for dyn_sub_assign in self._gen_all_sub_inputs(args=i_fields):
if dyn_sub_assign == singular_assign:
continue
assign.update(dyn_sub_assign)
self._add_assignment(test_suite, assign)
if self._get_test_result(assign=assign) != result:
count += 1
causal_pairs.append((causal_assign1,copy.deepcopy(assign)))
break
p, end = self._end_condition(count, num_sampled, conf, margin)
if end:
break
return test_suite, p, causal_pairs
## except:
## print("Issue in causal discrimination")
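    # Illustrative reading (hypothetical numbers): each sampled individual is
    # re-run with only i_fields perturbed; if, say, 80 of 1000 sampled
    # individuals flip their output when only "gender" changes, the method
    # reports p = 0.08, i.e. 8% causal discrimination, along with the pairs
    # of inputs that flipped.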
def discrimination_search(self, threshold=0.2, conf=0.99, margin=0.01,
group=False, causal=False):
"""
        Find all minimal subsets of characteristics that discriminate.
Choose to search by group or causally and set a threshold for
discrimination.
Parameters
----------
threshold : float in [0,1]
At least this level of discrimination to be considered.
conf : float in [0, 1]
            The z* confidence level (percentage of the normal distribution).
margin : float in [0, 1]
The margin of error for the confidence.
group : bool
Search for group discrimination if `True`.
causal : bool
Search for causal discrimination if `True`.
Returns
-------
tuple of dict
The lists of subsets of the input characteristics that discriminate.
"""
## try:
assert group or causal
group_d_scores, causal_d_scores = {}, {}
for sub in self._all_relevant_subs(self.input_order):
if self._supset(list(set(group_d_scores.keys())|
set(causal_d_scores.keys())), sub):
continue
if group:
suite, p, data = self.group_discrimination(i_fields=sub, conf=conf,
margin=margin)
if p > threshold:
group_d_scores[sub] = p
self.group_measurement_results.append(MeasurementResult(causal=False, i_fields=sub, p=p, testsuite=suite, data=data))
if causal:
suite, p, cp = self.causal_discrimination(i_fields=sub, conf=conf,
margin=margin)
print (sub)
print(conf)
print(margin)
print (p)
print (cp)
if p > threshold:
causal_d_scores[sub] = p
self.causal_measurement_results.append(MeasurementResult(causal=True, i_fields=sub, p=p, testsuite=suite, data=cp))
return group_d_scores, causal_d_scores
def _all_relevant_subs(self, xs):
try:
return chain.from_iterable(combinations(xs, n) \
for n in range(1, len(xs)))
        except:
            print("Issue in returning relevant subsets")
def _supset(self, list_of_small, big):
try:
for small in list_of_small:
next_subset = False
for x in small:
if x not in big:
next_subset = True
break
if not next_subset:
return True
except:
print("Issue in finding superset")
def _new_random_sub_input(self, args=[]):
try:
assert args
return {name : self.inputs[name].get_random_input() for name in args}
except:
print("Issue in getting a random subset")
def _gen_all_sub_inputs(self, args=[]):
assert args
try:
vals_of_args = [self.inputs[arg].values for arg in args]
combos = [list(elt) for elt in list(product(*vals_of_args))]
return ({arg : elt[idx] for idx, arg in enumerate(args)} \
for elt in combos)
except:
print("Issue in generate all")
    def _get_test_result(self, assign=None):
        assert assign != None
        tupled_args = self._tuple(assign)
        if tupled_args in self._cache.keys():
            return self._cache[tupled_args]
        cmd = self.command + " " + " ".join(tupled_args)
        # Run the software under test once and memoize the boolean outcome.
        output = subprocess.getoutput(cmd).strip()
        self._cache[tupled_args] = (output == "1")
        return self._cache[tupled_args]
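    # Example of the command built above (assuming command = "python loan.py"
    # and input_order = ["sex", "race", "income"]):
    #   python loan.py male green 0...50000
    # The subject program prints "1" (pass) or "0" (fail). Outcomes are
    # memoized in self._cache keyed by the argument tuple, so repeated
    # assignments never re-run the program.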
def _add_assignment(self, test_suite, assign):
try:
if assign not in test_suite:
test_suite.append(assign)
except:
print("Issue in assigining to the test_suite")
def _all_other_fields(self, i_fields):
try:
return [f for f in self.input_order if f not in i_fields]
except:
print("Issue in _all_other_fields")
def _end_condition(self, count, num_sampled, conf, margin):
try:
p = 0
if num_sampled > self.min_samples:
p = count / num_sampled
error = st.norm.ppf(conf)*math.sqrt((p*(1-p))/num_sampled)
return p, error < margin
return p, False
except:
print("Issue in _end_condition. Possibly a divide by zero error")
def _merge_assignments(self, assign1, assign2):
try:
merged = {}
merged.update(assign1)
merged.update(assign2)
return merged
except:
print("Issue in merging assigments")
def _tuple(self, assign=None):
try:
assert assign != None
return tuple(str(assign[name]) for name in self.input_order)
except:
print("Issue in generating tuples for tests")
def _untuple(self, tupled_args=None):
assert tupled_args != None
try:
listed_args = list(tupled_args)
return {name : listed_args[idx] \
for idx, name in enumerate(self.input_order)}
except:
print("Issue in untupling")
def _build_input_space(self, args=None):
assert args != None
try:
self.inputs = {}
self.input_order = []
self.input_names = []
for obj in args.findall("input"):
name = obj.find("name").text
self.input_names.append(name)
values = []
t = obj.find("type").text
if t == "categorical":
values = [elt.text
for elt in obj.find("values").findall("value")]
self.inputs[name] = Input(name=name, values=values, kind="categorical")
elif t == "continuousInt":
lowerbound = int(obj.find("bounds").find("lowerbound").text)
upperbound = int(obj.find("bounds").find("upperbound").text)+1
values = range(int(obj.find("bounds").find("lowerbound").text),
int(obj.find("bounds").find("upperbound").text)+1)
self.inputs[name] = Input(name=name, values=values, kind="continuousInt", lb = str(lowerbound), ub = str(upperbound))
else:
assert False
self.input_order.append(name)
except:
print("Issue in building the input space/scope. Major problem")
def _add_input(self, name=None, kind=None, values=None):
assert name != None
assert kind != None
assert values != None
try:
try:
self.inputs
except AttributeError:
self.inputs = {}
self.input_order = []
self.input_names = []
local_values = []
self.input_names.append(name)
if kind == "Categorical":
i_values = values.split(',')
self.inputs[name] = Input(name=name, values=i_values, kind="categorical")
elif kind == "Continuous Int":
ulb = values.split('-')
lowerbound = ulb[0]
upperbound = ulb[1]
local_values = range(int(lowerbound),int(upperbound)+1)
self.inputs[name] = Input(name=name, values=local_values, kind="continuousInt", lb = str(lowerbound), ub = str(upperbound))
else:
assert False
self.input_order.append(name)
except:
print("Issue in adding input to input space.")
def _load_tests(self, args=None):
assert args != None
try:
self.tests = []
for obj in args.findall("test"):
test = Test()
test.function = obj.find("function").text
if test.function == "causal_discrimination" or \
test.function == "group_discrimination":
test.i_fields = [elt.text
for elt in obj.find("i_fields").findall("input_name")]
if test.function == "discrimination_search":
test.group = bool(obj.findall("group"))
test.causal = bool(obj.findall("causal"))
test.threshold = float(obj.find("threshold").text)
test.conf = float(obj.find("conf").text)
test.margin = float(obj.find("margin").text)
self.tests.append(test)
except:
print("Issue in loading the tests")
def _new_test(self, group, causal, name=None, conf=None, margin=None, i_fields=None, threshold=0.20):
assert name != None
assert conf != None
assert margin != None
assert i_fields != None
        try:
            self.tests
        except AttributeError:
            self.tests = []
try:
test = Test()
test.function = name
test.conf = conf
test.margin = margin
test.group = group
test.causal = causal
test.i_fields = i_fields
test.threshold = threshold
self.tests.append(test)
except:
print("Issue creating test")
class MeasurementResult(object):
def __init__(self, causal=False, i_fields=None, p=None, testsuite=None, data=None):
self.causal = causal
self.i_fields = i_fields
self.p = p
self.testsuite = testsuite
self.data = data
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description="Run Themis.")
parser.add_argument("XML_FILE", type=str, nargs=1,
help="XML configuration file")
args = parser.parse_args()
t = Themis(xml_fname=args.XML_FILE[0])
t.run()
except:
print("Issue in the main call to Themis i.e. Driver")
| 23,477 | 34.626707 | 139 | py |
Themis | Themis-master/Themis2.0/software.py | import sys
sex = sys.argv[1]
race = sys.argv[3]
if(sex=="Male" and race=="Red"):
print "1"
else:
print "0"
| 116 | 12 | 32 | py |
Themis | Themis-master/Themis2.0/grid2.py | import sys
from PyQt5.QtWidgets import *
import PyQt5.QtGui as QtGui
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import xml.etree.ElementTree as ET
import themis2
class App(QDialog):
def __init__(self):
super().__init__()
self.title = 'Themis 2.0'
self.left = 500
self.top = 200
self.width = 600
self.height = 700
self.dialog = None
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createThemisGrid()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox4)
windowLayout.addWidget(self.horizontalGroupBox)
windowLayout.addWidget(self.horizontalGroupBox2)
windowLayout.addWidget(self.horizontalGroupBox3)
self.setLayout(windowLayout)
self.show()
def createThemisGrid(self):
self.horizontalGroupBox4 = QGroupBox()
layout4 = QGridLayout()
self.tester = themis2.Themis()
command_box_label = QLabel("Command:")
self.command_box = QLineEdit(self)
layout4.addWidget(command_box_label,1, 0)
layout4.addWidget(self.command_box,1, 1)
self.createInputsTable()
self.createTestsTable()
self.horizontalGroupBox = QGroupBox("Inputs")
self.horizontalGroupBox.setFont(QFont("", pointSize=14))
layout = QGridLayout()
layout.setSpacing(5)
add_button = QPushButton('Add Input')
add_button.setAutoDefault(False)
add_button.clicked.connect(self.addInput)
layout.addWidget(self.inputs_table,0,1, 3, 4)
layout.addWidget(add_button,5,1)
self.horizontalGroupBox2 = QGroupBox("Measurements")
self.horizontalGroupBox2.setFont(QFont("", pointSize=14))
layout2 = QGridLayout()
add_test_button = QPushButton('Add Measurement')
add_test_button.setAutoDefault(False)
add_test_button.clicked.connect(self.addTest)
layout2.addWidget(self.tests_table, 5, 4, 4, 4)
layout2.addWidget(add_test_button, 9, 4)
self.horizontalGroupBox3 = QGroupBox("")
self.layout3 = QGridLayout()
self.results_box = QTextEdit()
self.results_box.setReadOnly(True)
run_button = QPushButton("Run")
run_button.clicked.connect(self.handleRunButton)
run_button.setAutoDefault(False)
load_button = QPushButton('Load')
load_button.setAutoDefault(False)
save_button = QPushButton('Save')
save_button.setAutoDefault(False)
save_button.clicked.connect(self.handleSaveButton)
load_button.clicked.connect(self.handleLoadButton)
self.layout3.addWidget(load_button,1, 1)
self.layout3.addWidget(save_button,1, 2)
self.layout3.addWidget(run_button,1, 3)
self.layout3.addWidget(self.results_box, 2, 1, 5, 5)
self.horizontalGroupBox3.setLayout(self.layout3)
self.horizontalGroupBox.setLayout(layout)
self.horizontalGroupBox2.setLayout(layout2)
self.horizontalGroupBox4.setLayout(layout4)
def addTest(self):
self.dialog = EditTestWindow(True)
self.dialog.setModal(True)
self.dialog.show()
if self.dialog.exec_():
tests = self.dialog.all_measurements
conf_values = self.dialog.all_confs
margins = self.dialog.all_margins
inputs = self.dialog.all_inputs
            # TODO: thresholds collected by the dialog are not yet passed on.
for i in range(len(tests)):
print ("Measurement: " + tests[i])
print ("Confidence: " + conf_values[i])
print ("Margin: " + margins[i])
print ("Inputs of interest: " + ",".join(inputs[i]))
test = tests[i]
print (test)
conf = conf_values[i]
margin = margins[i]
i_fields = list(inputs[i])
self.tests_table.insertRow(i)
self.createEditButtons(self.tests_table, i, test=None)
self.setTestTableValue(conf, i, 2)
self.setTestTableValue(margin, i, 3)
self.setTestTableValue(",".join(i_fields), i, 4)
if test == "Group Discrimination":
test = self.tester._new_test(False, False, "group_discrimination", float(conf), float(margin), i_fields)
self.setTestTableValue(test, i, 1)
elif test == "Causal Discrimination":
test = self.tester._new_test(False, False, "causal_discrimination", float(conf), float(margin), i_fields)
self.setTestTableValue(test, i, 1)
else:
if test == "Discrimination Search Causal":
self.setTestTableValue("Search for causal discrimination", i, 1)
test = self.tester._new_test( False, True, "discrimination_search", float(conf), float(margin),i_fields)
elif test == "Discrimination Search Group":
print("HERE 1")
self.setTestTableValue("Search for group discrimination", i, 1)
test = self.tester._new_test( True, False, "discrimination_search", float(conf), float(margin), i_fields)
else:
print ("HERE 2")
self.setTestTableValue("Search for group & causal discrimination", i, 1)
test = self.tester._new_test(True, True, "discrimination_search", float(conf), float(margin), i_fields)
self.resizeCells(self.tests_table)
def addInput(self):
dialog = EditInputWindow(True)
dialog.setModal(True)
dialog.show()
if dialog.exec_():
for i in range(len(dialog.all_names)):
print("Name: " + dialog.all_names[i])
print("Type: " + dialog.all_types[i])
print("Values: " + dialog.all_values[i])
name = dialog.all_names[i]
kind = dialog.all_types[i]
values = dialog.all_values[i]
self.inputs_table.insertRow(i)
self.createEditButtons(self.inputs_table, i, test=None)
self.setCellValue(name, i, 1)
self.setCellValue(kind, i, 2)
if kind == "Categorical":
self.setCellValue("{" + values + "}", i, 3)
self.tester._add_input(name, "Categorical", values)
else:
ulb = values.split('-')
lb = ulb[0]
ub = ulb[1]
self.setCellValue("(" + lb + "-" + ub + ")", i, 3)
self.tester._add_input(name, "Continuous Int", values)
self.resizeCells(self.inputs_table)
def handleRunButton(self):
        # Sampling settings are hard-coded here rather than read from the
        # UI; they correspond to the <seed>, <max_samples> and
        # <min_samples> entries of the XML configuration.
        self.tester.max_samples = 10000
        self.tester.min_samples = 10
        self.tester.rand_seed = 42
self.tester.command = self.command_box.text()
self.tester.run()
self.results_box.setText("<h2 style=\"text-align:center\">Themis 2.0 Execution Complete!</h2>");
self.results_box.repaint()
for test in self.tester.tests:
print (test)
if test.group == True or test.causal == True:
if self.tester.group_search_results or self.tester.causal_search_results:
self.results_box.append("<h2> Discrimination found! </h2>")
self.results_box.repaint()
detailed_output_btn = QPushButton("More details...")
self.layout3.addWidget(detailed_output_btn, 8, 4)
detailed_output_btn.clicked.connect(self.handleDetailedButton)
self.horizontalGroupBox3.setLayout(self.layout3)
self.results_box.toHtml()
def handleDetailedButton(self):
dialog = QDialog()
horizontalGroupBox = QGroupBox("Detailed Discrimination Findings")
horizontalGroupBox.setFont(QFont("", pointSize=14))
layout = QGridLayout()
detailed_output_box = QTextEdit()
detailed_output_box.setReadOnly(True)
detailed_output_box.setText("")
for test in self.tester.tests:
if test.group == True or test.causal == True:
detailed_output_box.append("<h2>Discrimination Results</h2>")
detailed_output_box.append("<h4> Threshold: " + "{:.1%}".format(test.threshold) + "</h4>")
if self.tester.group_search_results:
detailed_output_box.append("<h3> Group discrimination found </h3>")
for key,value in self.tester.group_search_results.items():
detailed_output_box.append("<h3>" + ",".join(key) + ": " + value + "</h3>")
print (",".join(key) + "-->" + value )
for result in self.tester.group_measurement_results:
if (key == tuple(result.i_fields)):
min_score = result.data[0]
min_assign = result.data[1]
max_score = result.data[2]
max_assign = result.data[3]
detailed_output_box.append(" <b>Min: </b>" + min_assign + " → " + "{:.1%}".format(min_score))
detailed_output_box.append(" <b>Max: </b>" + max_assign + " → " + "{:.1%}".format(max_score))
if self.tester.causal_search_results:
detailed_output_box.append("<h3>Causal discrimination found</h3>")
for key,value in self.tester.causal_search_results.items():
detailed_output_box.append("<h3>" + ",".join(key) + ": " + value + "</h3>")
print (",".join(key) + "-->" + value)
detailed_output_box.append("<h4> Causal Pairs: </h4>")
for result in self.tester.causal_measurement_results:
if (key == tuple(result.i_fields)):
for a1, a2 in result.data:
str_list1 = []
str_list2 = []
for k in self.tester.input_order:
if k in key:
str_list1.append("<b>" + a1[k] + "</b>")
str_list2.append("<b>" + a2[k] + "</b>")
else:
str_list1.append(a1[k])
str_list2.append(a2[k])
causal_pair1 = ", ".join(str_list1)
causal_pair2 = ", ".join(str_list2)
detailed_output_box.append("<p> " + causal_pair1 + " → " + causal_pair2 + "</p>")
print (causal_pair1, causal_pair2)
detailed_output_box.toHtml()
detailed_output_box.moveCursor(QtGui.QTextCursor.Start)
layout.addWidget(detailed_output_box, 1, 1, 5, 5)
horizontalGroupBox.setLayout(layout)
dialog.setGeometry(self.left, self.top, self.width+100, self.height-200)
windowLayout = QVBoxLayout()
windowLayout.addWidget(horizontalGroupBox)
dialog.setLayout(windowLayout)
dialog.setWindowTitle("Themis 2.0: Detailed Output")
dialog.exec_()
def handleLoadButton(self):
dialog = QFileDialog()
filename = dialog.getOpenFileName(self, "Open File", "~/Documents/Themis/Themis2.0")
if filename[0]:
self.file = open(filename[0], 'r')
else:
return
self.tester = themis2.Themis(filename[0])
command = self.tester.command
rand_seed = self.tester.rand_seed
max_samples = self.tester.max_samples
min_samples = self.tester.min_samples
# set text boxes from Themis
self.command_box.setText(command)
self.results_box.clear()
self.tests_table.clearContents()
self.inputs_table.clearContents()
index = 0
inputs = []
for test in self.tester.tests:
self.tests_table.insertRow(index)
self.createEditButtons(self.tests_table,index, test)
function = test.function
confidence = test.conf
margin = test.margin
self.setTestTableValue(str(function),index,1)
self.setTestTableValue(str(confidence), index, 2)
self.setTestTableValue(str(margin), index, 3)
index += 1
for field in test.i_fields:
if field not in inputs:
inputs.append(field)
self.resizeCells(self.tests_table)
i = 0
for name in self.tester.input_names:
self.inputs_table.insertRow(i)
self.createEditButtons(self.inputs_table, i, test)
inpt = self.tester.inputs[name]
name = inpt.name
inpt_type = inpt.kind
values = inpt.values
self.setCellValue(name, i, 1)
self.setCellValue(inpt_type, i, 2)
if inpt_type == "categorical":
value = "{" + ", ".join(values) + "}"
self.setCellValue(value, i, 3)
else:
value = "[" + inpt.lb + "-" + inpt.ub + "]"
self.setCellValue(value, i, 3)
i +=1
self.resizeCells(self.inputs_table)
def resizeCells(self, table):
table.resizeRowsToContents()
table.setEditTriggers(QAbstractItemView.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setStretchLastSection(True)
for i in range(table.columnCount()-1):
table.resizeColumnToContents(i)
def handleSaveButton(self):
return
def createInputsTable(self):
self.inputs_table = QTableWidget()
self.inputs_table.setColumnCount(4)
self.inputs_table.setHorizontalHeaderLabels(["", "Input Name", "Input Type", "Possible Values"])
self.inputs_table.horizontalHeader().setStretchLastSection(True)
self.resizeCells(self.inputs_table)
def createTestsTable(self):
self.tests_table = QTableWidget()
self.tests_table.setColumnCount(5)
self.tests_table.setHorizontalHeaderLabels(["", "Measurement To Run", "Confidence", "Margin", "Inputs"])
self.resizeCells(self.tests_table)
def createEditButtons(self, table, row, test):
layout = QHBoxLayout()
layout.setContentsMargins(2,2,2,2)
layout.setSpacing(10)
delete_btn = QPushButton(table)
delete_btn.setText("Delete")
delete_btn.adjustSize()
layout.addWidget(delete_btn)
edit_btn = QPushButton(table)
edit_btn.setText("Edit...")
index = QPersistentModelIndex(table.model().index(row, 0))
edit_btn.clicked.connect(lambda *args, index=index: self.handleEditButton(index, table, test))
layout.addWidget(edit_btn)
cellWidget = QWidget()
cellWidget.setLayout(layout)
table.setCellWidget(row,0,cellWidget)
def handleEditButton(self, index, table, test):
if table is self.inputs_table:
item_name = table.item(index.row(), 1)
item_type = table.item(index.row(), 2)
item_values = table.item(index.row(), 3)
print ("Name: " + item_name.text())
print ("Type: " + item_type.text())
print ("Values :" + item_values.text())
edit_input_dialog = EditInputWindow(False, item_name.text(), item_type.text(), item_values.text())
edit_input_dialog.setModal(True)
edit_input_dialog.show()
if table is self.tests_table:
test_name = table.item(index.row(), 1)
confidence = table.item(index.row(), 2)
margin = table.item(index.row(), 3)
self.edit_test_dialog = EditTestWindow(False, test_name.text(), confidence.text(), margin.text(), test)
self.edit_test_dialog.setModal(True)
self.edit_test_dialog.show()
self.updateTable(table, index)
def updateTable(self, table, index):
if self.edit_test_dialog.exec_():
if table is self.tests_table:
test = self.tester.tests[index.row()]
print (" We are in update table")
print ("Sex ", self.sex_cb.isChecked())
print ("Race ", self.race_cb.isChecked())
print ("Income ", self.income_cb.isChecked())
#update Themis values
if self.edit_test_dialog.sex_cb.isChecked() and self.edit_test_dialog.race_cb.isChecked() and self.edit_test_dialog.income_cb.isChecked():
print ("Blah blah blah")
# fix wht goes in table!
table.item(index.row(),1).setText("discrimination_search")
test.function = "discrimination_search"
if self.edit_test_dialog.causal_cb.isChecked() and not self.edit_test_dialog.group_cb.isChecked():
test.group = False
test.causal = True
elif not self.edit_test_dialog.causal_cb.isChecked() and self.edit_test_dialog.group_cb.isChecked():
test.group = True
test.causal = False
else:
test.group = True
test.causal = True
else:
if self.edit_test_dialog.group_cb.isChecked():
test.function = "group_discrimination"
table.item(index.row(),1,).setText("group_discrimination")
elif self.edit_test_dialog.causal_cb.isChecked():
table.item(index.row(),1).setText("causal_discrimination")
test.function = "causal_discrimination"
test.conf = float(self.edit_test_dialog.conf_box.text())
table.item(index.row(), 2).setText(str(test.conf))
test.margin = float(self.edit_test_dialog.margin_box.text())
table.item(index.row(), 3).setText(str(test.margin))
self.tester.tests[index.row()] = test
def setCellValue(self, value, row, column):
new_input = QTableWidgetItem()
new_input.setText(value)
self.inputs_table.setItem(row,column,new_input)
def setTestTableValue(self, value, row, column):
new_input = QTableWidgetItem()
new_input.setText(value)
self.tests_table.setItem(row,column,new_input)
class EditTestWindow(QDialog):
    def __init__(self, add=False, name=None, conf=None, margin=None, test=None):
        super().__init__()
        if add == False:
            self.title = 'Edit Measurement'
        else:
            self.title = 'Add Measurements'
        self.left = 100
        self.top = 100
        self.width = 500
        self.height = 300
        if name == None and conf == None and margin == None and test == None:
            self.initUI()
        else:
            self.initUI(name, conf, margin, test)
def initUI(self, name=None, conf=None, margin=None, test=None):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
if name == None and conf == None and margin == None and test==None:
self.createGrid()
else:
self.createGrid(name, conf, margin, test)
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
def createGrid(self, name=None, conf=None, margin=None, test=None):
self.all_measurements = []
self.all_confs = []
self.all_margins = []
self.all_inputs = []
self.all_thresholds = []
self.horizontalGroupBox = QGroupBox("")
self.layout = QGridLayout()
self.type_label = QLabel("Measurement Type: ")
self.group_cb = QCheckBox('Group Discrimination',self)
self.causal_cb = QCheckBox('Causal Discrimination',self)
if name is not None:
if name == "group_discrimination":
self.group_cb.setChecked(True)
elif name == "causal_discrimination":
self.causal_cb.setChecked(True)
elif name == "discrimination_search":
                if test is not None:
                    # A discrimination search is represented by the group and
                    # causal checkboxes (all inputs of interest get selected).
                    if test.group == True:
                        self.group_cb.setChecked(True)
                    if test.causal == True:
                        self.causal_cb.setChecked(True)
else:
print ("No test was passed in!")
self.group_cb.stateChanged.connect(self.selectionChange)
self.causal_cb.stateChanged.connect(self.selectionChange)
self.layout.addWidget(self.type_label, 1, 1)
self.layout.addWidget(self.group_cb, 1, 2)
self.layout.addWidget(self.causal_cb, 2, 2)
self.inputs_label = QLabel("Select inputs of interest:")
self.sex_cb = QCheckBox("sex")
self.race_cb = QCheckBox("race")
self.income_cb = QCheckBox("income")
self.sex_cb.stateChanged.connect(self.selectionChange)
self.race_cb.stateChanged.connect(self.selectionChange)
self.income_cb.stateChanged.connect(self.selectionChange)
self.layout.addWidget(self.inputs_label, 5, 1)
self.layout.addWidget(self.sex_cb,5,2)
self.layout.addWidget(self.race_cb,6,2)
self.layout.addWidget(self.income_cb,7,2)
self.conf_label = QLabel("Confidence: ")
self.conf_box = QLineEdit(self)
if conf is not None:
self.conf_box.setText(str(conf))
self.layout.addWidget(self.conf_label, 8, 1)
self.layout.addWidget(self.conf_box, 8, 2)
self.margin_label = QLabel("Margin: ")
self.margin_box = QLineEdit(self)
if margin is not None:
self.margin_box.setText(str(margin))
self.layout.addWidget(self.margin_label, 9, 1)
self.layout.addWidget(self.margin_box, 9, 2)
self.add_button = QPushButton("Add")
self.add_button.setAutoDefault(False)
self.layout.addWidget(self.add_button, 10, 3)
self.add_button.clicked.connect(self.handleAddButton)
self.horizontalGroupBox.setLayout(self.layout)
def selectionChange(self):
        if self.group_cb.isChecked() or self.causal_cb.isChecked():
if self.sex_cb.isChecked() and self.race_cb.isChecked() and self.income_cb.isChecked():
self.enterThreshold()
else:
try:
self.removeThreshold()
except:
return
def removeThreshold(self):
self.layout.removeWidget(self.threshold_label)
self.layout.removeWidget(self.threshold_box)
self.layout.removeWidget(self.conf_label)
self.layout.removeWidget(self.conf_box)
self.layout.removeWidget(self.margin_label)
self.layout.removeWidget(self.margin_box)
self.layout.removeWidget(self.add_button)
self.layout.addWidget(self.conf_label, 8, 1 )
self.layout.addWidget(self.conf_box, 8, 2 )
self.layout.addWidget(self.margin_label, 9, 1 )
self.layout.addWidget(self.margin_box, 9, 2 )
self.layout.addWidget(self.add_button, 10, 3)
def enterThreshold(self):
self.threshold_label = QLabel("Threshold:")
self.threshold_box = QLineEdit(self)
self.layout.removeWidget(self.conf_label)
self.layout.removeWidget(self.conf_box)
self.layout.removeWidget(self.margin_label)
self.layout.removeWidget(self.margin_box)
self.layout.removeWidget(self.add_button)
self.layout.addWidget(self.threshold_label, 8, 1)
self.layout.addWidget(self.threshold_box, 8, 2)
self.layout.addWidget(self.conf_label, 9, 1 )
self.layout.addWidget(self.conf_box, 9, 2 )
self.layout.addWidget(self.margin_label, 10, 1 )
self.layout.addWidget(self.margin_box, 10, 2 )
self.layout.addWidget(self.add_button, 11, 3)
def handleAddButton(self):
if self.sex_cb.isChecked() and self.race_cb.isChecked() and self.income_cb.isChecked():
if self.causal_cb.isChecked() and not self.group_cb.isChecked():
self.all_measurements.append("Discrimination Search Causal")
self.all_thresholds.append(self.threshold_box.text())
elif self.group_cb.isChecked() and not self.causal_cb.isChecked():
self.all_measurements.append("Discrimination Search Group")
self.all_thresholds.append(self.threshold_box.text())
else:
self.all_measurements.append("Discrimination Search ")
self.all_thresholds.append(self.threshold_box.text())
else:
if self.group_cb.isChecked():
self.all_measurements.append(self.group_cb.text())
print(self.group_cb.text())
elif self.causal_cb.isChecked():
self.all_measurements.append(self.causal_cb.text())
print (self.causal_cb.text())
self.all_confs.append(self.conf_box.text())
self.all_margins.append(self.margin_box.text())
inputs = []
if self.sex_cb.isChecked():
inputs.append("sex")
if self.race_cb.isChecked():
inputs.append("race")
if self.income_cb.isChecked():
inputs.append("income")
inputs_tuple = tuple(inputs)
self.all_inputs.append(inputs_tuple)
self.accept()
def handleDoneButton(self):
self.accept()
class EditInputWindow(QDialog):
def __init__(self, add=False, name=None, kind="categorical", values=None):
super().__init__()
if add == False:
self.title = "Edit Input"
else:
self.title = 'Add Inputs'
self.left = 100
self.top = 100
self.width = 500
self.height = 300
if name == None and kind == "categorical" and values == None:
self.initUI()
else:
self.initUI(name, kind, values)
def initUI(self, name=None, kind="categorical",values=None):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
if name == None and kind == "categorical" and values == None:
self.createGrid()
else:
self.createGrid(name, kind, values)
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
def createGrid(self, name=None, kind="categorical", values=None):
self.horizontalGroupBox = QGroupBox("")
layout = QGridLayout()
self.all_names = []
self.all_values = []
self.all_types = []
name_label = QLabel("Input name: ")
self.name_box = QLineEdit(self)
if name is not None:
self.name_box.setText(str(name))
layout.addWidget(name_label, 1, 1)
layout.addWidget(self.name_box, 1, 2)
self.type_label = QLabel("Input type: ")
self.types = QComboBox()
self.types.addItem("Categorical")
self.types.addItem("Continuous Int")
if kind == "continuousInt":
index = self.types.findText("Continuous Int")
if index >= 0:
self.types.setCurrentIndex(index)
else:
print ("Can't find item in combo box!")
layout.addWidget(self.type_label, 2, 1)
layout.addWidget(self.types, 2, 2)
self.values_label = QLabel("Values (separated by commas): ")
self.values_box = QLineEdit(self)
if values is not None:
self.values_box.setText(values)
layout.addWidget(self.values_label, 3, 1)
layout.addWidget(self.values_box, 3, 2)
self.types.currentIndexChanged.connect(self.selectionChange)
self.add_button = QPushButton("Add")
self.add_button.setAutoDefault(False)
self.add_button.clicked.connect(self.handleAddButton)
layout.addWidget(self.add_button, 4,4)
self.horizontalGroupBox.setLayout(layout)
def selectionChange(self):
if self.types.currentText() == "Continuous Int":
self.values_label.setText("Range (e.g. 1-10) : ")
else:
self.values_label.setText("Values (separated by commas): ")
def handleAddButton(self):
self.all_names.append(self.name_box.text())
self.all_values.append(self.values_box.text())
self.all_types.append(self.types.currentText())
self.accept()
def handleDoneButton(self):
self.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| 31,234 | 35.617819 | 164 | py |
Themis | Themis-master/Themis2.0/loan.py | import sys
sex = sys.argv[1]
race = sys.argv[2]
income = sys.argv[3]
# first case
if sex == "male":
print ("1")
elif race != "green":
if income == "0...50000":
print ("0")
else:
print ("1")
else:
if income == "0...50000" or income == "50001...100000":
print ("0")
else:
print ("1")
| 299 | 12.636364 | 56 | py |
Themis | Themis-master/Themis2.0/loan_2.py | import sys
sex = sys.argv[1]
race = sys.argv[2]
income = sys.argv[3]
# second case
if race == "green" or race == "orange":
if income == "0...50000":
print ("0")
else:
print ("1")
else:
if income == "50001...100000":
print ("0")
else:
print ("1")
| 260 | 13.5 | 39 | py |
Themis | Themis-master/Themis2.0/grid.py | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import xml.etree.ElementTree as ET
import themis2
class App(QDialog):
def __init__(self):
super().__init__()
self.title = 'Themis 2.0'
self.left = 100
self.top = 100
self.width = 800
self.height = 1000
self.tree = None
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createThemisGrid()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox4)
windowLayout.addWidget(self.horizontalGroupBox)
windowLayout.addWidget(self.horizontalGroupBox2)
windowLayout.addWidget(self.horizontalGroupBox3)
self.setLayout(windowLayout)
self.show()
def createThemisGrid(self):
self.horizontalGroupBox4 = QGroupBox()
layout4 = QGridLayout()
command_box_label = QLabel("Command:")
self.command_box = QLineEdit(self)
seed_box_label = QLabel("Random seed:")
self.seed_box = QLineEdit(self)
max_box_label = QLabel("Max Samples:")
self.max_box = QLineEdit(self)
min_box_label = QLabel("Min Samples:")
self.min_box = QLineEdit(self)
layout4.addWidget(command_box_label,1, 0)
layout4.addWidget(self.command_box,1, 1)
layout4.addWidget(seed_box_label,2,0)
layout4.addWidget(self.seed_box,2,1)
layout4.addWidget(max_box_label,3,0)
layout4.addWidget(self.max_box,3,1)
layout4.addWidget(min_box_label,4,0)
layout4.addWidget(self.min_box,4,1)
self.horizontalGroupBox = QGroupBox("Inputs")
layout = QGridLayout()
layout.setSpacing(5)
self.createInputsTable()
load_button = QPushButton('Load...')
load_button.clicked.connect(self.handleLoadButton)
save_button = QPushButton('Save...')
save_button.clicked.connect(self.handleSaveButton)
add_button = QPushButton('Add Input...')
add_button.clicked.connect(self.handleAddButton)
self.dialog = EditInputWindow()
layout.addWidget(self.inputs_table,0,1, 3, 4)
layout.addWidget(load_button,5, 1)
layout.addWidget(save_button,5, 2)
layout.addWidget(add_button,5,3)
self.horizontalGroupBox2 = QGroupBox("Tests")
layout2 = QGridLayout()
self.createTestsTable()
add_test_button = QPushButton("Add Test...")
layout2.addWidget(self.tests_table, 5, 4, 4, 4)
layout2.addWidget(add_test_button, 9, 4)
self.horizontalGroupBox3 = QGroupBox("")
layout3 = QGridLayout()
run_button = QPushButton("Run")
# run themis
run_button.clicked.connect(self.runThemis)
self.results_box = QTextEdit()
self.results_box.setReadOnly(True)
layout3.addWidget(run_button,1, 1)
layout3.addWidget(self.results_box, 2, 1, 5, 5)
self.horizontalGroupBox.setLayout(layout)
self.horizontalGroupBox2.setLayout(layout2)
self.horizontalGroupBox3.setLayout(layout3)
self.horizontalGroupBox4.setLayout(layout4)
def runThemis(self):
self.tester.run()
        self.results_box.setText(self.getTesterOutput())
def getTesterOutput(self):
results = self.tester.output
return results
def handleAddButton(self):
self.dialog.setModal(True)
self.dialog.show()
def handleLoadButton(self):
dialog = QFileDialog()
filename = dialog.getOpenFileName(self, "Open File", "/home")
if filename[0]:
            self.file = open(filename[0], 'r')
            # Build the Themis instance from the loaded configuration file.
            self.tester = themis2.Themis(xml_fname=filename[0])
            self.processSettingsFiles()
def processSettingsFiles(self):
self.tree = ET.parse(self.file)
root = self.tree.getroot()
run_command = root.find('command').text
self.command_box.setText(run_command)
seed = root.find('seed').text
self.seed_box.setText(seed)
max_samples = root.find('max_samples').text
self.max_box.setText(max_samples)
min_samples = root.find('min_samples').text
self.min_box.setText(min_samples)
# column 1 = Input Name
# column 2 = Input Type
# column 3 = Values
#for categorical values
self.inputs = []
ctr = 0
for run_input in root.iter('input'):
name = run_input.find('name').text
print(name)
            self.inputs.append(name)
categoricalFlag = False
for j in run_input.iter('type'):
if j.text == "categorical":
categoricalFlag = True
values = []
if(categoricalFlag is True):
for i in run_input.iter('value'):
values.append(i.text)
else:
for lbound in run_input.iter('lowerbound'):
values.append(lbound.text)
for ubound in run_input.iter('upperbound'):
values.append(ubound.text)
if (len(values) != 0):
                self.setCellValue(str(values), ctr, 3)
ctr += 1
index = 0
for run_test in root.iter('test'):
function = ""
configuration = ""
margin = ""
for func in run_test.iter("function"):
function = func.text
for config in run_test.iter("conf"):
configuration = config.text
for marg in run_test.iter("margin"):
margin = marg.text
print(function)
print(configuration)
print(margin)
print("Got all the values")
self.setTestTableValue(function,index,1)
self.setTestTableValue(configuration, index, 2)
self.setTestTableValue(margin, index, 3)
index += 1
    def handleSaveButton(self):
        if self.tree is not None:
            self.tree.write("settings")
def createInputsTable(self):
self.inputs_table = QTableWidget()
self.inputs_table.setRowCount(10)
self.inputs_table.setColumnCount(4)
self.inputs_table.setHorizontalHeaderLabels(["", "Input Name", "Input Type", "Values"])
# pass in row to create buttons on that row
for i in range(self.inputs_table.rowCount()):
self.createEditButtons(self.inputs_table, i)
self.inputs_table.horizontalHeader().setStretchLastSection(True)
self.inputs_table.resizeRowsToContents()
self.inputs_table.resizeColumnsToContents()
self.inputs_table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.inputs_table.verticalHeader().setVisible(False)
def createTestsTable(self):
self.tests_table = QTableWidget()
self.tests_table.setRowCount(10)
self.tests_table.setColumnCount(5)
self.tests_table.setHorizontalHeaderLabels(["", "Name", "Confidence", "Margin", "Notes"])
for i in range(self.tests_table.rowCount()):
self.createEditButtons(self.tests_table,i)
self.tests_table.horizontalHeader().setStretchLastSection(True)
self.tests_table.resizeRowsToContents()
self.tests_table.resizeColumnsToContents()
self.tests_table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tests_table.verticalHeader().setVisible(False)
def createEditButtons(self, table, row):
layout = QHBoxLayout()
layout.setContentsMargins(2,2,2,2)
layout.setSpacing(10)
delete_btn = QPushButton(table)
delete_btn.setText("Delete")
delete_btn.adjustSize()
layout.addWidget(delete_btn)
edit_btn = QPushButton(table)
edit_btn.setText("Edit...")
layout.addWidget(edit_btn)
cellWidget = QWidget()
cellWidget.setLayout(layout)
table.setCellWidget(row,0,cellWidget)
def setCellValue(self, value, row, column):
new_input = QTableWidgetItem()
new_input.setText(value)
self.inputs_table.setItem(row,column,new_input)
def setTestTableValue(self, value, row, column):
new_input = QTableWidgetItem()
new_input.setText(value)
self.tests_table.setItem(row,column,new_input)
class EditInputWindow(QDialog):
def __init__(self):
super().__init__()
self.title = 'Add or Edit Inputs'
self.left = 100
self.top = 100
self.width = 500
self.height = 300
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createGrid()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
def createGrid(self):
self.horizontalGroupBox = QGroupBox("")
layout = QGridLayout()
name_label = QLabel("Input name: ")
self.name_box = QLineEdit(self)
layout.addWidget(name_label, 1, 1)
layout.addWidget(self.name_box, 1, 2)
type_label = QLabel("Input type: ")
self.types = QComboBox()
self.types.addItem("Categorical")
self.types.addItem("Continuous Int")
layout.addWidget(type_label, 2, 1)
layout.addWidget(self.types, 2, 2)
self.values_label = QLabel("Values (separated by commas): ")
self.values_box = QLineEdit(self)
layout.addWidget(self.values_label, 3, 1)
layout.addWidget(self.values_box, 3, 2)
self.types.currentIndexChanged.connect(self.selectionChange)
self.add_button = QPushButton("Add")
layout.addWidget(self.add_button, 4, 1)
self.done_button = QPushButton("Done")
layout.addWidget(self.done_button, 4, 4)
self.horizontalGroupBox.setLayout(layout)
def selectionChange(self):
if self.types.currentText() == "Continuous Int":
self.values_label.setText("Enter range (e.g. 1-10) : ")
else:
self.values_label.setText("Values (separated by commas): ")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| 10,797 | 28.746556 | 97 | py |