nwo (string, 6-76 chars) | sha (string, 40 chars) | path (string, 5-118 chars) | language (1 class: python) | identifier (string, 1-89 chars) | parameters (string, 2-5.4k chars) | argument_list (1 class) | return_statement (string, 0-51.1k chars) | docstring (string, 1-17.6k chars) | docstring_summary (string, 0-7.02k chars) | docstring_tokens (sequence) | function (string, 30-51.1k chars) | function_tokens (sequence) | url (string, 85-218 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | _process_image_files | (thread_index, ranges, name, images, decoder, vocab,
num_shards) | Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files. | Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files. | [
"Processes",
"and",
"saves",
"a",
"subset",
"of",
"images",
"as",
"TFRecord",
"files",
"in",
"one",
"thread",
".",
"Args",
":",
"thread_index",
":",
"Integer",
"thread",
"identifier",
"within",
"[",
"0",
"len",
"(",
"ranges",
")",
"]",
".",
"ranges",
":",
"A",
"list",
"of",
"pairs",
"of",
"integers",
"specifying",
"the",
"ranges",
"of",
"the",
"dataset",
"to",
"process",
"in",
"parallel",
".",
"name",
":",
"Unique",
"identifier",
"specifying",
"the",
"dataset",
".",
"images",
":",
"List",
"of",
"ImageMetadata",
".",
"decoder",
":",
"An",
"ImageDecoder",
"object",
".",
"vocab",
":",
"A",
"Vocabulary",
"object",
".",
"num_shards",
":",
"Integer",
"number",
"of",
"shards",
"for",
"the",
"output",
"files",
"."
] | def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
num_shards):
"""Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Each thread produces N shards where N = num_shards / num_threads. For
# instance, if num_shards = 128, and num_threads = 2, then the first thread
# would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in images_in_shard:
image = images[i]
sequence_example = _to_sequence_example(image, decoder, vocab)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print("%s [thread %d]: Processed %d of %d items in thread batch." %
(datetime.now(), thread_index, counter, num_images_in_thread))
sys.stdout.flush()
writer.close()
print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush() | [
"def",
"_process_image_files",
"(",
"thread_index",
",",
"ranges",
",",
"name",
",",
"images",
",",
"decoder",
",",
"vocab",
",",
"num_shards",
")",
":",
"# Each thread produces N shards where N = num_shards / num_threads. For",
"# instance, if num_shards = 128, and num_threads = 2, then the first thread",
"# would produce shards [0, 64).",
"num_threads",
"=",
"len",
"(",
"ranges",
")",
"assert",
"not",
"num_shards",
"%",
"num_threads",
"num_shards_per_batch",
"=",
"int",
"(",
"num_shards",
"/",
"num_threads",
")",
"shard_ranges",
"=",
"np",
".",
"linspace",
"(",
"ranges",
"[",
"thread_index",
"]",
"[",
"0",
"]",
",",
"ranges",
"[",
"thread_index",
"]",
"[",
"1",
"]",
",",
"num_shards_per_batch",
"+",
"1",
")",
".",
"astype",
"(",
"int",
")",
"num_images_in_thread",
"=",
"ranges",
"[",
"thread_index",
"]",
"[",
"1",
"]",
"-",
"ranges",
"[",
"thread_index",
"]",
"[",
"0",
"]",
"counter",
"=",
"0",
"for",
"s",
"in",
"range",
"(",
"num_shards_per_batch",
")",
":",
"# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'",
"shard",
"=",
"thread_index",
"*",
"num_shards_per_batch",
"+",
"s",
"output_filename",
"=",
"\"%s-%.5d-of-%.5d\"",
"%",
"(",
"name",
",",
"shard",
",",
"num_shards",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"output_dir",
",",
"output_filename",
")",
"writer",
"=",
"tf",
".",
"python_io",
".",
"TFRecordWriter",
"(",
"output_file",
")",
"shard_counter",
"=",
"0",
"images_in_shard",
"=",
"np",
".",
"arange",
"(",
"shard_ranges",
"[",
"s",
"]",
",",
"shard_ranges",
"[",
"s",
"+",
"1",
"]",
",",
"dtype",
"=",
"int",
")",
"for",
"i",
"in",
"images_in_shard",
":",
"image",
"=",
"images",
"[",
"i",
"]",
"sequence_example",
"=",
"_to_sequence_example",
"(",
"image",
",",
"decoder",
",",
"vocab",
")",
"if",
"sequence_example",
"is",
"not",
"None",
":",
"writer",
".",
"write",
"(",
"sequence_example",
".",
"SerializeToString",
"(",
")",
")",
"shard_counter",
"+=",
"1",
"counter",
"+=",
"1",
"if",
"not",
"counter",
"%",
"1000",
":",
"print",
"(",
"\"%s [thread %d]: Processed %d of %d items in thread batch.\"",
"%",
"(",
"datetime",
".",
"now",
"(",
")",
",",
"thread_index",
",",
"counter",
",",
"num_images_in_thread",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"writer",
".",
"close",
"(",
")",
"print",
"(",
"\"%s [thread %d]: Wrote %d image-caption pairs to %s\"",
"%",
"(",
"datetime",
".",
"now",
"(",
")",
",",
"thread_index",
",",
"shard_counter",
",",
"output_file",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"shard_counter",
"=",
"0",
"print",
"(",
"\"%s [thread %d]: Wrote %d image-caption pairs to %d shards.\"",
"%",
"(",
"datetime",
".",
"now",
"(",
")",
",",
"thread_index",
",",
"counter",
",",
"num_shards_per_batch",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L170-L225 |
||
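A minimal standalone sketch of the shard-range arithmetic inside `_process_image_files` above; the thread and shard counts here are hypothetical values chosen for illustration:

```python
import numpy as np

# Hypothetical setup: 2 threads covering 1000 items, 8 output shards in total.
ranges = [[0, 500], [500, 1000]]   # per-thread [start, end) index ranges
num_shards = 8
num_threads = len(ranges)
assert num_shards % num_threads == 0
num_shards_per_batch = num_shards // num_threads  # 4 shards per thread

thread_index = 0
# Evenly spaced shard boundaries inside this thread's slice of the dataset.
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                           num_shards_per_batch + 1).astype(int)
print(shard_ranges)  # [  0 125 250 375 500]
```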
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | _process_dataset | (name, images, vocab, num_shards) | Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files. | Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files. | [
"Processes",
"a",
"complete",
"data",
"set",
"and",
"saves",
"it",
"as",
"a",
"TFRecord",
".",
"Args",
":",
"name",
":",
"Unique",
"identifier",
"specifying",
"the",
"dataset",
".",
"images",
":",
"List",
"of",
"ImageMetadata",
".",
"vocab",
":",
"A",
"Vocabulary",
"object",
".",
"num_shards",
":",
"Integer",
"number",
"of",
"shards",
"for",
"the",
"output",
"files",
"."
] | def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Break up each image into a separate entity for each caption.
images = [ImageMetadata(image.id, image.filename, [caption])
for image in images for caption in image.captions]
# Shuffle the ordering of images. Make the randomization repeatable.
random.seed(12345)
random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder()
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in range(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name)) | [
"def",
"_process_dataset",
"(",
"name",
",",
"images",
",",
"vocab",
",",
"num_shards",
")",
":",
"# Break up each image into a separate entity for each caption.",
"images",
"=",
"[",
"ImageMetadata",
"(",
"image",
".",
"id",
",",
"image",
".",
"filename",
",",
"[",
"caption",
"]",
")",
"for",
"image",
"in",
"images",
"for",
"caption",
"in",
"image",
".",
"captions",
"]",
"# Shuffle the ordering of images. Make the randomization repeatable.",
"random",
".",
"seed",
"(",
"12345",
")",
"random",
".",
"shuffle",
"(",
"images",
")",
"# Break the images into num_threads batches. Batch i is defined as",
"# images[ranges[i][0]:ranges[i][1]].",
"num_threads",
"=",
"min",
"(",
"num_shards",
",",
"FLAGS",
".",
"num_threads",
")",
"spacing",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"images",
")",
",",
"num_threads",
"+",
"1",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"ranges",
"=",
"[",
"]",
"threads",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"spacing",
")",
"-",
"1",
")",
":",
"ranges",
".",
"append",
"(",
"[",
"spacing",
"[",
"i",
"]",
",",
"spacing",
"[",
"i",
"+",
"1",
"]",
"]",
")",
"# Create a mechanism for monitoring when all threads are finished.",
"coord",
"=",
"tf",
".",
"train",
".",
"Coordinator",
"(",
")",
"# Create a utility for decoding JPEG images to run sanity checks.",
"decoder",
"=",
"ImageDecoder",
"(",
")",
"# Launch a thread for each batch.",
"print",
"(",
"\"Launching %d threads for spacings: %s\"",
"%",
"(",
"num_threads",
",",
"ranges",
")",
")",
"for",
"thread_index",
"in",
"range",
"(",
"len",
"(",
"ranges",
")",
")",
":",
"args",
"=",
"(",
"thread_index",
",",
"ranges",
",",
"name",
",",
"images",
",",
"decoder",
",",
"vocab",
",",
"num_shards",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"_process_image_files",
",",
"args",
"=",
"args",
")",
"t",
".",
"start",
"(",
")",
"threads",
".",
"append",
"(",
"t",
")",
"# Wait for all the threads to terminate.",
"coord",
".",
"join",
"(",
"threads",
")",
"print",
"(",
"\"%s: Finished processing all %d image-caption pairs in data set '%s'.\"",
"%",
"(",
"datetime",
".",
"now",
"(",
")",
",",
"len",
"(",
"images",
")",
",",
"name",
")",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L228-L270 |
||
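The per-caption flattening and repeatable shuffle at the top of `_process_dataset` can be reproduced in isolation. The `namedtuple` below is a stand-in for the script's `ImageMetadata` type (same three fields); the toy records are invented:

```python
import random
from collections import namedtuple

ImageMetadata = namedtuple("ImageMetadata", ["id", "filename", "captions"])

images = [ImageMetadata(0, "a.jpg", ["cap1", "cap2"]),
          ImageMetadata(1, "b.jpg", ["cap3"])]

# One entity per (image, caption) pair, exactly as in _process_dataset.
images = [ImageMetadata(image.id, image.filename, [caption])
          for image in images for caption in image.captions]

random.seed(12345)   # fixed seed makes the shuffle repeatable across runs
random.shuffle(images)
print(len(images))   # 3 image-caption pairs
```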
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | _create_vocab | (captions) | return vocab | Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object. | Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object. | [
"Creates",
"the",
"vocabulary",
"of",
"word",
"to",
"word_id",
".",
"The",
"vocabulary",
"is",
"saved",
"to",
"disk",
"in",
"a",
"text",
"file",
"of",
"word",
"counts",
".",
"The",
"id",
"of",
"each",
"word",
"in",
"the",
"file",
"is",
"its",
"corresponding",
"0",
"-",
"based",
"line",
"number",
".",
"Args",
":",
"captions",
":",
"A",
"list",
"of",
"lists",
"of",
"strings",
".",
"Returns",
":",
"A",
"Vocabulary",
"object",
"."
] | def _create_vocab(captions):
"""Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object.
"""
print("Creating vocabulary.")
counter = Counter()
for c in captions:
counter.update(c)
print("Total words:", len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = Vocabulary(vocab_dict, unk_id)
return vocab | [
"def",
"_create_vocab",
"(",
"captions",
")",
":",
"print",
"(",
"\"Creating vocabulary.\"",
")",
"counter",
"=",
"Counter",
"(",
")",
"for",
"c",
"in",
"captions",
":",
"counter",
".",
"update",
"(",
"c",
")",
"print",
"(",
"\"Total words:\"",
",",
"len",
"(",
"counter",
")",
")",
"# Filter uncommon words and sort by descending count.",
"word_counts",
"=",
"[",
"x",
"for",
"x",
"in",
"counter",
".",
"items",
"(",
")",
"if",
"x",
"[",
"1",
"]",
">=",
"FLAGS",
".",
"min_word_count",
"]",
"word_counts",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"print",
"(",
"\"Words in vocabulary:\"",
",",
"len",
"(",
"word_counts",
")",
")",
"# Write out the word counts file.",
"with",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"FLAGS",
".",
"word_counts_output_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"%s %d\"",
"%",
"(",
"w",
",",
"c",
")",
"for",
"w",
",",
"c",
"in",
"word_counts",
"]",
")",
")",
"print",
"(",
"\"Wrote vocabulary file:\"",
",",
"FLAGS",
".",
"word_counts_output_file",
")",
"# Create the vocabulary dictionary.",
"reverse_vocab",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"word_counts",
"]",
"unk_id",
"=",
"len",
"(",
"reverse_vocab",
")",
"vocab_dict",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"(",
"y",
",",
"x",
")",
"in",
"enumerate",
"(",
"reverse_vocab",
")",
"]",
")",
"vocab",
"=",
"Vocabulary",
"(",
"vocab_dict",
",",
"unk_id",
")",
"return",
"vocab"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L273-L304 |
|
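A self-contained sketch of the counting, filtering, and id-assignment logic in `_create_vocab`; the captions and the minimum count (standing in for `FLAGS.min_word_count`) are illustrative:

```python
from collections import Counter

captions = [["a", "dog", "runs"], ["a", "cat", "runs"], ["a", "dog", "sleeps"]]
min_word_count = 2  # stands in for FLAGS.min_word_count

counter = Counter()
for c in captions:
    counter.update(c)

# Filter uncommon words and sort by descending count, as in _create_vocab.
word_counts = [x for x in counter.items() if x[1] >= min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)

reverse_vocab = [w for w, _ in word_counts]
vocab_dict = {w: i for i, w in enumerate(reverse_vocab)}
unk_id = len(reverse_vocab)   # ids 0..N-1 are real words; N is the <UNK> id
print(vocab_dict, unk_id)     # {'a': 0, 'dog': 1, 'runs': 2} 3
```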
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | _process_caption_jieba | (caption) | return tokenized_caption | Processes a Chinese caption string into a list of tokenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption. | Processes a Chinese caption string into a list of tokenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption. | [
"Processes",
"a",
"Chinese",
"caption",
"string",
"into",
"a",
"list",
"of",
"tonenized",
"words",
".",
"Args",
":",
"caption",
":",
"A",
"string",
"caption",
".",
"Returns",
":",
"A",
"list",
"of",
"strings",
";",
"the",
"tokenized",
"caption",
"."
] | def _process_caption_jieba(caption):
"""Processes a Chinese caption string into a list of tonenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption.
"""
tokenized_caption = [FLAGS.start_word]
tokenized_caption.extend(jieba.cut(caption, cut_all=False))
tokenized_caption.append(FLAGS.end_word)
return tokenized_caption | [
"def",
"_process_caption_jieba",
"(",
"caption",
")",
":",
"tokenized_caption",
"=",
"[",
"FLAGS",
".",
"start_word",
"]",
"tokenized_caption",
".",
"extend",
"(",
"jieba",
".",
"cut",
"(",
"caption",
",",
"cut_all",
"=",
"False",
")",
")",
"tokenized_caption",
".",
"append",
"(",
"FLAGS",
".",
"end_word",
")",
"return",
"tokenized_caption"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L307-L317 |
|
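A usage sketch of the same jieba-based tokenization; `<S>` and `</S>` are placeholder markers standing in for `FLAGS.start_word` and `FLAGS.end_word`, whose actual values are not shown here:

```python
import jieba  # third-party segmenter used by the script: pip install jieba

def process_caption_jieba(caption, start_word="<S>", end_word="</S>"):
    """Segments a Chinese caption and brackets it with start/end markers."""
    tokens = [start_word]
    tokens.extend(jieba.cut(caption, cut_all=False))  # precise (non-full) mode
    tokens.append(end_word)
    return tokens

print(process_caption_jieba("一个男人在打篮球"))
```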
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | _load_and_process_metadata | (captions_file, image_dir) | return image_metadata | Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: Json file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata. | Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: Json file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata. | [
"Loads",
"image",
"metadata",
"from",
"a",
"JSON",
"file",
"and",
"processes",
"the",
"captions",
".",
"Args",
":",
"captions_file",
":",
"Json",
"file",
"containing",
"caption",
"annotations",
".",
"image_dir",
":",
"Directory",
"containing",
"the",
"image",
"files",
".",
"Returns",
":",
"A",
"list",
"of",
"ImageMetadata",
"."
] | def _load_and_process_metadata(captions_file, image_dir):
"""Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: Json file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata.
"""
image_id = set([])
id_to_captions = {}
with open(captions_file, 'r') as f:
caption_data = json.load(f)
for data in caption_data:
image_name = data['image_id'].split('.')[0]
descriptions = data['caption']
if image_name not in image_id:
id_to_captions.setdefault(image_name, [])
image_id.add(image_name)
caption_num = len(descriptions)
for i in range(caption_num):
caption_temp = descriptions[i].strip().strip("。").replace('\n', '')
if caption_temp != '':
id_to_captions[image_name].append(caption_temp)
print("Loaded caption metadata for %d images from %s and image_id num is %s" %
(len(id_to_captions), captions_file, len(image_id)))
# Process the captions and combine the data into a list of ImageMetadata.
print("Proccessing captions.")
image_metadata = []
num_captions = 0
id = 0
for base_filename in image_id:
filename = os.path.join(image_dir, base_filename + '.jpg')
# captions = [_process_caption(c) for c in id_to_captions[base_filename]]
captions = [_process_caption_jieba(c) for c in id_to_captions[base_filename]]
image_metadata.append(ImageMetadata(id, filename, captions))
id = id + 1
num_captions += len(captions)
print("Finished processing %d captions for %d images in %s" %
(num_captions, len(id_to_captions), captions_file))
return image_metadata | [
"def",
"_load_and_process_metadata",
"(",
"captions_file",
",",
"image_dir",
")",
":",
"image_id",
"=",
"set",
"(",
"[",
"]",
")",
"id_to_captions",
"=",
"{",
"}",
"with",
"open",
"(",
"captions_file",
",",
"'r'",
")",
"as",
"f",
":",
"caption_data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"for",
"data",
"in",
"caption_data",
":",
"image_name",
"=",
"data",
"[",
"'image_id'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"descriptions",
"=",
"data",
"[",
"'caption'",
"]",
"if",
"image_name",
"not",
"in",
"image_id",
":",
"id_to_captions",
".",
"setdefault",
"(",
"image_name",
",",
"[",
"]",
")",
"image_id",
".",
"add",
"(",
"image_name",
")",
"caption_num",
"=",
"len",
"(",
"descriptions",
")",
"for",
"i",
"in",
"range",
"(",
"caption_num",
")",
":",
"caption_temp",
"=",
"descriptions",
"[",
"i",
"]",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"\"。\").",
"r",
"e",
"place('",
"\\",
"n', ",
"'",
")",
"",
"if",
"caption_temp",
"!=",
"''",
":",
"id_to_captions",
"[",
"image_name",
"]",
".",
"append",
"(",
"caption_temp",
")",
"print",
"(",
"\"Loaded caption metadata for %d images from %s and image_id num is %s\"",
"%",
"(",
"len",
"(",
"id_to_captions",
")",
",",
"captions_file",
",",
"len",
"(",
"image_id",
")",
")",
")",
"# Process the captions and combine the data into a list of ImageMetadata.",
"print",
"(",
"\"Proccessing captions.\"",
")",
"image_metadata",
"=",
"[",
"]",
"num_captions",
"=",
"0",
"id",
"=",
"0",
"for",
"base_filename",
"in",
"image_id",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"image_dir",
",",
"base_filename",
"+",
"'.jpg'",
")",
"# captions = [_process_caption(c) for c in id_to_captions[base_filename]]",
"captions",
"=",
"[",
"_process_caption_jieba",
"(",
"c",
")",
"for",
"c",
"in",
"id_to_captions",
"[",
"base_filename",
"]",
"]",
"image_metadata",
".",
"append",
"(",
"ImageMetadata",
"(",
"id",
",",
"filename",
",",
"captions",
")",
")",
"id",
"=",
"id",
"+",
"1",
"num_captions",
"+=",
"len",
"(",
"captions",
")",
"print",
"(",
"\"Finished processing %d captions for %d images in %s\"",
"%",
"(",
"num_captions",
",",
"len",
"(",
"id_to_captions",
")",
",",
"captions_file",
")",
")",
"return",
"image_metadata"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L320-L361 |
|
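The annotations consumed by `_load_and_process_metadata` are a JSON list of records with `image_id` and `caption` fields. A minimal parse of the same shape, with invented sample data:

```python
import json

raw = '[{"image_id": "0001.jpg", "caption": ["一个男人在打篮球。", ""]}]'
id_to_captions = {}
for data in json.loads(raw):
    image_name = data["image_id"].split(".")[0]
    # Strip whitespace, trailing 。, and newlines, as in the original loop.
    cleaned = [c.strip().strip("。").replace("\n", "") for c in data["caption"]]
    id_to_captions[image_name] = [c for c in cleaned if c != ""]
print(id_to_captions)  # {'0001': ['一个男人在打篮球']}
```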
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | Vocabulary.__init__ | (self, vocab, unk_id) | Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word. | Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word. | [
"Initializes",
"the",
"vocabulary",
".",
"Args",
":",
"vocab",
":",
"A",
"dictionary",
"of",
"word",
"to",
"word_id",
".",
"unk_id",
":",
"Id",
"of",
"the",
"special",
"unknown",
"word",
"."
] | def __init__(self, vocab, unk_id):
"""Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word.
"""
self._vocab = vocab
self._unk_id = unk_id | [
"def",
"__init__",
"(",
"self",
",",
"vocab",
",",
"unk_id",
")",
":",
"self",
".",
"_vocab",
"=",
"vocab",
"self",
".",
"_unk_id",
"=",
"unk_id"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L79-L86 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/build_tfrecord.py | python | Vocabulary.word_to_id | (self, word) | Returns the integer id of a word string. | Returns the integer id of a word string. | [
"Returns",
"the",
"integer",
"id",
"of",
"a",
"word",
"string",
"."
] | def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id | [
"def",
"word_to_id",
"(",
"self",
",",
"word",
")",
":",
"if",
"word",
"in",
"self",
".",
"_vocab",
":",
"return",
"self",
".",
"_vocab",
"[",
"word",
"]",
"else",
":",
"return",
"self",
".",
"_unk_id"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L88-L93 |
||
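Putting the two `Vocabulary` methods above together, a condensed but behaviorally equivalent usage sketch (`dict.get` replaces the explicit membership test; the toy mapping is illustrative):

```python
class Vocabulary(object):
    def __init__(self, vocab, unk_id):
        self._vocab = vocab      # word -> id mapping
        self._unk_id = unk_id    # id returned for out-of-vocabulary words

    def word_to_id(self, word):
        return self._vocab.get(word, self._unk_id)

vocab = Vocabulary({"a": 0, "dog": 1}, unk_id=2)
print(vocab.word_to_id("dog"), vocab.word_to_id("zebra"))  # 1 2
```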
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/configuration.py | python | ModelConfig.__init__ | (self) | Sets the default model hyperparameters. | Sets the default model hyperparameters. | [
"Sets",
"the",
"default",
"model",
"hyperparameters",
"."
] | def __init__(self):
"""Sets the default model hyperparameters."""
# File pattern of sharded TFRecord file containing SequenceExample protos.
# Must be provided in training and evaluation modes.
self.input_file_pattern = None
# Image format ("jpeg" or "png").
self.image_format = "jpeg"
# Approximate number of values per input shard. Used to ensure sufficient
# mixing between shards in training.
self.values_per_input_shard = 2300
# Minimum number of shards to keep in the input queue.
self.input_queue_capacity_factor = 2
# Number of threads for prefetching SequenceExample protos.
self.num_input_reader_threads = 1
# Name of the SequenceExample context feature containing image data.
self.image_feature_name = "image/data"
# Name of the SequenceExample feature list containing integer captions.
self.caption_feature_name = "image/caption_ids"
# Number of unique words in the vocab (plus 1, for <UNK>).
# The default value is larger than the expected actual vocab size to allow
# for differences between tokenizer versions used in preprocessing. There is
# no harm in using a value greater than the actual vocab size, but using a
# value less than the actual vocab size will result in an error.
self.vocab_size = 20000
# Number of threads for image preprocessing. Should be a multiple of 2.
self.num_preprocess_threads = 4
# Batch size.
self.batch_size = 32
# File containing an Inception v3 checkpoint to initialize the variables
# of the Inception model. Must be provided when starting training for the
# first time.
self.inception_checkpoint_file = None
# Dimensions of Inception v3 input images.
self.image_height = 299
self.image_width = 299
# Scale used to initialize model variables.
self.initializer_scale = 0.08
# LSTM input and output dimensionality, respectively.
self.embedding_size = 512
self.num_lstm_units = 512
# If < 1.0, the dropout keep probability applied to LSTM variables.
self.lstm_dropout_keep_prob = 0.7 | [
"def",
"__init__",
"(",
"self",
")",
":",
"# File pattern of sharded TFRecord file containing SequenceExample protos.",
"# Must be provided in training and evaluation modes.",
"self",
".",
"input_file_pattern",
"=",
"None",
"# Image format (\"jpeg\" or \"png\").",
"self",
".",
"image_format",
"=",
"\"jpeg\"",
"# Approximate number of values per input shard. Used to ensure sufficient",
"# mixing between shards in training.",
"self",
".",
"values_per_input_shard",
"=",
"2300",
"# Minimum number of shards to keep in the input queue.",
"self",
".",
"input_queue_capacity_factor",
"=",
"2",
"# Number of threads for prefetching SequenceExample protos.",
"self",
".",
"num_input_reader_threads",
"=",
"1",
"# Name of the SequenceExample context feature containing image data.",
"self",
".",
"image_feature_name",
"=",
"\"image/data\"",
"# Name of the SequenceExample feature list containing integer captions.",
"self",
".",
"caption_feature_name",
"=",
"\"image/caption_ids\"",
"# Number of unique words in the vocab (plus 1, for <UNK>).",
"# The default value is larger than the expected actual vocab size to allow",
"# for differences between tokenizer versions used in preprocessing. There is",
"# no harm in using a value greater than the actual vocab size, but using a",
"# value less than the actual vocab size will result in an error.",
"self",
".",
"vocab_size",
"=",
"20000",
"# Number of threads for image preprocessing. Should be a multiple of 2.",
"self",
".",
"num_preprocess_threads",
"=",
"4",
"# Batch size.",
"self",
".",
"batch_size",
"=",
"32",
"# File containing an Inception v3 checkpoint to initialize the variables",
"# of the Inception model. Must be provided when starting training for the",
"# first time.",
"self",
".",
"inception_checkpoint_file",
"=",
"None",
"# Dimensions of Inception v3 input images.",
"self",
".",
"image_height",
"=",
"299",
"self",
".",
"image_width",
"=",
"299",
"# Scale used to initialize model variables.",
"self",
".",
"initializer_scale",
"=",
"0.08",
"# LSTM input and output dimensionality, respectively.",
"self",
".",
"embedding_size",
"=",
"512",
"self",
".",
"num_lstm_units",
"=",
"512",
"# If < 1.0, the dropout keep probability applied to LSTM variables.",
"self",
".",
"lstm_dropout_keep_prob",
"=",
"0.7"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/configuration.py#L26-L78 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/configuration.py | python | TrainingConfig.__init__ | (self) | Sets the default training hyperparameters. | Sets the default training hyperparameters. | [
"Sets",
"the",
"default",
"training",
"hyperparameters",
"."
] | def __init__(self):
"""Sets the default training hyperparameters."""
# Number of examples per epoch of training data.
self.num_examples_per_epoch = 586363
# Optimizer for training the model.
self.optimizer = "SGD"
# Learning rate for the initial phase of training.
self.initial_learning_rate = 2.0
self.learning_rate_decay_factor = 0.5
self.num_epochs_per_decay = 8.0
# Learning rate when fine tuning the Inception v3 parameters.
self.train_inception_learning_rate = 0.0005
# If not None, clip gradients to this value.
self.clip_gradients = 5.0
# How many model checkpoints to keep.
self.max_checkpoints_to_keep = 5 | [
"def",
"__init__",
"(",
"self",
")",
":",
"# Number of examples per epoch of training data.",
"self",
".",
"num_examples_per_epoch",
"=",
"586363",
"# Optimizer for training the model.",
"self",
".",
"optimizer",
"=",
"\"SGD\"",
"# Learning rate for the initial phase of training.",
"self",
".",
"initial_learning_rate",
"=",
"2.0",
"self",
".",
"learning_rate_decay_factor",
"=",
"0.5",
"self",
".",
"num_epochs_per_decay",
"=",
"8.0",
"# Learning rate when fine tuning the Inception v3 parameters.",
"self",
".",
"train_inception_learning_rate",
"=",
"0.0005",
"# If not None, clip gradients to this value.",
"self",
".",
"clip_gradients",
"=",
"5.0",
"# How many model checkpoints to keep.",
"self",
".",
"max_checkpoints_to_keep",
"=",
"5"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/configuration.py#L84-L104 |
||
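One way to read the schedule fields in `TrainingConfig`: assuming staircase exponential decay (the decay op itself lives elsewhere in the training script, so this interpretation is an assumption), the learning rate at a given global step would be:

```python
def learning_rate(step,
                  batch_size=32,
                  initial_learning_rate=2.0,
                  learning_rate_decay_factor=0.5,
                  num_epochs_per_decay=8.0,
                  num_examples_per_epoch=586363):
    """Staircase decay: halve the rate every num_epochs_per_decay epochs."""
    batches_per_epoch = num_examples_per_epoch / batch_size
    decay_steps = int(batches_per_epoch * num_epochs_per_decay)
    return initial_learning_rate * learning_rate_decay_factor ** (step // decay_steps)

print(learning_rate(0))        # 2.0
print(learning_rate(150_000))  # 1.0 -- one full decay period has elapsed
```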
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.__init__ | (self, config, mode, train_inception=False) | Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable. | Basic setup. | [
"Basic",
"setup",
"."
] | def __init__(self, config, mode, train_inception=False):
"""Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable.
"""
assert mode in ["train", "eval", "inference"]
self.config = config
self.mode = mode
self.train_inception = train_inception
# Reader for the input data.
self.reader = tf.TFRecordReader()
# To match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer.
self.initializer = tf.random_uniform_initializer(
minval=-self.config.initializer_scale,
maxval=self.config.initializer_scale)
# A float32 Tensor with shape [batch_size, height, width, channels].
self.images = None
# An int32 Tensor with shape [batch_size, padded_length].
self.input_seqs = None
# An int32 Tensor with shape [batch_size, padded_length].
self.target_seqs = None
# An int32 0/1 Tensor with shape [batch_size, padded_length].
self.input_mask = None
# A float32 Tensor with shape [batch_size, embedding_size].
self.image_embeddings = None
# A float32 Tensor with shape [batch_size, padded_length, embedding_size].
self.seq_embeddings = None
# A float32 scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_losses = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_loss_weights = None
# Collection of variables from the inception submodel.
self.inception_variables = []
# Function to restore the inception submodel from checkpoint.
self.init_fn = None
# Global step Tensor.
self.global_step = None | [
"def",
"__init__",
"(",
"self",
",",
"config",
",",
"mode",
",",
"train_inception",
"=",
"False",
")",
":",
"assert",
"mode",
"in",
"[",
"\"train\"",
",",
"\"eval\"",
",",
"\"inference\"",
"]",
"self",
".",
"config",
"=",
"config",
"self",
".",
"mode",
"=",
"mode",
"self",
".",
"train_inception",
"=",
"train_inception",
"# Reader for the input data.",
"self",
".",
"reader",
"=",
"tf",
".",
"TFRecordReader",
"(",
")",
"# To match the \"Show and Tell\" paper we initialize all variables with a",
"# random uniform initializer.",
"self",
".",
"initializer",
"=",
"tf",
".",
"random_uniform_initializer",
"(",
"minval",
"=",
"-",
"self",
".",
"config",
".",
"initializer_scale",
",",
"maxval",
"=",
"self",
".",
"config",
".",
"initializer_scale",
")",
"# A float32 Tensor with shape [batch_size, height, width, channels].",
"self",
".",
"images",
"=",
"None",
"# An int32 Tensor with shape [batch_size, padded_length].",
"self",
".",
"input_seqs",
"=",
"None",
"# An int32 Tensor with shape [batch_size, padded_length].",
"self",
".",
"target_seqs",
"=",
"None",
"# An int32 0/1 Tensor with shape [batch_size, padded_length].",
"self",
".",
"input_mask",
"=",
"None",
"# A float32 Tensor with shape [batch_size, embedding_size].",
"self",
".",
"image_embeddings",
"=",
"None",
"# A float32 Tensor with shape [batch_size, padded_length, embedding_size].",
"self",
".",
"seq_embeddings",
"=",
"None",
"# A float32 scalar Tensor; the total loss for the trainer to optimize.",
"self",
".",
"total_loss",
"=",
"None",
"# A float32 Tensor with shape [batch_size * padded_length].",
"self",
".",
"target_cross_entropy_losses",
"=",
"None",
"# A float32 Tensor with shape [batch_size * padded_length].",
"self",
".",
"target_cross_entropy_loss_weights",
"=",
"None",
"# Collection of variables from the inception submodel.",
"self",
".",
"inception_variables",
"=",
"[",
"]",
"# Function to restore the inception submodel from checkpoint.",
"self",
".",
"init_fn",
"=",
"None",
"# Global step Tensor.",
"self",
".",
"global_step",
"=",
"None"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L41-L97 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.is_training | (self) | return self.mode == "train" | Returns true if the model is built for training mode. | Returns true if the model is built for training mode. | [
"Returns",
"true",
"if",
"the",
"model",
"is",
"built",
"for",
"training",
"mode",
"."
] | def is_training(self):
"""Returns true if the model is built for training mode."""
return self.mode == "train" | [
"def",
"is_training",
"(",
"self",
")",
":",
"return",
"self",
".",
"mode",
"==",
"\"train\""
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L99-L101 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.process_image | (self, encoded_image, thread_id=0) | return image_processing.process_image(encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format) | Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image. | Decodes and processes an image string. | [
"Decodes",
"and",
"processes",
"an",
"image",
"string",
"."
] | def process_image(self, encoded_image, thread_id=0):
"""Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image.
"""
return image_processing.process_image(encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format) | [
"def",
"process_image",
"(",
"self",
",",
"encoded_image",
",",
"thread_id",
"=",
"0",
")",
":",
"return",
"image_processing",
".",
"process_image",
"(",
"encoded_image",
",",
"is_training",
"=",
"self",
".",
"is_training",
"(",
")",
",",
"height",
"=",
"self",
".",
"config",
".",
"image_height",
",",
"width",
"=",
"self",
".",
"config",
".",
"image_width",
",",
"thread_id",
"=",
"thread_id",
",",
"image_format",
"=",
"self",
".",
"config",
".",
"image_format",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L103-L119 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.build_inputs | (self) | Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only) | Input prefetching, preprocessing and batching. | [
"Input",
"prefetching",
"preprocessing",
"and",
"batching",
"."
] | def build_inputs(self):
"""Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
"""
if self.mode == "inference":
# In inference mode, images and inputs are fed via placeholders.
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# Process image and insert batch dimensions.
images = tf.expand_dims(self.process_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# No target sequences or input mask in inference mode.
target_seqs = None
input_mask = None
else:
# Prefetch serialized SequenceExample protos.
input_queue = input_ops.prefetch_input_data(
self.reader,
self.config.input_file_pattern,
is_training=self.is_training(),
batch_size=self.config.batch_size,
values_per_shard=self.config.values_per_input_shard,
input_queue_capacity_factor=self.config.input_queue_capacity_factor,
num_reader_threads=self.config.num_input_reader_threads)
# Image processing and random distortion. Split across multiple threads
# with each thread applying a slightly different distortion.
assert self.config.num_preprocess_threads % 2 == 0
images_and_captions = []
for thread_id in range(self.config.num_preprocess_threads):
serialized_sequence_example = input_queue.dequeue()
encoded_image, caption = input_ops.parse_sequence_example(
serialized_sequence_example,
image_feature=self.config.image_feature_name,
caption_feature=self.config.caption_feature_name)
image = self.process_image(encoded_image, thread_id=thread_id)
images_and_captions.append([image, caption])
# Batch inputs.
queue_capacity = (2 * self.config.num_preprocess_threads *
self.config.batch_size)
images, input_seqs, target_seqs, input_mask = (
input_ops.batch_with_dynamic_pad(images_and_captions,
batch_size=self.config.batch_size,
queue_capacity=queue_capacity))
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask | [
"def",
"build_inputs",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"\"inference\"",
":",
"# In inference mode, images and inputs are fed via placeholders.",
"image_feed",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"string",
",",
"shape",
"=",
"[",
"]",
",",
"name",
"=",
"\"image_feed\"",
")",
"input_feed",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int64",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"# batch_size",
"name",
"=",
"\"input_feed\"",
")",
"# Process image and insert batch dimensions.",
"images",
"=",
"tf",
".",
"expand_dims",
"(",
"self",
".",
"process_image",
"(",
"image_feed",
")",
",",
"0",
")",
"input_seqs",
"=",
"tf",
".",
"expand_dims",
"(",
"input_feed",
",",
"1",
")",
"# No target sequences or input mask in inference mode.",
"target_seqs",
"=",
"None",
"input_mask",
"=",
"None",
"else",
":",
"# Prefetch serialized SequenceExample protos.",
"input_queue",
"=",
"input_ops",
".",
"prefetch_input_data",
"(",
"self",
".",
"reader",
",",
"self",
".",
"config",
".",
"input_file_pattern",
",",
"is_training",
"=",
"self",
".",
"is_training",
"(",
")",
",",
"batch_size",
"=",
"self",
".",
"config",
".",
"batch_size",
",",
"values_per_shard",
"=",
"self",
".",
"config",
".",
"values_per_input_shard",
",",
"input_queue_capacity_factor",
"=",
"self",
".",
"config",
".",
"input_queue_capacity_factor",
",",
"num_reader_threads",
"=",
"self",
".",
"config",
".",
"num_input_reader_threads",
")",
"# Image processing and random distortion. Split across multiple threads",
"# with each thread applying a slightly different distortion.",
"assert",
"self",
".",
"config",
".",
"num_preprocess_threads",
"%",
"2",
"==",
"0",
"images_and_captions",
"=",
"[",
"]",
"for",
"thread_id",
"in",
"range",
"(",
"self",
".",
"config",
".",
"num_preprocess_threads",
")",
":",
"serialized_sequence_example",
"=",
"input_queue",
".",
"dequeue",
"(",
")",
"encoded_image",
",",
"caption",
"=",
"input_ops",
".",
"parse_sequence_example",
"(",
"serialized_sequence_example",
",",
"image_feature",
"=",
"self",
".",
"config",
".",
"image_feature_name",
",",
"caption_feature",
"=",
"self",
".",
"config",
".",
"caption_feature_name",
")",
"image",
"=",
"self",
".",
"process_image",
"(",
"encoded_image",
",",
"thread_id",
"=",
"thread_id",
")",
"images_and_captions",
".",
"append",
"(",
"[",
"image",
",",
"caption",
"]",
")",
"# Batch inputs.",
"queue_capacity",
"=",
"(",
"2",
"*",
"self",
".",
"config",
".",
"num_preprocess_threads",
"*",
"self",
".",
"config",
".",
"batch_size",
")",
"images",
",",
"input_seqs",
",",
"target_seqs",
",",
"input_mask",
"=",
"(",
"input_ops",
".",
"batch_with_dynamic_pad",
"(",
"images_and_captions",
",",
"batch_size",
"=",
"self",
".",
"config",
".",
"batch_size",
",",
"queue_capacity",
"=",
"queue_capacity",
")",
")",
"self",
".",
"images",
"=",
"images",
"self",
".",
"input_seqs",
"=",
"input_seqs",
"self",
".",
"target_seqs",
"=",
"target_seqs",
"self",
".",
"input_mask",
"=",
"input_mask"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L121-L179 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.build_image_embeddings | (self) | Builds the image model subgraph and generates image embeddings.
Inputs:
self.images
Outputs:
self.image_embeddings | Builds the image model subgraph and generates image embeddings. | [
"Builds",
"the",
"image",
"model",
"subgraph",
"and",
"generates",
"image",
"embeddings",
"."
] | def build_image_embeddings(self):
"""Builds the image model subgraph and generates image embeddings.
Inputs:
self.images
Outputs:
self.image_embeddings
"""
inception_output = image_embedding.inception_v3(
self.images,
trainable=self.train_inception,
is_training=self.is_training())
self.inception_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# Map inception output into embedding space.
with tf.variable_scope("image_embedding") as scope:
image_embeddings = tf.contrib.layers.fully_connected(
inputs=inception_output,
num_outputs=self.config.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# Save the embedding size in the graph.
tf.constant(self.config.embedding_size, name="embedding_size")
self.image_embeddings = image_embeddings | [
"def",
"build_image_embeddings",
"(",
"self",
")",
":",
"inception_output",
"=",
"image_embedding",
".",
"inception_v3",
"(",
"self",
".",
"images",
",",
"trainable",
"=",
"self",
".",
"train_inception",
",",
"is_training",
"=",
"self",
".",
"is_training",
"(",
")",
")",
"self",
".",
"inception_variables",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
",",
"scope",
"=",
"\"InceptionV3\"",
")",
"# Map inception output into embedding space.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"image_embedding\"",
")",
"as",
"scope",
":",
"image_embeddings",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"fully_connected",
"(",
"inputs",
"=",
"inception_output",
",",
"num_outputs",
"=",
"self",
".",
"config",
".",
"embedding_size",
",",
"activation_fn",
"=",
"None",
",",
"weights_initializer",
"=",
"self",
".",
"initializer",
",",
"biases_initializer",
"=",
"None",
",",
"scope",
"=",
"scope",
")",
"# Save the embedding size in the graph.",
"tf",
".",
"constant",
"(",
"self",
".",
"config",
".",
"embedding_size",
",",
"name",
"=",
"\"embedding_size\"",
")",
"self",
".",
"image_embeddings",
"=",
"image_embeddings"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L181-L210 |
||
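The image-embedding step in `build_image_embeddings` is a learned linear projection with no bias and no activation. In plain numpy, using the 2048-dimensional Inception v3 pooled output and the config values above (random weights stand in for trained ones):

```python
import numpy as np

batch_size, inception_dim, embedding_size = 32, 2048, 512
rng = np.random.default_rng(0)
inception_output = rng.standard_normal((batch_size, inception_dim)).astype(np.float32)
# Weights drawn uniformly in [-0.08, 0.08], matching initializer_scale above.
W = rng.uniform(-0.08, 0.08, (inception_dim, embedding_size)).astype(np.float32)

image_embeddings = inception_output @ W   # no bias, no activation, as above
print(image_embeddings.shape)             # (32, 512)
```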
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.build_seq_embeddings | (self) | Builds the input sequence embeddings.
Inputs:
self.input_seqs
Outputs:
self.seq_embeddings | Builds the input sequence embeddings. | [
"Builds",
"the",
"input",
"sequence",
"embeddings",
"."
] | def build_seq_embeddings(self):
"""Builds the input sequence embeddings.
Inputs:
self.input_seqs
Outputs:
self.seq_embeddings
"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)
self.seq_embeddings = seq_embeddings | [
"def",
"build_seq_embeddings",
"(",
"self",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"seq_embedding\"",
")",
",",
"tf",
".",
"device",
"(",
"\"/cpu:0\"",
")",
":",
"embedding_map",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"\"map\"",
",",
"shape",
"=",
"[",
"self",
".",
"config",
".",
"vocab_size",
",",
"self",
".",
"config",
".",
"embedding_size",
"]",
",",
"initializer",
"=",
"self",
".",
"initializer",
")",
"seq_embeddings",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"embedding_map",
",",
"self",
".",
"input_seqs",
")",
"self",
".",
"seq_embeddings",
"=",
"seq_embeddings"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L212-L228 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.build_model | (self) | Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only) | Builds the model. | [
"Builds",
"the",
"model",
"."
] | def build_model(self):
"""Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only)
"""
# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_units=self.config.num_lstm_units, state_is_tuple=True)
if self.mode == "train":
lstm_cell = tf.contrib.rnn.DropoutWrapper(
lstm_cell,
input_keep_prob=self.config.lstm_dropout_keep_prob,
output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# Feed the image embeddings to set the initial LSTM state.
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# Allow the LSTM variables to be reused.
lstm_scope.reuse_variables()
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
tf.concat(initial_state, 1, name="initial_state")
# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
state=state_tuple)
      # Concatenate the resulting state.
tf.concat(state_tuple, 1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell,
inputs=self.seq_embeddings,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32,
scope=lstm_scope)
# Stack batches vertically.
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.config.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
if self.mode == "inference":
tf.nn.softmax(logits, name="softmax")
else:
targets = tf.reshape(self.target_seqs, [-1])
weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
# Compute losses.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
logits=logits)
batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="batch_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
# Add summaries.
tf.summary.scalar("losses/batch_loss", batch_loss)
tf.summary.scalar("losses/total_loss", total_loss)
for var in tf.trainable_variables():
tf.summary.histogram("parameters/" + var.op.name, var)
self.total_loss = total_loss
self.target_cross_entropy_losses = losses # Used in evaluation.
self.target_cross_entropy_loss_weights = weights | [
"def",
"build_model",
"(",
"self",
")",
":",
"# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the",
"# modified LSTM in the \"Show and Tell\" paper has no biases and outputs",
"# new_c * sigmoid(o).",
"lstm_cell",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"BasicLSTMCell",
"(",
"num_units",
"=",
"self",
".",
"config",
".",
"num_lstm_units",
",",
"state_is_tuple",
"=",
"True",
")",
"if",
"self",
".",
"mode",
"==",
"\"train\"",
":",
"lstm_cell",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"DropoutWrapper",
"(",
"lstm_cell",
",",
"input_keep_prob",
"=",
"self",
".",
"config",
".",
"lstm_dropout_keep_prob",
",",
"output_keep_prob",
"=",
"self",
".",
"config",
".",
"lstm_dropout_keep_prob",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"lstm\"",
",",
"initializer",
"=",
"self",
".",
"initializer",
")",
"as",
"lstm_scope",
":",
"# Feed the image embeddings to set the initial LSTM state.",
"zero_state",
"=",
"lstm_cell",
".",
"zero_state",
"(",
"batch_size",
"=",
"self",
".",
"image_embeddings",
".",
"get_shape",
"(",
")",
"[",
"0",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"_",
",",
"initial_state",
"=",
"lstm_cell",
"(",
"self",
".",
"image_embeddings",
",",
"zero_state",
")",
"# Allow the LSTM variables to be reused.",
"lstm_scope",
".",
"reuse_variables",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"\"inference\"",
":",
"# In inference mode, use concatenated states for convenient feeding and",
"# fetching.",
"tf",
".",
"concat",
"(",
"initial_state",
",",
"1",
",",
"name",
"=",
"\"initial_state\"",
")",
"# Placeholder for feeding a batch of concatenated states.",
"state_feed",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
",",
"sum",
"(",
"lstm_cell",
".",
"state_size",
")",
"]",
",",
"name",
"=",
"\"state_feed\"",
")",
"state_tuple",
"=",
"tf",
".",
"split",
"(",
"value",
"=",
"state_feed",
",",
"num_or_size_splits",
"=",
"2",
",",
"axis",
"=",
"1",
")",
"# Run a single LSTM step.",
"lstm_outputs",
",",
"state_tuple",
"=",
"lstm_cell",
"(",
"inputs",
"=",
"tf",
".",
"squeeze",
"(",
"self",
".",
"seq_embeddings",
",",
"squeeze_dims",
"=",
"[",
"1",
"]",
")",
",",
"state",
"=",
"state_tuple",
")",
"# Concatentate the resulting state.",
"tf",
".",
"concat",
"(",
"state_tuple",
",",
"1",
",",
"name",
"=",
"\"state\"",
")",
"else",
":",
"# Run the batch of sequence embeddings through the LSTM.",
"sequence_length",
"=",
"tf",
".",
"reduce_sum",
"(",
"self",
".",
"input_mask",
",",
"1",
")",
"lstm_outputs",
",",
"_",
"=",
"tf",
".",
"nn",
".",
"dynamic_rnn",
"(",
"cell",
"=",
"lstm_cell",
",",
"inputs",
"=",
"self",
".",
"seq_embeddings",
",",
"sequence_length",
"=",
"sequence_length",
",",
"initial_state",
"=",
"initial_state",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"scope",
"=",
"lstm_scope",
")",
"# Stack batches vertically.",
"lstm_outputs",
"=",
"tf",
".",
"reshape",
"(",
"lstm_outputs",
",",
"[",
"-",
"1",
",",
"lstm_cell",
".",
"output_size",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"logits\"",
")",
"as",
"logits_scope",
":",
"logits",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"fully_connected",
"(",
"inputs",
"=",
"lstm_outputs",
",",
"num_outputs",
"=",
"self",
".",
"config",
".",
"vocab_size",
",",
"activation_fn",
"=",
"None",
",",
"weights_initializer",
"=",
"self",
".",
"initializer",
",",
"scope",
"=",
"logits_scope",
")",
"if",
"self",
".",
"mode",
"==",
"\"inference\"",
":",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
",",
"name",
"=",
"\"softmax\"",
")",
"else",
":",
"targets",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"target_seqs",
",",
"[",
"-",
"1",
"]",
")",
"weights",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"reshape",
"(",
"self",
".",
"input_mask",
",",
"[",
"-",
"1",
"]",
")",
")",
"# Compute losses.",
"losses",
"=",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"labels",
"=",
"targets",
",",
"logits",
"=",
"logits",
")",
"batch_loss",
"=",
"tf",
".",
"div",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"losses",
",",
"weights",
")",
")",
",",
"tf",
".",
"reduce_sum",
"(",
"weights",
")",
",",
"name",
"=",
"\"batch_loss\"",
")",
"tf",
".",
"losses",
".",
"add_loss",
"(",
"batch_loss",
")",
"total_loss",
"=",
"tf",
".",
"losses",
".",
"get_total_loss",
"(",
")",
"# Add summaries.",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"losses/batch_loss\"",
",",
"batch_loss",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"losses/total_loss\"",
",",
"total_loss",
")",
"for",
"var",
"in",
"tf",
".",
"trainable_variables",
"(",
")",
":",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"parameters/\"",
"+",
"var",
".",
"op",
".",
"name",
",",
"var",
")",
"self",
".",
"total_loss",
"=",
"total_loss",
"self",
".",
"target_cross_entropy_losses",
"=",
"losses",
"# Used in evaluation.",
"self",
".",
"target_cross_entropy_loss_weights",
"=",
"weights"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L230-L326 |
||
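The inference branch above packs the LSTM's (c, h) state pair into one tensor with tf.concat so a caller can fetch and feed a single value per decoding step, then splits it back with tf.split. A minimal NumPy sketch of that round trip, with an illustrative (assumed) state size:

    import numpy as np

    num_lstm_units = 4  # illustrative size, not the model's configured value
    c = np.random.rand(1, num_lstm_units).astype(np.float32)  # LSTM cell state
    h = np.random.rand(1, num_lstm_units).astype(np.float32)  # LSTM hidden state

    # tf.concat(initial_state, 1) packs (c, h) into one [1, 2 * units] tensor...
    state_feed = np.concatenate([c, h], axis=1)
    # ...and tf.split(value=state_feed, num_or_size_splits=2, axis=1) undoes it.
    c_back, h_back = np.split(state_feed, 2, axis=1)
    assert np.array_equal(c, c_back) and np.array_equal(h, h_back)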
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.setup_inception_initializer | (self) | Sets up the function to restore inception variables from checkpoint. | Sets up the function to restore inception variables from checkpoint. | [
"Sets",
"up",
"the",
"function",
"to",
"restore",
"inception",
"variables",
"from",
"checkpoint",
"."
] | def setup_inception_initializer(self):
"""Sets up the function to restore inception variables from checkpoint."""
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn | [
"def",
"setup_inception_initializer",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"!=",
"\"inference\"",
":",
"# Restore inception variables only.",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"self",
".",
"inception_variables",
")",
"def",
"restore_fn",
"(",
"sess",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Restoring Inception variables from checkpoint file %s\"",
",",
"self",
".",
"config",
".",
"inception_checkpoint_file",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"self",
".",
"config",
".",
"inception_checkpoint_file",
")",
"self",
".",
"init_fn",
"=",
"restore_fn"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L328-L339 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.setup_global_step | (self) | Sets up the global step Tensor. | Sets up the global step Tensor. | [
"Sets",
"up",
"the",
"global",
"step",
"Tensor",
"."
] | def setup_global_step(self):
"""Sets up the global step Tensor."""
global_step = tf.Variable(
initial_value=0,
name="global_step",
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
self.global_step = global_step | [
"def",
"setup_global_step",
"(",
"self",
")",
":",
"global_step",
"=",
"tf",
".",
"Variable",
"(",
"initial_value",
"=",
"0",
",",
"name",
"=",
"\"global_step\"",
",",
"trainable",
"=",
"False",
",",
"collections",
"=",
"[",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_STEP",
",",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
"]",
")",
"self",
".",
"global_step",
"=",
"global_step"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L341-L349 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py | python | ShowAndTellModel.build | (self) | Creates all ops for training and evaluation. | Creates all ops for training and evaluation. | [
"Creates",
"all",
"ops",
"for",
"training",
"and",
"evaluation",
"."
] | def build(self):
"""Creates all ops for training and evaluation."""
self.build_inputs()
self.build_image_embeddings()
self.build_seq_embeddings()
self.build_model()
self.setup_inception_initializer()
self.setup_global_step() | [
"def",
"build",
"(",
"self",
")",
":",
"self",
".",
"build_inputs",
"(",
")",
"self",
".",
"build_image_embeddings",
"(",
")",
"self",
".",
"build_seq_embeddings",
"(",
")",
"self",
".",
"build_model",
"(",
")",
"self",
".",
"setup_inception_initializer",
"(",
")",
"self",
".",
"setup_global_step",
"(",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/show_and_tell_model.py#L351-L358 |
||
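A hedged sketch of how build() is usually driven; the import layout, ModelConfig, and constructor signature follow im2txt conventions but are assumptions here, and a TensorFlow 1.x environment is assumed throughout:

    import tensorflow as tf
    from im2txt import configuration, show_and_tell_model  # assumed layout

    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = "/tmp/train-?????-of-00256"  # placeholder

    g = tf.Graph()
    with g.as_default():
        model = show_and_tell_model.ShowAndTellModel(model_config, mode="train")
        model.build()  # inputs -> embeddings -> LSTM -> losses -> global step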
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/image_embedding.py | python | inception_v3 | (images,
trainable=True,
is_training=True,
weight_decay=0.00004,
stddev=0.1,
dropout_keep_prob=0.8,
use_batch_norm=True,
batch_norm_params=None,
add_summaries=True,
scope="InceptionV3") | return net | Builds an Inception V3 subgraph for image embeddings.
Args:
images: A float32 Tensor of shape [batch, height, width, channels].
trainable: Whether the inception submodel should be trainable or not.
is_training: Boolean indicating training mode or not.
weight_decay: Coefficient for weight regularization.
stddev: The standard deviation of the truncated normal weight initializer.
dropout_keep_prob: Dropout keep probability.
use_batch_norm: Whether to use batch normalization.
batch_norm_params: Parameters for batch normalization. See
tf.contrib.layers.batch_norm for details.
add_summaries: Whether to add activation summaries.
scope: Optional Variable scope.
Returns:
end_points: A dictionary of activations from inception_v3 layers. | Builds an Inception V3 subgraph for image embeddings. | [
"Builds",
"an",
"Inception",
"V3",
"subgraph",
"for",
"image",
"embeddings",
"."
] | def inception_v3(images,
trainable=True,
is_training=True,
weight_decay=0.00004,
stddev=0.1,
dropout_keep_prob=0.8,
use_batch_norm=True,
batch_norm_params=None,
add_summaries=True,
scope="InceptionV3"):
"""Builds an Inception V3 subgraph for image embeddings.
Args:
images: A float32 Tensor of shape [batch, height, width, channels].
trainable: Whether the inception submodel should be trainable or not.
is_training: Boolean indicating training mode or not.
weight_decay: Coefficient for weight regularization.
stddev: The standard deviation of the truncated normal weight initializer.
dropout_keep_prob: Dropout keep probability.
use_batch_norm: Whether to use batch normalization.
batch_norm_params: Parameters for batch normalization. See
tf.contrib.layers.batch_norm for details.
add_summaries: Whether to add activation summaries.
scope: Optional Variable scope.
Returns:
end_points: A dictionary of activations from inception_v3 layers.
"""
# Only consider the inception model to be in training mode if it's trainable.
is_inception_model_training = trainable and is_training
if use_batch_norm:
# Default parameters for batch normalization.
if not batch_norm_params:
batch_norm_params = {
"is_training": is_inception_model_training,
"trainable": trainable,
# Decay for the moving averages.
"decay": 0.9997,
# Epsilon to prevent 0s in variance.
"epsilon": 0.001,
# Collection containing the moving mean and moving variance.
"variables_collections": {
"beta": None,
"gamma": None,
"moving_mean": ["moving_vars"],
"moving_variance": ["moving_vars"],
}
}
else:
batch_norm_params = None
if trainable:
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
weights_regularizer = None
with tf.variable_scope(scope, "InceptionV3", [images]) as scope:
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=weights_regularizer,
trainable=trainable):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
net, end_points = inception_v3_base(images, scope=scope)
with tf.variable_scope("logits"):
shape = net.get_shape()
net = slim.avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
net = slim.dropout(
net,
keep_prob=dropout_keep_prob,
is_training=is_inception_model_training,
scope="dropout")
net = slim.flatten(net, scope="flatten")
# Add summaries.
if add_summaries:
for v in end_points.values():
tf.contrib.layers.summaries.summarize_activation(v)
return net | [
"def",
"inception_v3",
"(",
"images",
",",
"trainable",
"=",
"True",
",",
"is_training",
"=",
"True",
",",
"weight_decay",
"=",
"0.00004",
",",
"stddev",
"=",
"0.1",
",",
"dropout_keep_prob",
"=",
"0.8",
",",
"use_batch_norm",
"=",
"True",
",",
"batch_norm_params",
"=",
"None",
",",
"add_summaries",
"=",
"True",
",",
"scope",
"=",
"\"InceptionV3\"",
")",
":",
"# Only consider the inception model to be in training mode if it's trainable.",
"is_inception_model_training",
"=",
"trainable",
"and",
"is_training",
"if",
"use_batch_norm",
":",
"# Default parameters for batch normalization.",
"if",
"not",
"batch_norm_params",
":",
"batch_norm_params",
"=",
"{",
"\"is_training\"",
":",
"is_inception_model_training",
",",
"\"trainable\"",
":",
"trainable",
",",
"# Decay for the moving averages.",
"\"decay\"",
":",
"0.9997",
",",
"# Epsilon to prevent 0s in variance.",
"\"epsilon\"",
":",
"0.001",
",",
"# Collection containing the moving mean and moving variance.",
"\"variables_collections\"",
":",
"{",
"\"beta\"",
":",
"None",
",",
"\"gamma\"",
":",
"None",
",",
"\"moving_mean\"",
":",
"[",
"\"moving_vars\"",
"]",
",",
"\"moving_variance\"",
":",
"[",
"\"moving_vars\"",
"]",
",",
"}",
"}",
"else",
":",
"batch_norm_params",
"=",
"None",
"if",
"trainable",
":",
"weights_regularizer",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"l2_regularizer",
"(",
"weight_decay",
")",
"else",
":",
"weights_regularizer",
"=",
"None",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"\"InceptionV3\"",
",",
"[",
"images",
"]",
")",
"as",
"scope",
":",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
",",
"slim",
".",
"fully_connected",
"]",
",",
"weights_regularizer",
"=",
"weights_regularizer",
",",
"trainable",
"=",
"trainable",
")",
":",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
"]",
",",
"weights_initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"stddev",
"=",
"stddev",
")",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"normalizer_fn",
"=",
"slim",
".",
"batch_norm",
",",
"normalizer_params",
"=",
"batch_norm_params",
")",
":",
"net",
",",
"end_points",
"=",
"inception_v3_base",
"(",
"images",
",",
"scope",
"=",
"scope",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"logits\"",
")",
":",
"shape",
"=",
"net",
".",
"get_shape",
"(",
")",
"net",
"=",
"slim",
".",
"avg_pool2d",
"(",
"net",
",",
"shape",
"[",
"1",
":",
"3",
"]",
",",
"padding",
"=",
"\"VALID\"",
",",
"scope",
"=",
"\"pool\"",
")",
"net",
"=",
"slim",
".",
"dropout",
"(",
"net",
",",
"keep_prob",
"=",
"dropout_keep_prob",
",",
"is_training",
"=",
"is_inception_model_training",
",",
"scope",
"=",
"\"dropout\"",
")",
"net",
"=",
"slim",
".",
"flatten",
"(",
"net",
",",
"scope",
"=",
"\"flatten\"",
")",
"# Add summaries.",
"if",
"add_summaries",
":",
"for",
"v",
"in",
"end_points",
".",
"values",
"(",
")",
":",
"tf",
".",
"contrib",
".",
"layers",
".",
"summaries",
".",
"summarize_activation",
"(",
"v",
")",
"return",
"net"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/image_embedding.py#L30-L114 |
|
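A brief usage sketch for the embedding function above (TensorFlow 1.x assumed; the 299x299 input size and 2048-dim pooled output are the usual Inception V3 values, stated here as assumptions):

    import tensorflow as tf
    from im2txt.ops import image_embedding  # assumed module path

    images = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
    embeddings = image_embedding.inception_v3(
        images, trainable=False, is_training=False)
    # embeddings: the flattened average-pooled feature map, e.g. [batch, 2048]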
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/image_processing.py | python | distort_image | (image, thread_id) | return image | Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1]. | Perform random distortions on an image. | [
"Perform",
"random",
"distortions",
"on",
"an",
"image",
"."
] | def distort_image(image, thread_id):
"""Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1].
"""
# Randomly flip horizontally.
with tf.name_scope("flip_horizontal", values=[image]):
image = tf.image.random_flip_left_right(image)
# Randomly distort the colors based on thread id.
color_ordering = thread_id % 2
with tf.name_scope("distort_color", values=[image]):
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image | [
"def",
"distort_image",
"(",
"image",
",",
"thread_id",
")",
":",
"# Randomly flip horizontally.",
"with",
"tf",
".",
"name_scope",
"(",
"\"flip_horizontal\"",
",",
"values",
"=",
"[",
"image",
"]",
")",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_flip_left_right",
"(",
"image",
")",
"# Randomly distort the colors based on thread id.",
"color_ordering",
"=",
"thread_id",
"%",
"2",
"with",
"tf",
".",
"name_scope",
"(",
"\"distort_color\"",
",",
"values",
"=",
"[",
"image",
"]",
")",
":",
"if",
"color_ordering",
"==",
"0",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.032",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"elif",
"color_ordering",
"==",
"1",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.032",
")",
"# The random_* ops do not necessarily clamp.",
"image",
"=",
"tf",
".",
"clip_by_value",
"(",
"image",
",",
"0.0",
",",
"1.0",
")",
"return",
"image"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/image_processing.py#L26-L59 |
|
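A short sketch of calling the distortion above (TensorFlow 1.x assumed; the input is a stand-in random image):

    import tensorflow as tf
    from im2txt.ops import image_processing  # assumed module path

    image = tf.random_uniform([224, 224, 3], minval=0.0, maxval=1.0)
    # Even thread ids apply brightness -> saturation -> hue -> contrast;
    # odd thread ids use the alternate ordering.
    distorted = image_processing.distort_image(image, thread_id=0)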
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/image_processing.py | python | process_image | (encoded_image,
is_training,
height,
width,
resize_height=346,
resize_width=346,
thread_id=0,
image_format="jpeg") | return image | Decode an image, resize and apply random distortions.
In training, images are distorted slightly differently depending on thread_id.
Args:
encoded_image: String Tensor containing the image.
is_training: Boolean; whether preprocessing for training or eval.
height: Height of the output image.
width: Width of the output image.
resize_height: If > 0, resize height before crop to final dimensions.
resize_width: If > 0, resize width before crop to final dimensions.
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
image_format: "jpeg" or "png".
Returns:
A float32 Tensor of shape [height, width, 3] with values in [-1, 1].
Raises:
ValueError: If image_format is invalid. | Decode an image, resize and apply random distortions. | [
"Decode",
"an",
"image",
"resize",
"and",
"apply",
"random",
"distortions",
"."
] | def process_image(encoded_image,
is_training,
height,
width,
resize_height=346,
resize_width=346,
thread_id=0,
image_format="jpeg"):
"""Decode an image, resize and apply random distortions.
In training, images are distorted slightly differently depending on thread_id.
Args:
encoded_image: String Tensor containing the image.
is_training: Boolean; whether preprocessing for training or eval.
height: Height of the output image.
width: Width of the output image.
resize_height: If > 0, resize height before crop to final dimensions.
resize_width: If > 0, resize width before crop to final dimensions.
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
image_format: "jpeg" or "png".
Returns:
A float32 Tensor of shape [height, width, 3] with values in [-1, 1].
Raises:
ValueError: If image_format is invalid.
"""
# Helper function to log an image summary to the visualizer. Summaries are
# only logged in thread 0.
def image_summary(name, image):
if not thread_id:
tf.summary.image(name, tf.expand_dims(image, 0))
# Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).
with tf.name_scope("decode", values=[encoded_image]):
if image_format == "jpeg":
image = tf.image.decode_jpeg(encoded_image, channels=3)
elif image_format == "png":
image = tf.image.decode_png(encoded_image, channels=3)
else:
raise ValueError("Invalid image format: %s" % image_format)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image_summary("original_image", image)
# Resize image.
assert (resize_height > 0) == (resize_width > 0)
if resize_height:
image = tf.image.resize_images(image,
size=[resize_height, resize_width],
method=tf.image.ResizeMethod.BILINEAR)
# Crop to final dimensions.
if is_training:
image = tf.random_crop(image, [height, width, 3])
else:
# Central crop, assuming resize_height > height, resize_width > width.
image = tf.image.resize_image_with_crop_or_pad(image, height, width)
image_summary("resized_image", image)
# Randomly distort the image.
if is_training:
image = distort_image(image, thread_id)
image_summary("final_image", image)
# Rescale to [-1,1] instead of [0, 1]
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image | [
"def",
"process_image",
"(",
"encoded_image",
",",
"is_training",
",",
"height",
",",
"width",
",",
"resize_height",
"=",
"346",
",",
"resize_width",
"=",
"346",
",",
"thread_id",
"=",
"0",
",",
"image_format",
"=",
"\"jpeg\"",
")",
":",
"# Helper function to log an image summary to the visualizer. Summaries are",
"# only logged in thread 0.",
"def",
"image_summary",
"(",
"name",
",",
"image",
")",
":",
"if",
"not",
"thread_id",
":",
"tf",
".",
"summary",
".",
"image",
"(",
"name",
",",
"tf",
".",
"expand_dims",
"(",
"image",
",",
"0",
")",
")",
"# Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).",
"with",
"tf",
".",
"name_scope",
"(",
"\"decode\"",
",",
"values",
"=",
"[",
"encoded_image",
"]",
")",
":",
"if",
"image_format",
"==",
"\"jpeg\"",
":",
"image",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"encoded_image",
",",
"channels",
"=",
"3",
")",
"elif",
"image_format",
"==",
"\"png\"",
":",
"image",
"=",
"tf",
".",
"image",
".",
"decode_png",
"(",
"encoded_image",
",",
"channels",
"=",
"3",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid image format: %s\"",
"%",
"image_format",
")",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"image_summary",
"(",
"\"original_image\"",
",",
"image",
")",
"# Resize image.",
"assert",
"(",
"resize_height",
">",
"0",
")",
"==",
"(",
"resize_width",
">",
"0",
")",
"if",
"resize_height",
":",
"image",
"=",
"tf",
".",
"image",
".",
"resize_images",
"(",
"image",
",",
"size",
"=",
"[",
"resize_height",
",",
"resize_width",
"]",
",",
"method",
"=",
"tf",
".",
"image",
".",
"ResizeMethod",
".",
"BILINEAR",
")",
"# Crop to final dimensions.",
"if",
"is_training",
":",
"image",
"=",
"tf",
".",
"random_crop",
"(",
"image",
",",
"[",
"height",
",",
"width",
",",
"3",
"]",
")",
"else",
":",
"# Central crop, assuming resize_height > height, resize_width > width.",
"image",
"=",
"tf",
".",
"image",
".",
"resize_image_with_crop_or_pad",
"(",
"image",
",",
"height",
",",
"width",
")",
"image_summary",
"(",
"\"resized_image\"",
",",
"image",
")",
"# Randomly distort the image.",
"if",
"is_training",
":",
"image",
"=",
"distort_image",
"(",
"image",
",",
"thread_id",
")",
"image_summary",
"(",
"\"final_image\"",
",",
"image",
")",
"# Rescale to [-1,1] instead of [0, 1]",
"image",
"=",
"tf",
".",
"subtract",
"(",
"image",
",",
"0.5",
")",
"image",
"=",
"tf",
".",
"multiply",
"(",
"image",
",",
"2.0",
")",
"return",
"image"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/image_processing.py#L62-L133 |
|
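A hedged end-to-end sketch for the eval path of process_image (TensorFlow 1.x assumed; the file path is a placeholder):

    import tensorflow as tf
    from im2txt.ops import image_processing  # assumed module path

    with tf.gfile.GFile("/path/to/image.jpg", "rb") as f:  # placeholder path
        encoded = f.read()
    # Eval path: bilinear resize to 346x346, central crop to 299x299,
    # then rescale pixel values from [0, 1] to [-1, 1].
    image = image_processing.process_image(
        encoded, is_training=False, height=299, width=299)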
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py | python | parse_sequence_example | (serialized, image_feature, caption_feature) | return encoded_image, caption | Parses a tensorflow.SequenceExample into an image and caption.
Args:
serialized: A scalar string Tensor; a single serialized SequenceExample.
image_feature: Name of SequenceExample context feature containing image
data.
caption_feature: Name of SequenceExample feature list containing integer
captions.
Returns:
encoded_image: A scalar string Tensor containing a JPEG encoded image.
caption: A 1-D int64 Tensor with dynamically specified length. | Parses a tensorflow.SequenceExample into an image and caption. | [
"Parses",
"a",
"tensorflow",
".",
"SequenceExample",
"into",
"an",
"image",
"and",
"caption",
"."
] | def parse_sequence_example(serialized, image_feature, caption_feature):
"""Parses a tensorflow.SequenceExample into an image and caption.
Args:
serialized: A scalar string Tensor; a single serialized SequenceExample.
image_feature: Name of SequenceExample context feature containing image
data.
caption_feature: Name of SequenceExample feature list containing integer
captions.
Returns:
encoded_image: A scalar string Tensor containing a JPEG encoded image.
caption: A 1-D int64 Tensor with dynamically specified length.
"""
context, sequence = tf.parse_single_sequence_example(
serialized,
context_features={
image_feature: tf.FixedLenFeature([], dtype=tf.string)
},
sequence_features={
caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
})
encoded_image = context[image_feature]
caption = sequence[caption_feature]
return encoded_image, caption | [
"def",
"parse_sequence_example",
"(",
"serialized",
",",
"image_feature",
",",
"caption_feature",
")",
":",
"context",
",",
"sequence",
"=",
"tf",
".",
"parse_single_sequence_example",
"(",
"serialized",
",",
"context_features",
"=",
"{",
"image_feature",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"}",
",",
"sequence_features",
"=",
"{",
"caption_feature",
":",
"tf",
".",
"FixedLenSequenceFeature",
"(",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
",",
"}",
")",
"encoded_image",
"=",
"context",
"[",
"image_feature",
"]",
"caption",
"=",
"sequence",
"[",
"caption_feature",
"]",
"return",
"encoded_image",
",",
"caption"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py#L26-L51 |
|
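To make the expected schema concrete, here is a hedged sketch that builds a matching SequenceExample by hand; the feature names mirror the im2txt defaults but are assumptions here (TensorFlow 1.x assumed):

    import tensorflow as tf
    from im2txt.ops import inputs  # assumed module path

    example = tf.train.SequenceExample()
    example.context.feature["image/data"].bytes_list.value.append(b"<jpeg bytes>")
    caption_ids = example.feature_lists.feature_list["image/caption_ids"]
    for word_id in [1, 7, 42, 2]:  # toy caption ids, e.g. <S> ... </S>
        caption_ids.feature.add().int64_list.value.append(word_id)

    serialized = tf.constant(example.SerializeToString())
    encoded_image, caption = inputs.parse_sequence_example(
        serialized, image_feature="image/data",
        caption_feature="image/caption_ids")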
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py | python | prefetch_input_data | (reader,
file_pattern,
is_training,
batch_size,
values_per_shard,
input_queue_capacity_factor=16,
num_reader_threads=1,
shard_queue_name="filename_queue",
value_queue_name="input_queue") | return values_queue | Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values. | Prefetches string values from disk into an input queue. | [
"Prefetches",
"string",
"values",
"from",
"disk",
"into",
"an",
"input",
"queue",
"."
] | def prefetch_input_data(reader,
file_pattern,
is_training,
batch_size,
values_per_shard,
input_queue_capacity_factor=16,
num_reader_threads=1,
shard_queue_name="filename_queue",
value_queue_name="input_queue"):
"""Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
where input_queue_capacity_factor should be chosen to trade off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tf.logging.fatal("Found no input files matching %s", file_pattern)
else:
tf.logging.info("Prefetching values from %d files matching %s",
len(data_files), file_pattern)
if is_training:
filename_queue = tf.train.string_input_producer(
data_files, shuffle=True, capacity=16, name=shard_queue_name)
min_queue_examples = values_per_shard * input_queue_capacity_factor
capacity = min_queue_examples + 100 * batch_size
values_queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string],
name="random_" + value_queue_name)
else:
filename_queue = tf.train.string_input_producer(
data_files, shuffle=False, capacity=1, name=shard_queue_name)
capacity = values_per_shard + 3 * batch_size
values_queue = tf.FIFOQueue(
capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
values_queue, enqueue_ops))
tf.summary.scalar(
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
return values_queue | [
"def",
"prefetch_input_data",
"(",
"reader",
",",
"file_pattern",
",",
"is_training",
",",
"batch_size",
",",
"values_per_shard",
",",
"input_queue_capacity_factor",
"=",
"16",
",",
"num_reader_threads",
"=",
"1",
",",
"shard_queue_name",
"=",
"\"filename_queue\"",
",",
"value_queue_name",
"=",
"\"input_queue\"",
")",
":",
"data_files",
"=",
"[",
"]",
"for",
"pattern",
"in",
"file_pattern",
".",
"split",
"(",
"\",\"",
")",
":",
"data_files",
".",
"extend",
"(",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"pattern",
")",
")",
"if",
"not",
"data_files",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"\"Found no input files matching %s\"",
",",
"file_pattern",
")",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Prefetching values from %d files matching %s\"",
",",
"len",
"(",
"data_files",
")",
",",
"file_pattern",
")",
"if",
"is_training",
":",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"data_files",
",",
"shuffle",
"=",
"True",
",",
"capacity",
"=",
"16",
",",
"name",
"=",
"shard_queue_name",
")",
"min_queue_examples",
"=",
"values_per_shard",
"*",
"input_queue_capacity_factor",
"capacity",
"=",
"min_queue_examples",
"+",
"100",
"*",
"batch_size",
"values_queue",
"=",
"tf",
".",
"RandomShuffleQueue",
"(",
"capacity",
"=",
"capacity",
",",
"min_after_dequeue",
"=",
"min_queue_examples",
",",
"dtypes",
"=",
"[",
"tf",
".",
"string",
"]",
",",
"name",
"=",
"\"random_\"",
"+",
"value_queue_name",
")",
"else",
":",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"data_files",
",",
"shuffle",
"=",
"False",
",",
"capacity",
"=",
"1",
",",
"name",
"=",
"shard_queue_name",
")",
"capacity",
"=",
"values_per_shard",
"+",
"3",
"*",
"batch_size",
"values_queue",
"=",
"tf",
".",
"FIFOQueue",
"(",
"capacity",
"=",
"capacity",
",",
"dtypes",
"=",
"[",
"tf",
".",
"string",
"]",
",",
"name",
"=",
"\"fifo_\"",
"+",
"value_queue_name",
")",
"enqueue_ops",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"num_reader_threads",
")",
":",
"_",
",",
"value",
"=",
"reader",
".",
"read",
"(",
"filename_queue",
")",
"enqueue_ops",
".",
"append",
"(",
"values_queue",
".",
"enqueue",
"(",
"[",
"value",
"]",
")",
")",
"tf",
".",
"train",
".",
"queue_runner",
".",
"add_queue_runner",
"(",
"tf",
".",
"train",
".",
"queue_runner",
".",
"QueueRunner",
"(",
"values_queue",
",",
"enqueue_ops",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"queue/%s/fraction_of_%d_full\"",
"%",
"(",
"values_queue",
".",
"name",
",",
"capacity",
")",
",",
"tf",
".",
"cast",
"(",
"values_queue",
".",
"size",
"(",
")",
",",
"tf",
".",
"float32",
")",
"*",
"(",
"1.",
"/",
"capacity",
")",
")",
"return",
"values_queue"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py#L54-L123 |
|
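A hedged usage sketch for the prefetch queue above (TensorFlow 1.x assumed; the shard pattern and values_per_shard are placeholders):

    import tensorflow as tf
    from im2txt.ops import inputs  # assumed module path

    reader = tf.TFRecordReader()
    input_queue = inputs.prefetch_input_data(
        reader,
        file_pattern="/tmp/train-?????-of-00256",  # placeholder shard layout
        is_training=True,
        batch_size=32,
        values_per_shard=2300)  # rough records-per-shard estimate
    serialized_example = input_queue.dequeue()  # one serialized SequenceExample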
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py | python | batch_with_dynamic_pad | (images_and_captions,
batch_size,
queue_capacity,
add_summaries=True) | return images, input_seqs, target_seqs, mask | Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
[ 1 2 3 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. | Batches input images and captions. | [
"Batches",
"input",
"images",
"and",
"captions",
"."
] | def batch_with_dynamic_pad(images_and_captions,
batch_size,
queue_capacity,
add_summaries=True):
"""Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
[ 1 2 3 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length].
"""
enqueue_list = []
for image, caption in images_and_captions:
caption_length = tf.shape(caption)[0]
input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
input_seq = tf.slice(caption, [0], input_length)
target_seq = tf.slice(caption, [1], input_length)
indicator = tf.ones(input_length, dtype=tf.int32)
enqueue_list.append([image, input_seq, target_seq, indicator])
images, input_seqs, target_seqs, mask = tf.train.batch_join(
enqueue_list,
batch_size=batch_size,
capacity=queue_capacity,
dynamic_pad=True,
name="batch_and_pad")
if add_summaries:
lengths = tf.add(tf.reduce_sum(mask, 1), 1)
tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))
return images, input_seqs, target_seqs, mask | [
"def",
"batch_with_dynamic_pad",
"(",
"images_and_captions",
",",
"batch_size",
",",
"queue_capacity",
",",
"add_summaries",
"=",
"True",
")",
":",
"enqueue_list",
"=",
"[",
"]",
"for",
"image",
",",
"caption",
"in",
"images_and_captions",
":",
"caption_length",
"=",
"tf",
".",
"shape",
"(",
"caption",
")",
"[",
"0",
"]",
"input_length",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"subtract",
"(",
"caption_length",
",",
"1",
")",
",",
"0",
")",
"input_seq",
"=",
"tf",
".",
"slice",
"(",
"caption",
",",
"[",
"0",
"]",
",",
"input_length",
")",
"target_seq",
"=",
"tf",
".",
"slice",
"(",
"caption",
",",
"[",
"1",
"]",
",",
"input_length",
")",
"indicator",
"=",
"tf",
".",
"ones",
"(",
"input_length",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"enqueue_list",
".",
"append",
"(",
"[",
"image",
",",
"input_seq",
",",
"target_seq",
",",
"indicator",
"]",
")",
"images",
",",
"input_seqs",
",",
"target_seqs",
",",
"mask",
"=",
"tf",
".",
"train",
".",
"batch_join",
"(",
"enqueue_list",
",",
"batch_size",
"=",
"batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"dynamic_pad",
"=",
"True",
",",
"name",
"=",
"\"batch_and_pad\"",
")",
"if",
"add_summaries",
":",
"lengths",
"=",
"tf",
".",
"add",
"(",
"tf",
".",
"reduce_sum",
"(",
"mask",
",",
"1",
")",
",",
"1",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_min\"",
",",
"tf",
".",
"reduce_min",
"(",
"lengths",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_max\"",
",",
"tf",
".",
"reduce_max",
"(",
"lengths",
")",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"caption_length/batch_mean\"",
",",
"tf",
".",
"reduce_mean",
"(",
"lengths",
")",
")",
"return",
"images",
",",
"input_seqs",
",",
"target_seqs",
",",
"mask"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/ops/inputs.py#L126-L204 |
|
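The split-and-shift step from the docstring can be checked in plain Python; the real op does the same per caption with tf.slice before dynamic-pad batching:

    caption = [1, 2, 3, 4, 5]      # word ids, including start/end tokens
    input_seq = caption[:-1]       # [1, 2, 3, 4]
    target_seq = caption[1:]       # [2, 3, 4, 5]
    mask = [1] * len(input_seq)    # all real words; padded slots would be 0

    assert len(input_seq) == len(target_seq) == len(mask)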
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py | python | Vocabulary.__init__ | (self,
vocab_file,
start_word="<S>",
end_word="</S>",
unk_word="<UNK>") | Initializes the vocabulary.
Args:
vocab_file: File containing the vocabulary, where the words are the first
whitespace-separated token on each line (other tokens are ignored) and
the word ids are the corresponding line numbers.
start_word: Special word denoting sentence start.
end_word: Special word denoting sentence end.
unk_word: Special word denoting unknown words. | Initializes the vocabulary. | [
"Initializes",
"the",
"vocabulary",
"."
] | def __init__(self,
vocab_file,
start_word="<S>",
end_word="</S>",
unk_word="<UNK>"):
"""Initializes the vocabulary.
Args:
vocab_file: File containing the vocabulary, where the words are the first
whitespace-separated token on each line (other tokens are ignored) and
the word ids are the corresponding line numbers.
start_word: Special word denoting sentence start.
end_word: Special word denoting sentence end.
unk_word: Special word denoting unknown words.
"""
if not tf.gfile.Exists(vocab_file):
tf.logging.fatal("Vocab file %s not found.", vocab_file)
tf.logging.info("Initializing vocabulary from file: %s", vocab_file)
with tf.gfile.GFile(vocab_file, mode="r") as f:
reverse_vocab = list(f.readlines())
reverse_vocab = [line.split()[0] for line in reverse_vocab]
assert start_word in reverse_vocab
assert end_word in reverse_vocab
if unk_word not in reverse_vocab:
reverse_vocab.append(unk_word)
vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
tf.logging.info("Created vocabulary with %d words" % len(vocab))
self.vocab = vocab # vocab[word] = id
self.reverse_vocab = reverse_vocab # reverse_vocab[id] = word
# Save special word ids.
self.start_id = vocab[start_word]
self.end_id = vocab[end_word]
self.unk_id = vocab[unk_word] | [
"def",
"__init__",
"(",
"self",
",",
"vocab_file",
",",
"start_word",
"=",
"\"<S>\"",
",",
"end_word",
"=",
"\"</S>\"",
",",
"unk_word",
"=",
"\"<UNK>\"",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"vocab_file",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"\"Vocab file %s not found.\"",
",",
"vocab_file",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Initializing vocabulary from file: %s\"",
",",
"vocab_file",
")",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"vocab_file",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"f",
":",
"reverse_vocab",
"=",
"list",
"(",
"f",
".",
"readlines",
"(",
")",
")",
"reverse_vocab",
"=",
"[",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"reverse_vocab",
"]",
"assert",
"start_word",
"in",
"reverse_vocab",
"assert",
"end_word",
"in",
"reverse_vocab",
"if",
"unk_word",
"not",
"in",
"reverse_vocab",
":",
"reverse_vocab",
".",
"append",
"(",
"unk_word",
")",
"vocab",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"(",
"y",
",",
"x",
")",
"in",
"enumerate",
"(",
"reverse_vocab",
")",
"]",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Created vocabulary with %d words\"",
"%",
"len",
"(",
"vocab",
")",
")",
"self",
".",
"vocab",
"=",
"vocab",
"# vocab[word] = id",
"self",
".",
"reverse_vocab",
"=",
"reverse_vocab",
"# reverse_vocab[id] = word",
"# Save special word ids.",
"self",
".",
"start_id",
"=",
"vocab",
"[",
"start_word",
"]",
"self",
".",
"end_id",
"=",
"vocab",
"[",
"end_word",
"]",
"self",
".",
"unk_id",
"=",
"vocab",
"[",
"unk_word",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py#L28-L64 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py | python | Vocabulary.word_to_id | (self, word) | Returns the integer word id of a word string. | Returns the integer word id of a word string. | [
"Returns",
"the",
"integer",
"word",
"id",
"of",
"a",
"word",
"string",
"."
] | def word_to_id(self, word):
"""Returns the integer word id of a word string."""
if word in self.vocab:
return self.vocab[word]
else:
return self.unk_id | [
"def",
"word_to_id",
"(",
"self",
",",
"word",
")",
":",
"if",
"word",
"in",
"self",
".",
"vocab",
":",
"return",
"self",
".",
"vocab",
"[",
"word",
"]",
"else",
":",
"return",
"self",
".",
"unk_id"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py#L66-L71 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py | python | Vocabulary.id_to_word | (self, word_id) | Returns the word string of an integer word id. | Returns the word string of an integer word id. | [
"Returns",
"the",
"word",
"string",
"of",
"an",
"integer",
"word",
"id",
"."
] | def id_to_word(self, word_id):
"""Returns the word string of an integer word id."""
if word_id >= len(self.reverse_vocab):
return self.reverse_vocab[self.unk_id]
else:
return self.reverse_vocab[word_id] | [
"def",
"id_to_word",
"(",
"self",
",",
"word_id",
")",
":",
"if",
"word_id",
">=",
"len",
"(",
"self",
".",
"reverse_vocab",
")",
":",
"return",
"self",
".",
"reverse_vocab",
"[",
"self",
".",
"unk_id",
"]",
"else",
":",
"return",
"self",
".",
"reverse_vocab",
"[",
"word_id",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/vocabulary.py#L73-L78 |
||
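A small runnable sketch of the three Vocabulary methods above (TensorFlow 1.x assumed for tf.gfile; the file path is a placeholder, and counts after each word are ignored by the loader):

    from im2txt.inference_utils.vocabulary import Vocabulary  # assumed path

    with open("/tmp/word_counts.txt", "w") as f:  # placeholder vocab file
        f.write("<S> 1000\n</S> 1000\na 900\ndog 50\n")

    vocab = Vocabulary("/tmp/word_counts.txt")
    assert vocab.word_to_id("dog") == 3               # ids are line numbers
    assert vocab.word_to_id("zebra") == vocab.unk_id  # unseen word -> <UNK>
    assert vocab.id_to_word(999) == "<UNK>"           # out-of-range id -> <UNK>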
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase.build_model | (self, model_config) | Builds the model for inference.
Args:
model_config: Object containing configuration for building the model.
Returns:
model: The model object. | Builds the model for inference. | [
"Builds",
"the",
"model",
"for",
"inference",
"."
] | def build_model(self, model_config):
"""Builds the model for inference.
Args:
model_config: Object containing configuration for building the model.
Returns:
model: The model object.
"""
tf.logging.fatal("Please implement build_model in subclass") | [
"def",
"build_model",
"(",
"self",
",",
"model_config",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"\"Please implement build_model in subclass\"",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L62-L71 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase._create_restore_fn | (self, checkpoint_path, saver) | return _restore_fn | Creates a function that restores a model from checkpoint.
Args:
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
saver: Saver for restoring variables from the checkpoint file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.
Raises:
ValueError: If checkpoint_path does not refer to a checkpoint file or a
directory containing a checkpoint file. | Creates a function that restores a model from checkpoint. | [
"Creates",
"a",
"function",
"that",
"restores",
"a",
"model",
"from",
"checkpoint",
"."
] | def _create_restore_fn(self, checkpoint_path, saver):
"""Creates a function that restores a model from checkpoint.
Args:
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
saver: Saver for restoring variables from the checkpoint file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.
Raises:
ValueError: If checkpoint_path does not refer to a checkpoint file or a
directory containing a checkpoint file.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_path:
raise ValueError("No checkpoint file found in: %s" % checkpoint_path)
def _restore_fn(sess):
tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
saver.restore(sess, checkpoint_path)
tf.logging.info("Successfully loaded checkpoint: %s",
os.path.basename(checkpoint_path))
return _restore_fn | [
"def",
"_create_restore_fn",
"(",
"self",
",",
"checkpoint_path",
",",
"saver",
")",
":",
"if",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"checkpoint_path",
")",
":",
"checkpoint_path",
"=",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"checkpoint_path",
")",
"if",
"not",
"checkpoint_path",
":",
"raise",
"ValueError",
"(",
"\"No checkpoint file found in: %s\"",
"%",
"checkpoint_path",
")",
"def",
"_restore_fn",
"(",
"sess",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Loading model from checkpoint: %s\"",
",",
"checkpoint_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"checkpoint_path",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Successfully loaded checkpoint: %s\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"checkpoint_path",
")",
")",
"return",
"_restore_fn"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L73-L100 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase.build_graph_from_config | (self, model_config, checkpoint_path) | return self._create_restore_fn(checkpoint_path, saver) | Builds the inference graph from a configuration object.
Args:
model_config: Object containing configuration for building the model.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file. | Builds the inference graph from a configuration object. | [
"Builds",
"the",
"inference",
"graph",
"from",
"a",
"configuration",
"object",
"."
] | def build_graph_from_config(self, model_config, checkpoint_path):
"""Builds the inference graph from a configuration object.
Args:
model_config: Object containing configuration for building the model.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.
"""
tf.logging.info("Building model.")
self.build_model(model_config)
saver = tf.train.Saver()
return self._create_restore_fn(checkpoint_path, saver) | [
"def",
"build_graph_from_config",
"(",
"self",
",",
"model_config",
",",
"checkpoint_path",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Building model.\"",
")",
"self",
".",
"build_model",
"(",
"model_config",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"return",
"self",
".",
"_create_restore_fn",
"(",
"checkpoint_path",
",",
"saver",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L102-L118 |
|
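A hedged sketch of the usual inference setup around build_graph_from_config; InferenceWrapper is the im2txt subclass that implements build_model(), and the paths are placeholders:

    import tensorflow as tf
    from im2txt import configuration, inference_wrapper  # assumed layout

    g = tf.Graph()
    with g.as_default():
        wrapper = inference_wrapper.InferenceWrapper()
        restore_fn = wrapper.build_graph_from_config(
            configuration.ModelConfig(), "/tmp/model-checkpoints")  # placeholder
    g.finalize()

    with tf.Session(graph=g) as sess:
        restore_fn(sess)  # restores weights from the latest checkpoint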
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase.build_graph_from_proto | (self, graph_def_file, saver_def_file,
checkpoint_path) | return self._create_restore_fn(checkpoint_path, saver) | Builds the inference graph from serialized GraphDef and SaverDef protos.
Args:
graph_def_file: File containing a serialized GraphDef proto.
saver_def_file: File containing a serialized SaverDef proto.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file. | Builds the inference graph from serialized GraphDef and SaverDef protos. | [
"Builds",
"the",
"inference",
"graph",
"from",
"serialized",
"GraphDef",
"and",
"SaverDef",
"protos",
"."
] | def build_graph_from_proto(self, graph_def_file, saver_def_file,
checkpoint_path):
"""Builds the inference graph from serialized GraphDef and SaverDef protos.
Args:
graph_def_file: File containing a serialized GraphDef proto.
saver_def_file: File containing a serialized SaverDef proto.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.
"""
# Load the Graph.
tf.logging.info("Loading GraphDef from file: %s", graph_def_file)
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(graph_def_file, "rb") as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
# Load the Saver.
tf.logging.info("Loading SaverDef from file: %s", saver_def_file)
saver_def = tf.train.SaverDef()
with tf.gfile.FastGFile(saver_def_file, "rb") as f:
saver_def.ParseFromString(f.read())
saver = tf.train.Saver(saver_def=saver_def)
return self._create_restore_fn(checkpoint_path, saver) | [
"def",
"build_graph_from_proto",
"(",
"self",
",",
"graph_def_file",
",",
"saver_def_file",
",",
"checkpoint_path",
")",
":",
"# Load the Graph.",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Loading GraphDef from file: %s\"",
",",
"graph_def_file",
")",
"graph_def",
"=",
"tf",
".",
"GraphDef",
"(",
")",
"with",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"graph_def_file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"graph_def",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"tf",
".",
"import_graph_def",
"(",
"graph_def",
",",
"name",
"=",
"\"\"",
")",
"# Load the Saver.",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Loading SaverDef from file: %s\"",
",",
"saver_def_file",
")",
"saver_def",
"=",
"tf",
".",
"train",
".",
"SaverDef",
"(",
")",
"with",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"saver_def_file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"saver_def",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"saver_def",
"=",
"saver_def",
")",
"return",
"self",
".",
"_create_restore_fn",
"(",
"checkpoint_path",
",",
"saver",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L120-L148 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase.feed_image | (self, sess, encoded_image) | Feeds an image and returns the initial model state.
See comments at the top of file.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
state: A numpy array of shape [1, state_size]. | Feeds an image and returns the initial model state. | [
"Feeds",
"an",
"image",
"and",
"returns",
"the",
"initial",
"model",
"state",
"."
] | def feed_image(self, sess, encoded_image):
"""Feeds an image and returns the initial model state.
See comments at the top of file.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
state: A numpy array of shape [1, state_size].
"""
tf.logging.fatal("Please implement feed_image in subclass") | [
"def",
"feed_image",
"(",
"self",
",",
"sess",
",",
"encoded_image",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"\"Please implement feed_image in subclass\"",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L150-L162 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py | python | InferenceWrapperBase.inference_step | (self, sess, input_feed, state_feed) | Runs one step of inference.
Args:
sess: TensorFlow Session object.
input_feed: A numpy array of shape [batch_size].
state_feed: A numpy array of shape [batch_size, state_size].
Returns:
softmax_output: A numpy array of shape [batch_size, vocab_size].
new_state: A numpy array of shape [batch_size, state_size].
metadata: Optional. If not None, a string containing metadata about the
current inference step (e.g. a serialized numpy array containing
activations from a particular model layer). | Runs one step of inference. | [
"Runs",
"one",
"step",
"of",
"inference",
"."
] | def inference_step(self, sess, input_feed, state_feed):
"""Runs one step of inference.
Args:
sess: TensorFlow Session object.
input_feed: A numpy array of shape [batch_size].
state_feed: A numpy array of shape [batch_size, state_size].
Returns:
softmax_output: A numpy array of shape [batch_size, vocab_size].
new_state: A numpy array of shape [batch_size, state_size].
metadata: Optional. If not None, a string containing metadata about the
current inference step (e.g. a serialized numpy array containing
activations from a particular model layer).
"""
tf.logging.fatal("Please implement inference_step in subclass") | [
"def",
"inference_step",
"(",
"self",
",",
"sess",
",",
"input_feed",
",",
"state_feed",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"\"Please implement inference_step in subclass\"",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/inference_wrapper_base.py#L164-L179 |
||
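Together, feed_image() and inference_step() support a word-at-a-time decoding loop. A hedged greedy sketch (the wrapper subclass, session, and vocabulary come from elsewhere in im2txt; the actual caption generator uses beam search rather than this greedy pick):

    import numpy as np

    def greedy_decode(wrapper, sess, vocab, encoded_image, max_len=20):
        """Greedy word-at-a-time decoding built on the two hooks above."""
        state = wrapper.feed_image(sess, encoded_image)   # [1, state_size]
        word, words = vocab.start_id, []
        for _ in range(max_len):
            softmax, state, _ = wrapper.inference_step(
                sess, input_feed=np.array([word]), state_feed=state)
            word = int(np.argmax(softmax[0]))             # greedy, not beam
            if word == vocab.end_id:
                break
            words.append(vocab.id_to_word(word))
        return " ".join(words)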
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | Caption.__init__ | (self, sentence, state, logprob, score, metadata=None) | Initializes the Caption.
Args:
sentence: List of word ids in the caption.
state: Model state after generating the previous word.
logprob: Log-probability of the caption.
score: Score of the caption.
metadata: Optional metadata associated with the partial sentence. If not
None, a list of strings with the same length as 'sentence'. | Initializes the Caption. | [
"Initializes",
"the",
"Caption",
"."
] | def __init__(self, sentence, state, logprob, score, metadata=None):
"""Initializes the Caption.
Args:
sentence: List of word ids in the caption.
state: Model state after generating the previous word.
logprob: Log-probability of the caption.
score: Score of the caption.
metadata: Optional metadata associated with the partial sentence. If not
None, a list of strings with the same length as 'sentence'.
"""
self.sentence = sentence
self.state = state
self.logprob = logprob
self.score = score
self.metadata = metadata | [
"def",
"__init__",
"(",
"self",
",",
"sentence",
",",
"state",
",",
"logprob",
",",
"score",
",",
"metadata",
"=",
"None",
")",
":",
"self",
".",
"sentence",
"=",
"sentence",
"self",
".",
"state",
"=",
"state",
"self",
".",
"logprob",
"=",
"logprob",
"self",
".",
"score",
"=",
"score",
"self",
".",
"metadata",
"=",
"metadata"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L31-L46 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | Caption.__cmp__ | (self, other) | Compares Captions by score. | Compares Captions by score. | [
"Compares",
"Captions",
"by",
"score",
"."
] | def __cmp__(self, other):
"""Compares Captions by score."""
assert isinstance(other, Caption)
if self.score == other.score:
return 0
elif self.score < other.score:
return -1
else:
return 1 | [
"def",
"__cmp__",
"(",
"self",
",",
"other",
")",
":",
"assert",
"isinstance",
"(",
"other",
",",
"Caption",
")",
"if",
"self",
".",
"score",
"==",
"other",
".",
"score",
":",
"return",
"0",
"elif",
"self",
".",
"score",
"<",
"other",
".",
"score",
":",
"return",
"-",
"1",
"else",
":",
"return",
"1"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L48-L56 |
||
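__cmp__ is a Python 2 protocol that Python 3 ignores, so a port of this class would define rich comparisons instead. A hedged sketch of the usual replacement pattern (not part of the repository):

import functools

@functools.total_ordering
class ComparableByScore(object):
    # __eq__ plus __lt__ are enough; total_ordering derives the rest.
    def __init__(self, score):
        self.score = score

    def __eq__(self, other):
        return self.score == other.score

    def __lt__(self, other):
        return self.score < other.score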
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | TopN.push | (self, x) | Pushes a new element. | Pushes a new element. | [
"Pushes",
"a",
"new",
"element",
"."
] | def push(self, x):
"""Pushes a new element."""
assert self._data is not None
if len(self._data) < self._n:
heapq.heappush(self._data, x)
else:
heapq.heappushpop(self._data, x) | [
"def",
"push",
"(",
"self",
",",
"x",
")",
":",
"assert",
"self",
".",
"_data",
"is",
"not",
"None",
"if",
"len",
"(",
"self",
".",
"_data",
")",
"<",
"self",
".",
"_n",
":",
"heapq",
".",
"heappush",
"(",
"self",
".",
"_data",
",",
"x",
")",
"else",
":",
"heapq",
".",
"heappushpop",
"(",
"self",
".",
"_data",
",",
"x",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L80-L86 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | TopN.extract | (self, sort=False) | return data | Extracts all elements from the TopN. This is a destructive operation.
The only method that can be called immediately after extract() is reset().
Args:
sort: Whether to return the elements in descending sorted order.
Returns:
A list of data; the top n elements provided to the set. | Extracts all elements from the TopN. This is a destructive operation. | [
"Extracts",
"all",
"elements",
"from",
"the",
"TopN",
".",
"This",
"is",
"a",
"destructive",
"operation",
"."
] | def extract(self, sort=False):
"""Extracts all elements from the TopN. This is a destructive operation.
The only method that can be called immediately after extract() is reset().
Args:
sort: Whether to return the elements in descending sorted order.
Returns:
A list of data; the top n elements provided to the set.
"""
assert self._data is not None
data = self._data
self._data = None
if sort:
data.sort(reverse=True)
return data | [
"def",
"extract",
"(",
"self",
",",
"sort",
"=",
"False",
")",
":",
"assert",
"self",
".",
"_data",
"is",
"not",
"None",
"data",
"=",
"self",
".",
"_data",
"self",
".",
"_data",
"=",
"None",
"if",
"sort",
":",
"data",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"return",
"data"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L88-L104 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | TopN.reset | (self) | Returns the TopN to an empty state. | Returns the TopN to an empty state. | [
"Returns",
"the",
"TopN",
"to",
"an",
"empty",
"state",
"."
] | def reset(self):
"""Returns the TopN to an empty state."""
self._data = [] | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"[",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L106-L108 |
||
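push, extract and reset together implement a fixed-capacity min-heap that retains only the n highest-scoring items: heappushpop evicts the current minimum once capacity is reached. A self-contained sketch of the same idea:

import heapq

def top_n(items, n):
    heap = []
    for x in items:
        if len(heap) < n:
            heapq.heappush(heap, x)
        else:
            # Push x and pop the smallest element, keeping the n largest.
            heapq.heappushpop(heap, x)
    return sorted(heap, reverse=True)

print(top_n([5, 1, 9, 3, 7], 3))  # -> [9, 7, 5]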
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | CaptionGenerator.__init__ | (self,
model,
vocab,
beam_size=3,
max_caption_length=20,
length_normalization_factor=0.0) | Initializes the generator.
Args:
model: Object encapsulating a trained image-to-text model. Must have
methods feed_image() and inference_step(). For example, an instance of
InferenceWrapperBase.
vocab: A Vocabulary object.
beam_size: Beam size to use when generating captions.
max_caption_length: The maximum caption length before stopping the search.
length_normalization_factor: If != 0, a number x such that captions are
scored by logprob/length^x, rather than logprob. This changes the
relative scores of captions depending on their lengths. For example, if
x > 0 then longer captions will be favored. | Initializes the generator. | [
"Initializes",
"the",
"generator",
"."
] | def __init__(self,
model,
vocab,
beam_size=3,
max_caption_length=20,
length_normalization_factor=0.0):
"""Initializes the generator.
Args:
model: Object encapsulating a trained image-to-text model. Must have
methods feed_image() and inference_step(). For example, an instance of
InferenceWrapperBase.
vocab: A Vocabulary object.
beam_size: Beam size to use when generating captions.
max_caption_length: The maximum caption length before stopping the search.
length_normalization_factor: If != 0, a number x such that captions are
scored by logprob/length^x, rather than logprob. This changes the
relative scores of captions depending on their lengths. For example, if
x > 0 then longer captions will be favored.
"""
self.vocab = vocab
self.model = model
self.beam_size = beam_size
self.max_caption_length = max_caption_length
self.length_normalization_factor = length_normalization_factor | [
"def",
"__init__",
"(",
"self",
",",
"model",
",",
"vocab",
",",
"beam_size",
"=",
"3",
",",
"max_caption_length",
"=",
"20",
",",
"length_normalization_factor",
"=",
"0.0",
")",
":",
"self",
".",
"vocab",
"=",
"vocab",
"self",
".",
"model",
"=",
"model",
"self",
".",
"beam_size",
"=",
"beam_size",
"self",
".",
"max_caption_length",
"=",
"max_caption_length",
"self",
".",
"length_normalization_factor",
"=",
"length_normalization_factor"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L114-L139 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py | python | CaptionGenerator.beam_search | (self, sess, encoded_image) | return complete_captions.extract(sort=True) | Runs beam search caption generation on a single image.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
A list of Caption sorted by descending score. | Runs beam search caption generation on a single image. | [
"Runs",
"beam",
"search",
"caption",
"generation",
"on",
"a",
"single",
"image",
"."
] | def beam_search(self, sess, encoded_image):
"""Runs beam search caption generation on a single image.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
A list of Caption sorted by descending score.
"""
# Feed in the image to get the initial state.
initial_state = self.model.feed_image(sess, encoded_image)
initial_beam = Caption(
sentence=[self.vocab.start_id],
state=initial_state[0],
logprob=0.0,
score=0.0,
metadata=[""])
partial_captions = TopN(self.beam_size)
partial_captions.push(initial_beam)
complete_captions = TopN(self.beam_size)
# Run beam search.
for _ in range(self.max_caption_length - 1):
partial_captions_list = partial_captions.extract()
partial_captions.reset()
input_feed = np.array([c.sentence[-1] for c in partial_captions_list])
state_feed = np.array([c.state for c in partial_captions_list])
softmax, new_states, metadata = self.model.inference_step(sess,
input_feed,
state_feed)
for i, partial_caption in enumerate(partial_captions_list):
word_probabilities = softmax[i]
state = new_states[i]
# For this partial caption, get the beam_size most probable next words.
words_and_probs = list(enumerate(word_probabilities))
words_and_probs.sort(key=lambda x: -x[1])
words_and_probs = words_and_probs[0:self.beam_size]
# Each next word gives a new partial caption.
for w, p in words_and_probs:
if p < 1e-12:
continue # Avoid log(0).
sentence = partial_caption.sentence + [w]
logprob = partial_caption.logprob + math.log(p)
score = logprob
if metadata:
metadata_list = partial_caption.metadata + [metadata[i]]
else:
metadata_list = None
if w == self.vocab.end_id:
if self.length_normalization_factor > 0:
score /= len(sentence)**self.length_normalization_factor
beam = Caption(sentence, state, logprob, score, metadata_list)
complete_captions.push(beam)
else:
beam = Caption(sentence, state, logprob, score, metadata_list)
partial_captions.push(beam)
if partial_captions.size() == 0:
# We have run out of partial candidates; happens when beam_size = 1.
break
# If we have no complete captions then fall back to the partial captions.
# But never output a mixture of complete and partial captions because a
# partial caption could have a higher score than all the complete captions.
if not complete_captions.size():
complete_captions = partial_captions
return complete_captions.extract(sort=True) | [
"def",
"beam_search",
"(",
"self",
",",
"sess",
",",
"encoded_image",
")",
":",
"# Feed in the image to get the initial state.",
"initial_state",
"=",
"self",
".",
"model",
".",
"feed_image",
"(",
"sess",
",",
"encoded_image",
")",
"initial_beam",
"=",
"Caption",
"(",
"sentence",
"=",
"[",
"self",
".",
"vocab",
".",
"start_id",
"]",
",",
"state",
"=",
"initial_state",
"[",
"0",
"]",
",",
"logprob",
"=",
"0.0",
",",
"score",
"=",
"0.0",
",",
"metadata",
"=",
"[",
"\"\"",
"]",
")",
"partial_captions",
"=",
"TopN",
"(",
"self",
".",
"beam_size",
")",
"partial_captions",
".",
"push",
"(",
"initial_beam",
")",
"complete_captions",
"=",
"TopN",
"(",
"self",
".",
"beam_size",
")",
"# Run beam search.",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"max_caption_length",
"-",
"1",
")",
":",
"partial_captions_list",
"=",
"partial_captions",
".",
"extract",
"(",
")",
"partial_captions",
".",
"reset",
"(",
")",
"input_feed",
"=",
"np",
".",
"array",
"(",
"[",
"c",
".",
"sentence",
"[",
"-",
"1",
"]",
"for",
"c",
"in",
"partial_captions_list",
"]",
")",
"state_feed",
"=",
"np",
".",
"array",
"(",
"[",
"c",
".",
"state",
"for",
"c",
"in",
"partial_captions_list",
"]",
")",
"softmax",
",",
"new_states",
",",
"metadata",
"=",
"self",
".",
"model",
".",
"inference_step",
"(",
"sess",
",",
"input_feed",
",",
"state_feed",
")",
"for",
"i",
",",
"partial_caption",
"in",
"enumerate",
"(",
"partial_captions_list",
")",
":",
"word_probabilities",
"=",
"softmax",
"[",
"i",
"]",
"state",
"=",
"new_states",
"[",
"i",
"]",
"# For this partial caption, get the beam_size most probable next words.",
"words_and_probs",
"=",
"list",
"(",
"enumerate",
"(",
"word_probabilities",
")",
")",
"words_and_probs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"1",
"]",
")",
"words_and_probs",
"=",
"words_and_probs",
"[",
"0",
":",
"self",
".",
"beam_size",
"]",
"# Each next word gives a new partial caption.",
"for",
"w",
",",
"p",
"in",
"words_and_probs",
":",
"if",
"p",
"<",
"1e-12",
":",
"continue",
"# Avoid log(0).",
"sentence",
"=",
"partial_caption",
".",
"sentence",
"+",
"[",
"w",
"]",
"logprob",
"=",
"partial_caption",
".",
"logprob",
"+",
"math",
".",
"log",
"(",
"p",
")",
"score",
"=",
"logprob",
"if",
"metadata",
":",
"metadata_list",
"=",
"partial_caption",
".",
"metadata",
"+",
"[",
"metadata",
"[",
"i",
"]",
"]",
"else",
":",
"metadata_list",
"=",
"None",
"if",
"w",
"==",
"self",
".",
"vocab",
".",
"end_id",
":",
"if",
"self",
".",
"length_normalization_factor",
">",
"0",
":",
"score",
"/=",
"len",
"(",
"sentence",
")",
"**",
"self",
".",
"length_normalization_factor",
"beam",
"=",
"Caption",
"(",
"sentence",
",",
"state",
",",
"logprob",
",",
"score",
",",
"metadata_list",
")",
"complete_captions",
".",
"push",
"(",
"beam",
")",
"else",
":",
"beam",
"=",
"Caption",
"(",
"sentence",
",",
"state",
",",
"logprob",
",",
"score",
",",
"metadata_list",
")",
"partial_captions",
".",
"push",
"(",
"beam",
")",
"if",
"partial_captions",
".",
"size",
"(",
")",
"==",
"0",
":",
"# We have run out of partial candidates; happens when beam_size = 1.",
"break",
"# If we have no complete captions then fall back to the partial captions.",
"# But never output a mixture of complete and partial captions because a",
"# partial caption could have a higher score than all the complete captions.",
"if",
"not",
"complete_captions",
".",
"size",
"(",
")",
":",
"complete_captions",
"=",
"partial_captions",
"return",
"complete_captions",
".",
"extract",
"(",
"sort",
"=",
"True",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/im2txt/im2txt/inference_utils/caption_generator.py#L141-L211 |
|
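The scoring subtlety in beam_search is length normalization: when length_normalization_factor is positive, a finished caption's log-probability is divided by len(sentence) raised to that factor, which can promote longer captions. A small numeric illustration (the probabilities are invented):

import math

short = 2 * math.log(0.5)   # 2-word caption, logprob about -1.39
long_ = 5 * math.log(0.6)   # 5-word caption, logprob about -2.55

for factor in (0.0, 1.0):
    # Dividing by length**0 == 1 reproduces the unnormalized case.
    s_short = short / 2 ** factor
    s_long = long_ / 5 ** factor
    print(factor, "short wins" if s_short > s_long else "long wins")
# 0.0 -> short wins, 1.0 -> long wins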
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | enable_parallel | (processnum=None) | Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt; custom Tokenizer
instances are not supported. | Change the module's `cut` and `cut_for_search` functions to the
parallel version. | [
"Change",
"the",
"module",
"s",
"cut",
"and",
"cut_for_search",
"functions",
"to",
"the",
"parallel",
"version",
"."
] | def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt; custom Tokenizer
instances are not supported.
"""
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search | [
"def",
"enable_parallel",
"(",
"processnum",
"=",
"None",
")",
":",
"global",
"pool",
",",
"dt",
",",
"cut",
",",
"cut_for_search",
"from",
"multiprocessing",
"import",
"cpu_count",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"raise",
"NotImplementedError",
"(",
"\"jieba: parallel mode only supports posix system\"",
")",
"else",
":",
"from",
"multiprocessing",
"import",
"Pool",
"dt",
".",
"check_initialized",
"(",
")",
"if",
"processnum",
"is",
"None",
":",
"processnum",
"=",
"cpu_count",
"(",
")",
"pool",
"=",
"Pool",
"(",
"processnum",
")",
"cut",
"=",
"_pcut",
"cut_for_search",
"=",
"_pcut_for_search"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L566-L586 |
||
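Typical use of the parallel mode documented above (POSIX only, and only for the default dt tokenizer; the sample sentence is arbitrary):

import jieba

jieba.enable_parallel(4)                      # pool of 4 worker processes
words = list(jieba.cut("这是并行分词的一个例子"))
jieba.disable_parallel()                      # restore single-process cut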
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.cut | (self, sentence, cut_all=False, HMM=True) | The main function that segments an entire sentence that contains
Chinese characters into separated words.
Parameter:
- sentence: The str(unicode) to be segmented.
- cut_all: Model type. True for full pattern, False for accurate pattern.
- HMM: Whether to use the Hidden Markov Model. | The main function that segments an entire sentence that contains
Chinese characters into separated words. | [
"The",
"main",
"function",
"that",
"segments",
"an",
"entire",
"sentence",
"that",
"contains",
"Chinese",
"characters",
"into",
"seperated",
"words",
"."
] | def cut(self, sentence, cut_all=False, HMM=True):
'''
The main function that segments an entire sentence that contains
Chinese characters into separated words.
Parameter:
- sentence: The str(unicode) to be segmented.
- cut_all: Model type. True for full pattern, False for accurate pattern.
- HMM: Whether to use the Hidden Markov Model.
'''
sentence = strdecode(sentence)
if cut_all:
re_han = re_han_cut_all
re_skip = re_skip_cut_all
else:
re_han = re_han_default
re_skip = re_skip_default
if cut_all:
cut_block = self.__cut_all
elif HMM:
cut_block = self.__cut_DAG
else:
cut_block = self.__cut_DAG_NO_HMM
blocks = re_han.split(sentence)
for blk in blocks:
if not blk:
continue
if re_han.match(blk):
for word in cut_block(blk):
yield word
else:
tmp = re_skip.split(blk)
for x in tmp:
if re_skip.match(x):
yield x
elif not cut_all:
for xx in x:
yield xx
else:
yield x | [
"def",
"cut",
"(",
"self",
",",
"sentence",
",",
"cut_all",
"=",
"False",
",",
"HMM",
"=",
"True",
")",
":",
"sentence",
"=",
"strdecode",
"(",
"sentence",
")",
"if",
"cut_all",
":",
"re_han",
"=",
"re_han_cut_all",
"re_skip",
"=",
"re_skip_cut_all",
"else",
":",
"re_han",
"=",
"re_han_default",
"re_skip",
"=",
"re_skip_default",
"if",
"cut_all",
":",
"cut_block",
"=",
"self",
".",
"__cut_all",
"elif",
"HMM",
":",
"cut_block",
"=",
"self",
".",
"__cut_DAG",
"else",
":",
"cut_block",
"=",
"self",
".",
"__cut_DAG_NO_HMM",
"blocks",
"=",
"re_han",
".",
"split",
"(",
"sentence",
")",
"for",
"blk",
"in",
"blocks",
":",
"if",
"not",
"blk",
":",
"continue",
"if",
"re_han",
".",
"match",
"(",
"blk",
")",
":",
"for",
"word",
"in",
"cut_block",
"(",
"blk",
")",
":",
"yield",
"word",
"else",
":",
"tmp",
"=",
"re_skip",
".",
"split",
"(",
"blk",
")",
"for",
"x",
"in",
"tmp",
":",
"if",
"re_skip",
".",
"match",
"(",
"x",
")",
":",
"yield",
"x",
"elif",
"not",
"cut_all",
":",
"for",
"xx",
"in",
"x",
":",
"yield",
"xx",
"else",
":",
"yield",
"x"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L272-L312 |
||
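The three segmentation modes the docstring describes, exercised through the module-level wrapper (the sample text follows the jieba README):

import jieba

text = "我来到北京清华大学"
print("/".join(jieba.cut(text, cut_all=True)))   # full mode
print("/".join(jieba.cut(text, cut_all=False)))  # accurate mode (default)
print("/".join(jieba.cut(text, HMM=False)))      # accurate mode without HMM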
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.cut_for_search | (self, sentence, HMM=True) | Finer segmentation for search engines. | Finer segmentation for search engines. | [
"Finer",
"segmentation",
"for",
"search",
"engines",
"."
] | def cut_for_search(self, sentence, HMM=True):
"""
Finer segmentation for search engines.
"""
words = self.cut(sentence, HMM=HMM)
for w in words:
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield gram2
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield gram3
yield w | [
"def",
"cut_for_search",
"(",
"self",
",",
"sentence",
",",
"HMM",
"=",
"True",
")",
":",
"words",
"=",
"self",
".",
"cut",
"(",
"sentence",
",",
"HMM",
"=",
"HMM",
")",
"for",
"w",
"in",
"words",
":",
"if",
"len",
"(",
"w",
")",
">",
"2",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"w",
")",
"-",
"1",
")",
":",
"gram2",
"=",
"w",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"if",
"self",
".",
"FREQ",
".",
"get",
"(",
"gram2",
")",
":",
"yield",
"gram2",
"if",
"len",
"(",
"w",
")",
">",
"3",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"w",
")",
"-",
"2",
")",
":",
"gram3",
"=",
"w",
"[",
"i",
":",
"i",
"+",
"3",
"]",
"if",
"self",
".",
"FREQ",
".",
"get",
"(",
"gram3",
")",
":",
"yield",
"gram3",
"yield",
"w"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L314-L330 |
||
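cut_for_search first segments normally, then re-emits any in-dictionary 2-grams and 3-grams found inside long words, which suits building a search index:

import jieba

print(", ".join(jieba.cut_for_search("小明硕士毕业于中国科学院计算所")))
# long tokens such as 中国科学院 also yield 中国, 科学, 学院, 科学院, ...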
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.load_userdict | (self, f) | Load personalized dict to improve detect rate.
Parameter:
- f : A plain text file that contains words and their occurrences.
Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored | Load personalized dict to improve detect rate. | [
"Load",
"personalized",
"dict",
"to",
"improve",
"detect",
"rate",
"."
] | def load_userdict(self, f):
'''
Load personalized dict to improve detect rate.
Parameter:
- f : A plain text file that contains words and their occurrences.
Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
'''
self.check_initialized()
if isinstance(f, string_types):
f_name = f
f = open(f, 'rb')
else:
f_name = resolve_filename(f)
for lineno, ln in enumerate(f, 1):
line = ln.strip()
if not isinstance(line, text_type):
try:
line = line.decode('utf-8').lstrip('\ufeff')
except UnicodeDecodeError:
raise ValueError('dictionary file %s must be utf-8' % f_name)
if not line:
continue
# match won't be None because there's at least one character
word, freq, tag = re_userdict.match(line).groups()
if freq is not None:
freq = freq.strip()
if tag is not None:
tag = tag.strip()
self.add_word(word, freq, tag) | [
"def",
"load_userdict",
"(",
"self",
",",
"f",
")",
":",
"self",
".",
"check_initialized",
"(",
")",
"if",
"isinstance",
"(",
"f",
",",
"string_types",
")",
":",
"f_name",
"=",
"f",
"f",
"=",
"open",
"(",
"f",
",",
"'rb'",
")",
"else",
":",
"f_name",
"=",
"resolve_filename",
"(",
"f",
")",
"for",
"lineno",
",",
"ln",
"in",
"enumerate",
"(",
"f",
",",
"1",
")",
":",
"line",
"=",
"ln",
".",
"strip",
"(",
")",
"if",
"not",
"isinstance",
"(",
"line",
",",
"text_type",
")",
":",
"try",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"lstrip",
"(",
"'\\ufeff'",
")",
"except",
"UnicodeDecodeError",
":",
"raise",
"ValueError",
"(",
"'dictionary file %s must be utf-8'",
"%",
"f_name",
")",
"if",
"not",
"line",
":",
"continue",
"# match won't be None because there's at least one character",
"word",
",",
"freq",
",",
"tag",
"=",
"re_userdict",
".",
"match",
"(",
"line",
")",
".",
"groups",
"(",
")",
"if",
"freq",
"is",
"not",
"None",
":",
"freq",
"=",
"freq",
".",
"strip",
"(",
")",
"if",
"tag",
"is",
"not",
"None",
":",
"tag",
"=",
"tag",
".",
"strip",
"(",
")",
"self",
".",
"add_word",
"(",
"word",
",",
"freq",
",",
"tag",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L356-L392 |
||
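The dictionary format the docstring describes, one entry per line with optional frequency and POS tag, and the loading call (the file name and entries are illustrative, taken from the style of jieba's examples):

import jieba

# userdict.txt, utf-8 encoded, one "word [freq] [tag]" entry per line:
#   云计算 5
#   创新办 3 i
#   凱特琳 nz
jieba.load_userdict("userdict.txt")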
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.add_word | (self, word, freq=None, tag=None) | Add a word to dictionary.
freq and tag can be omitted, freq defaults to be a calculated value
that ensures the word can be cut out. | Add a word to dictionary. | [
"Add",
"a",
"word",
"to",
"dictionary",
"."
] | def add_word(self, word, freq=None, tag=None):
"""
Add a word to dictionary.
freq and tag can be omitted, freq defaults to be a calculated value
that ensures the word can be cut out.
"""
self.check_initialized()
word = strdecode(word)
freq = int(freq) if freq is not None else self.suggest_freq(word, False)
self.FREQ[word] = freq
self.total += freq
if tag:
self.user_word_tag_tab[word] = tag
for ch in xrange(len(word)):
wfrag = word[:ch + 1]
if wfrag not in self.FREQ:
self.FREQ[wfrag] = 0
if freq == 0:
finalseg.add_force_split(word) | [
"def",
"add_word",
"(",
"self",
",",
"word",
",",
"freq",
"=",
"None",
",",
"tag",
"=",
"None",
")",
":",
"self",
".",
"check_initialized",
"(",
")",
"word",
"=",
"strdecode",
"(",
"word",
")",
"freq",
"=",
"int",
"(",
"freq",
")",
"if",
"freq",
"is",
"not",
"None",
"else",
"self",
".",
"suggest_freq",
"(",
"word",
",",
"False",
")",
"self",
".",
"FREQ",
"[",
"word",
"]",
"=",
"freq",
"self",
".",
"total",
"+=",
"freq",
"if",
"tag",
":",
"self",
".",
"user_word_tag_tab",
"[",
"word",
"]",
"=",
"tag",
"for",
"ch",
"in",
"xrange",
"(",
"len",
"(",
"word",
")",
")",
":",
"wfrag",
"=",
"word",
"[",
":",
"ch",
"+",
"1",
"]",
"if",
"wfrag",
"not",
"in",
"self",
".",
"FREQ",
":",
"self",
".",
"FREQ",
"[",
"wfrag",
"]",
"=",
"0",
"if",
"freq",
"==",
"0",
":",
"finalseg",
".",
"add_force_split",
"(",
"word",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L394-L413 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.del_word | (self, word) | Convenient function for deleting a word. | Convenient function for deleting a word. | [
"Convenient",
"function",
"for",
"deleting",
"a",
"word",
"."
] | def del_word(self, word):
"""
Convenient function for deleting a word.
"""
self.add_word(word, 0) | [
"def",
"del_word",
"(",
"self",
",",
"word",
")",
":",
"self",
".",
"add_word",
"(",
"word",
",",
"0",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L415-L419 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.suggest_freq | (self, segment, tune=False) | return freq | Suggest word frequency to force the characters in a word to be
joined or split.
Parameter:
- segment : The segments that the word is expected to be cut into,
If the word should be treated as a whole, use a str.
- tune : If True, tune the word frequency.
Note that HMM may affect the final result. If the result doesn't change,
set HMM=False. | Suggest word frequency to force the characters in a word to be
joined or split. | [
"Suggest",
"word",
"frequency",
"to",
"force",
"the",
"characters",
"in",
"a",
"word",
"to",
"be",
"joined",
"or",
"splitted",
"."
] | def suggest_freq(self, segment, tune=False):
"""
Suggest word frequency to force the characters in a word to be
joined or split.
Parameter:
- segment : The segments that the word is expected to be cut into,
If the word should be treated as a whole, use a str.
- tune : If True, tune the word frequency.
Note that HMM may affect the final result. If the result doesn't change,
set HMM=False.
"""
self.check_initialized()
ftotal = float(self.total)
freq = 1
if isinstance(segment, string_types):
word = segment
for seg in self.cut(word, HMM=False):
freq *= self.FREQ.get(seg, 1) / ftotal
freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1))
else:
segment = tuple(map(strdecode, segment))
word = ''.join(segment)
for seg in segment:
freq *= self.FREQ.get(seg, 1) / ftotal
freq = min(int(freq * self.total), self.FREQ.get(word, 0))
if tune:
add_word(word, freq)
return freq | [
"def",
"suggest_freq",
"(",
"self",
",",
"segment",
",",
"tune",
"=",
"False",
")",
":",
"self",
".",
"check_initialized",
"(",
")",
"ftotal",
"=",
"float",
"(",
"self",
".",
"total",
")",
"freq",
"=",
"1",
"if",
"isinstance",
"(",
"segment",
",",
"string_types",
")",
":",
"word",
"=",
"segment",
"for",
"seg",
"in",
"self",
".",
"cut",
"(",
"word",
",",
"HMM",
"=",
"False",
")",
":",
"freq",
"*=",
"self",
".",
"FREQ",
".",
"get",
"(",
"seg",
",",
"1",
")",
"/",
"ftotal",
"freq",
"=",
"max",
"(",
"int",
"(",
"freq",
"*",
"self",
".",
"total",
")",
"+",
"1",
",",
"self",
".",
"FREQ",
".",
"get",
"(",
"word",
",",
"1",
")",
")",
"else",
":",
"segment",
"=",
"tuple",
"(",
"map",
"(",
"strdecode",
",",
"segment",
")",
")",
"word",
"=",
"''",
".",
"join",
"(",
"segment",
")",
"for",
"seg",
"in",
"segment",
":",
"freq",
"*=",
"self",
".",
"FREQ",
".",
"get",
"(",
"seg",
",",
"1",
")",
"/",
"ftotal",
"freq",
"=",
"min",
"(",
"int",
"(",
"freq",
"*",
"self",
".",
"total",
")",
",",
"self",
".",
"FREQ",
".",
"get",
"(",
"word",
",",
"0",
")",
")",
"if",
"tune",
":",
"add_word",
"(",
"word",
",",
"freq",
")",
"return",
"freq"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L421-L450 |
|
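Usage of suggest_freq to force a join or a split; with tune=True the suggested frequency is written back through add_word (examples follow the jieba README):

import jieba

# force 中将 to be split in this context
jieba.suggest_freq(("中", "将"), tune=True)
print("/".join(jieba.cut("如果放到post中将出错。", HMM=False)))

# force 台中 to stay one word
jieba.suggest_freq("台中", tune=True)
print("/".join(jieba.cut("「台中」正确应该不会被切开", HMM=False)))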
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py | python | Tokenizer.tokenize | (self, unicode_sentence, mode="default", HMM=True) | Tokenize a sentence and yields tuples of (word, start, end)
Parameter:
- sentence: the str(unicode) to be segmented.
- mode: "default" or "search", "search" is for finer segmentation.
- HMM: whether to use the Hidden Markov Model. | Tokenize a sentence and yields tuples of (word, start, end) | [
"Tokenize",
"a",
"sentence",
"and",
"yields",
"tuples",
"of",
"(",
"word",
"start",
"end",
")"
] | def tokenize(self, unicode_sentence, mode="default", HMM=True):
"""
Tokenize a sentence and yields tuples of (word, start, end)
Parameter:
- sentence: the str(unicode) to be segmented.
- mode: "default" or "search", "search" is for finer segmentation.
- HMM: whether to use the Hidden Markov Model.
"""
if not isinstance(unicode_sentence, text_type):
raise ValueError("jieba: the input parameter should be unicode.")
start = 0
if mode == 'default':
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
yield (w, start, start + width)
start += width
else:
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield (gram2, start + i, start + i + 2)
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield (gram3, start + i, start + i + 3)
yield (w, start, start + width)
start += width | [
"def",
"tokenize",
"(",
"self",
",",
"unicode_sentence",
",",
"mode",
"=",
"\"default\"",
",",
"HMM",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"unicode_sentence",
",",
"text_type",
")",
":",
"raise",
"ValueError",
"(",
"\"jieba: the input parameter should be unicode.\"",
")",
"start",
"=",
"0",
"if",
"mode",
"==",
"'default'",
":",
"for",
"w",
"in",
"self",
".",
"cut",
"(",
"unicode_sentence",
",",
"HMM",
"=",
"HMM",
")",
":",
"width",
"=",
"len",
"(",
"w",
")",
"yield",
"(",
"w",
",",
"start",
",",
"start",
"+",
"width",
")",
"start",
"+=",
"width",
"else",
":",
"for",
"w",
"in",
"self",
".",
"cut",
"(",
"unicode_sentence",
",",
"HMM",
"=",
"HMM",
")",
":",
"width",
"=",
"len",
"(",
"w",
")",
"if",
"len",
"(",
"w",
")",
">",
"2",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"w",
")",
"-",
"1",
")",
":",
"gram2",
"=",
"w",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"if",
"self",
".",
"FREQ",
".",
"get",
"(",
"gram2",
")",
":",
"yield",
"(",
"gram2",
",",
"start",
"+",
"i",
",",
"start",
"+",
"i",
"+",
"2",
")",
"if",
"len",
"(",
"w",
")",
">",
"3",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"w",
")",
"-",
"2",
")",
":",
"gram3",
"=",
"w",
"[",
"i",
":",
"i",
"+",
"3",
"]",
"if",
"self",
".",
"FREQ",
".",
"get",
"(",
"gram3",
")",
":",
"yield",
"(",
"gram3",
",",
"start",
"+",
"i",
",",
"start",
"+",
"i",
"+",
"3",
")",
"yield",
"(",
"w",
",",
"start",
",",
"start",
"+",
"width",
")",
"start",
"+=",
"width"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/__init__.py#L452-L483 |
||
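tokenize wraps cut and adds character offsets; note that it insists on unicode input and raises ValueError otherwise:

import jieba

for word, start, end in jieba.tokenize(u"永和服装饰品有限公司"):
    print("%s\t start: %d \t end: %d" % (word, start, end))

# mode="search" additionally yields in-dictionary sub-words with offsets:
# jieba.tokenize(u"永和服装饰品有限公司", mode="search")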
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/analyse/tfidf.py | python | TFIDF.extract_tags | (self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False) | Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list, it will be filtered.
- withFlag: only works when allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words | Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list, it will be filtered.
- withFlag: only works when allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words | [
"Extract",
"keywords",
"from",
"sentence",
"using",
"TF",
"-",
"IDF",
"algorithm",
".",
"Parameter",
":",
"-",
"topK",
":",
"return",
"how",
"many",
"top",
"keywords",
".",
"None",
"for",
"all",
"possible",
"words",
".",
"-",
"withWeight",
":",
"if",
"True",
"return",
"a",
"list",
"of",
"(",
"word",
"weight",
")",
";",
"if",
"False",
"return",
"a",
"list",
"of",
"words",
".",
"-",
"allowPOS",
":",
"the",
"allowed",
"POS",
"list",
"eg",
".",
"[",
"ns",
"n",
"vn",
"v",
"nr",
"]",
".",
"if",
"the",
"POS",
"of",
"w",
"is",
"not",
"in",
"this",
"list",
"it",
"will",
"be",
"filtered",
".",
"-",
"withFlag",
":",
"only",
"work",
"with",
"allowPOS",
"is",
"not",
"empty",
".",
"if",
"True",
"return",
"a",
"list",
"of",
"pair",
"(",
"word",
"weight",
")",
"like",
"posseg",
".",
"cut",
"if",
"False",
"return",
"a",
"list",
"of",
"words"
] | def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):
"""
Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list, it will be filtered.
- withFlag: only works when allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
"""
if allowPOS:
allowPOS = frozenset(allowPOS)
words = self.postokenizer.cut(sentence)
else:
words = self.tokenizer.cut(sentence)
freq = {}
for w in words:
if allowPOS:
if w.flag not in allowPOS:
continue
elif not withFlag:
w = w.word
wc = w.word if allowPOS and withFlag else w
if len(wc.strip()) < 2 or wc.lower() in self.stop_words:
continue
freq[w] = freq.get(w, 0.0) + 1.0
total = sum(freq.values())
for k in freq:
kw = k.word if allowPOS and withFlag else k
freq[k] *= self.idf_freq.get(kw, self.median_idf) / total
if withWeight:
tags = sorted(freq.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(freq, key=freq.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags | [
"def",
"extract_tags",
"(",
"self",
",",
"sentence",
",",
"topK",
"=",
"20",
",",
"withWeight",
"=",
"False",
",",
"allowPOS",
"=",
"(",
")",
",",
"withFlag",
"=",
"False",
")",
":",
"if",
"allowPOS",
":",
"allowPOS",
"=",
"frozenset",
"(",
"allowPOS",
")",
"words",
"=",
"self",
".",
"postokenizer",
".",
"cut",
"(",
"sentence",
")",
"else",
":",
"words",
"=",
"self",
".",
"tokenizer",
".",
"cut",
"(",
"sentence",
")",
"freq",
"=",
"{",
"}",
"for",
"w",
"in",
"words",
":",
"if",
"allowPOS",
":",
"if",
"w",
".",
"flag",
"not",
"in",
"allowPOS",
":",
"continue",
"elif",
"not",
"withFlag",
":",
"w",
"=",
"w",
".",
"word",
"wc",
"=",
"w",
".",
"word",
"if",
"allowPOS",
"and",
"withFlag",
"else",
"w",
"if",
"len",
"(",
"wc",
".",
"strip",
"(",
")",
")",
"<",
"2",
"or",
"wc",
".",
"lower",
"(",
")",
"in",
"self",
".",
"stop_words",
":",
"continue",
"freq",
"[",
"w",
"]",
"=",
"freq",
".",
"get",
"(",
"w",
",",
"0.0",
")",
"+",
"1.0",
"total",
"=",
"sum",
"(",
"freq",
".",
"values",
"(",
")",
")",
"for",
"k",
"in",
"freq",
":",
"kw",
"=",
"k",
".",
"word",
"if",
"allowPOS",
"and",
"withFlag",
"else",
"k",
"freq",
"[",
"k",
"]",
"*=",
"self",
".",
"idf_freq",
".",
"get",
"(",
"kw",
",",
"self",
".",
"median_idf",
")",
"/",
"total",
"if",
"withWeight",
":",
"tags",
"=",
"sorted",
"(",
"freq",
".",
"items",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"else",
":",
"tags",
"=",
"sorted",
"(",
"freq",
",",
"key",
"=",
"freq",
".",
"__getitem__",
",",
"reverse",
"=",
"True",
")",
"if",
"topK",
":",
"return",
"tags",
"[",
":",
"topK",
"]",
"else",
":",
"return",
"tags"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/analyse/tfidf.py#L75-L116 |
||
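Typical use of the TF-IDF extractor through the module-level convenience function (the sample sentence is arbitrary):

import jieba.analyse

s = "此外,公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元"
for word, weight in jieba.analyse.extract_tags(s, topK=5, withWeight=True):
    print("%s %.4f" % (word, weight))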
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/analyse/textrank.py | python | TextRank.textrank | (self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False) | Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words | Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words | [
"Extract",
"keywords",
"from",
"sentence",
"using",
"TextRank",
"algorithm",
".",
"Parameter",
":",
"-",
"topK",
":",
"return",
"how",
"many",
"top",
"keywords",
".",
"None",
"for",
"all",
"possible",
"words",
".",
"-",
"withWeight",
":",
"if",
"True",
"return",
"a",
"list",
"of",
"(",
"word",
"weight",
")",
";",
"if",
"False",
"return",
"a",
"list",
"of",
"words",
".",
"-",
"allowPOS",
":",
"the",
"allowed",
"POS",
"list",
"eg",
".",
"[",
"ns",
"n",
"vn",
"v",
"]",
".",
"if",
"the",
"POS",
"of",
"w",
"is",
"not",
"in",
"this",
"list",
"it",
"will",
"be",
"filtered",
".",
"-",
"withFlag",
":",
"if",
"True",
"return",
"a",
"list",
"of",
"pair",
"(",
"word",
"weight",
")",
"like",
"posseg",
".",
"cut",
"if",
"False",
"return",
"a",
"list",
"of",
"words"
] | def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
- withFlag: if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
"""
self.pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = defaultdict(int)
words = tuple(self.tokenizer.cut(sentence))
for i, wp in enumerate(words):
if self.pairfilter(wp):
for j in xrange(i + 1, i + self.span):
if j >= len(words):
break
if not self.pairfilter(words[j]):
continue
if allowPOS and withFlag:
cm[(wp, words[j])] += 1
else:
cm[(wp.word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags | [
"def",
"textrank",
"(",
"self",
",",
"sentence",
",",
"topK",
"=",
"20",
",",
"withWeight",
"=",
"False",
",",
"allowPOS",
"=",
"(",
"'ns'",
",",
"'n'",
",",
"'vn'",
",",
"'v'",
")",
",",
"withFlag",
"=",
"False",
")",
":",
"self",
".",
"pos_filt",
"=",
"frozenset",
"(",
"allowPOS",
")",
"g",
"=",
"UndirectWeightedGraph",
"(",
")",
"cm",
"=",
"defaultdict",
"(",
"int",
")",
"words",
"=",
"tuple",
"(",
"self",
".",
"tokenizer",
".",
"cut",
"(",
"sentence",
")",
")",
"for",
"i",
",",
"wp",
"in",
"enumerate",
"(",
"words",
")",
":",
"if",
"self",
".",
"pairfilter",
"(",
"wp",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"i",
"+",
"1",
",",
"i",
"+",
"self",
".",
"span",
")",
":",
"if",
"j",
">=",
"len",
"(",
"words",
")",
":",
"break",
"if",
"not",
"self",
".",
"pairfilter",
"(",
"words",
"[",
"j",
"]",
")",
":",
"continue",
"if",
"allowPOS",
"and",
"withFlag",
":",
"cm",
"[",
"(",
"wp",
",",
"words",
"[",
"j",
"]",
")",
"]",
"+=",
"1",
"else",
":",
"cm",
"[",
"(",
"wp",
".",
"word",
",",
"words",
"[",
"j",
"]",
".",
"word",
")",
"]",
"+=",
"1",
"for",
"terms",
",",
"w",
"in",
"cm",
".",
"items",
"(",
")",
":",
"g",
".",
"addEdge",
"(",
"terms",
"[",
"0",
"]",
",",
"terms",
"[",
"1",
"]",
",",
"w",
")",
"nodes_rank",
"=",
"g",
".",
"rank",
"(",
")",
"if",
"withWeight",
":",
"tags",
"=",
"sorted",
"(",
"nodes_rank",
".",
"items",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"else",
":",
"tags",
"=",
"sorted",
"(",
"nodes_rank",
",",
"key",
"=",
"nodes_rank",
".",
"__getitem__",
",",
"reverse",
"=",
"True",
")",
"if",
"topK",
":",
"return",
"tags",
"[",
":",
"topK",
"]",
"else",
":",
"return",
"tags"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/analyse/textrank.py#L69-L108 |
||
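The TextRank extractor shares the calling convention but, as the signature above shows, defaults allowPOS to nouns and verbs:

import jieba.analyse

s = "此外,公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元"
print(jieba.analyse.textrank(s, topK=5, allowPOS=("ns", "n", "vn", "v")))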
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/posseg/__init__.py | python | cut | (sentence, HMM=True) | Global `cut` function that supports parallel processing.
Note that this only works using dt; custom POSTokenizer
instances are not supported. | Global `cut` function that supports parallel processing. | [
"Global",
"cut",
"function",
"that",
"supports",
"parallel",
"processing",
"."
] | def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt, custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w | [
"def",
"cut",
"(",
"sentence",
",",
"HMM",
"=",
"True",
")",
":",
"global",
"dt",
"if",
"jieba",
".",
"pool",
"is",
"None",
":",
"for",
"w",
"in",
"dt",
".",
"cut",
"(",
"sentence",
",",
"HMM",
"=",
"HMM",
")",
":",
"yield",
"w",
"else",
":",
"parts",
"=",
"strdecode",
"(",
"sentence",
")",
".",
"splitlines",
"(",
"True",
")",
"if",
"HMM",
":",
"result",
"=",
"jieba",
".",
"pool",
".",
"map",
"(",
"_lcut_internal",
",",
"parts",
")",
"else",
":",
"result",
"=",
"jieba",
".",
"pool",
".",
"map",
"(",
"_lcut_internal_no_hmm",
",",
"parts",
")",
"for",
"r",
"in",
"result",
":",
"for",
"w",
"in",
"r",
":",
"yield",
"w"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/translation_and_interpretation_baseline/train/prepare_data/jieba/posseg/__init__.py#L272-L291 |
||
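The POS-tagging counterpart is driven through jieba.posseg; each yielded item unpacks into a (word, flag) pair:

import jieba.posseg as pseg

for word, flag in pseg.cut("我爱北京天安门"):
    print("%s %s" % (word, flag))
# 我 r / 爱 v / 北京 ns / 天安门 ns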
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/keypoint_eval/keypoint_eval.py | python | load_annotations | (anno_file, return_dict) | return annotations | Convert annotation JSON file. | Convert annotation JSON file. | [
"Convert",
"annotation",
"JSON",
"file",
"."
] | def load_annotations(anno_file, return_dict):
"""Convert annotation JSON file."""
annotations = dict()
annotations['image_ids'] = set([])
annotations['annos'] = dict()
annotations['delta'] = 2*np.array([0.01388152, 0.01515228, 0.01057665, 0.01417709, \
0.01497891, 0.01402144, 0.03909642, 0.03686941, 0.01981803, \
0.03843971, 0.03412318, 0.02415081, 0.01291456, 0.01236173])
try:
annos = json.load(open(anno_file, 'r'))
except Exception:
return_dict['error'] = 'Annotation file does not exist or is an invalid JSON file.'
exit(return_dict['error'])
for anno in annos:
annotations['image_ids'].add(anno['image_id'])
annotations['annos'][anno['image_id']] = dict()
annotations['annos'][anno['image_id']]['human_annos'] = anno['human_annotations']
annotations['annos'][anno['image_id']]['keypoint_annos'] = anno['keypoint_annotations']
return annotations | [
"def",
"load_annotations",
"(",
"anno_file",
",",
"return_dict",
")",
":",
"annotations",
"=",
"dict",
"(",
")",
"annotations",
"[",
"'image_ids'",
"]",
"=",
"set",
"(",
"[",
"]",
")",
"annotations",
"[",
"'annos'",
"]",
"=",
"dict",
"(",
")",
"annotations",
"[",
"'delta'",
"]",
"=",
"2",
"*",
"np",
".",
"array",
"(",
"[",
"0.01388152",
",",
"0.01515228",
",",
"0.01057665",
",",
"0.01417709",
",",
"0.01497891",
",",
"0.01402144",
",",
"0.03909642",
",",
"0.03686941",
",",
"0.01981803",
",",
"0.03843971",
",",
"0.03412318",
",",
"0.02415081",
",",
"0.01291456",
",",
"0.01236173",
"]",
")",
"try",
":",
"annos",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"anno_file",
",",
"'r'",
")",
")",
"except",
"Exception",
":",
"return_dict",
"[",
"'error'",
"]",
"=",
"'Annotation file does not exist or is an invalid JSON file.'",
"exit",
"(",
"return_dict",
"[",
"'error'",
"]",
")",
"for",
"anno",
"in",
"annos",
":",
"annotations",
"[",
"'image_ids'",
"]",
".",
"add",
"(",
"anno",
"[",
"'image_id'",
"]",
")",
"annotations",
"[",
"'annos'",
"]",
"[",
"anno",
"[",
"'image_id'",
"]",
"]",
"=",
"dict",
"(",
")",
"annotations",
"[",
"'annos'",
"]",
"[",
"anno",
"[",
"'image_id'",
"]",
"]",
"[",
"'human_annos'",
"]",
"=",
"anno",
"[",
"'human_annotations'",
"]",
"annotations",
"[",
"'annos'",
"]",
"[",
"anno",
"[",
"'image_id'",
"]",
"]",
"[",
"'keypoint_annos'",
"]",
"=",
"anno",
"[",
"'keypoint_annotations'",
"]",
"return",
"annotations"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/keypoint_eval/keypoint_eval.py#L44-L65 |
|
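From the loader above one can read off the expected annotation layout: a JSON list of records, each with an image_id, per-human bounding boxes, and 14 (x, y, visibility) keypoint triples per human (the evaluator treats v == 1 as visible). A hedged sketch of one record as a Python literal, with all values invented:

anno = {
    "image_id": "0a00c0b5493774b3de2cf439c84702dd839af9a2",
    "human_annotations": {"human1": [178, 250, 290, 522]},    # x1, y1, x2, y2
    "keypoint_annotations": {"human1": [261, 294, 1] * 14},   # 14 x (x, y, v)
}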
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/keypoint_eval/keypoint_eval.py | python | load_predictions | (prediction_file, return_dict) | return predictions | Convert prediction JSON file. | Convert prediction JSON file. | [
"Convert",
"prediction",
"JSON",
"file",
"."
] | def load_predictions(prediction_file, return_dict):
"""Convert prediction JSON file."""
predictions = dict()
predictions['image_ids'] = []
predictions['annos'] = dict()
id_set = set([])
try:
preds = json.load(open(prediction_file, 'r'))
except Exception:
return_dict['error'] = 'Prediction file does not exist or is an invalid JSON file.'
exit(return_dict['error'])
for pred in preds:
if 'image_id' not in pred.keys():
return_dict['warning'].append('There is an invalid annotation info, \
likely missing key \'image_id\'.')
continue
if 'keypoint_annotations' not in pred.keys():
return_dict['warning'].append(pred['image_id']+\
' does not have key \'keypoint_annotations\'.')
continue
image_id = pred['image_id'].split('.')[0]
if image_id in id_set:
return_dict['warning'].append(pred['image_id']+\
' is duplicated in prediction JSON file.')
else:
id_set.add(image_id)
predictions['image_ids'].append(image_id)
predictions['annos'][pred['image_id']] = dict()
predictions['annos'][pred['image_id']]['keypoint_annos'] = pred['keypoint_annotations']
return predictions | [
"def",
"load_predictions",
"(",
"prediction_file",
",",
"return_dict",
")",
":",
"predictions",
"=",
"dict",
"(",
")",
"predictions",
"[",
"'image_ids'",
"]",
"=",
"[",
"]",
"predictions",
"[",
"'annos'",
"]",
"=",
"dict",
"(",
")",
"id_set",
"=",
"set",
"(",
"[",
"]",
")",
"try",
":",
"preds",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"prediction_file",
",",
"'r'",
")",
")",
"except",
"Exception",
":",
"return_dict",
"[",
"'error'",
"]",
"=",
"'Prediction file does not exist or is an invalid JSON file.'",
"exit",
"(",
"return_dict",
"[",
"'error'",
"]",
")",
"for",
"pred",
"in",
"preds",
":",
"if",
"'image_id'",
"not",
"in",
"pred",
".",
"keys",
"(",
")",
":",
"return_dict",
"[",
"'warning'",
"]",
".",
"append",
"(",
"'There is an invalid annotation info, \\\n likely missing key \\'image_id\\'.'",
")",
"continue",
"if",
"'keypoint_annotations'",
"not",
"in",
"pred",
".",
"keys",
"(",
")",
":",
"return_dict",
"[",
"'warning'",
"]",
".",
"append",
"(",
"pred",
"[",
"'image_id'",
"]",
"+",
"' does not have key \\'keypoint_annotations\\'.'",
")",
"continue",
"image_id",
"=",
"pred",
"[",
"'image_id'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"image_id",
"in",
"id_set",
":",
"return_dict",
"[",
"'warning'",
"]",
".",
"append",
"(",
"pred",
"[",
"'image_id'",
"]",
"+",
"' is duplicated in prediction JSON file.'",
")",
"else",
":",
"id_set",
".",
"add",
"(",
"image_id",
")",
"predictions",
"[",
"'image_ids'",
"]",
".",
"append",
"(",
"image_id",
")",
"predictions",
"[",
"'annos'",
"]",
"[",
"pred",
"[",
"'image_id'",
"]",
"]",
"=",
"dict",
"(",
")",
"predictions",
"[",
"'annos'",
"]",
"[",
"pred",
"[",
"'image_id'",
"]",
"]",
"[",
"'keypoint_annos'",
"]",
"=",
"pred",
"[",
"'keypoint_annotations'",
"]",
"return",
"predictions"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/keypoint_eval/keypoint_eval.py#L68-L101 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/keypoint_eval/keypoint_eval.py | python | compute_oks | (anno, predict, delta) | return oks | Compute oks matrix (size gtN*pN). | Compute oks matrix (size gtN*pN). | [
"Compute",
"oks",
"matrix",
"(",
"size",
"gtN",
"*",
"pN",
")",
"."
] | def compute_oks(anno, predict, delta):
"""Compute oks matrix (size gtN*pN)."""
anno_count = len(anno['keypoint_annos'].keys())
predict_count = len(predict.keys())
oks = np.zeros((anno_count, predict_count))
if predict_count == 0:
return oks.T
# for every human keypoint annotation
for i in range(anno_count):
anno_key = anno['keypoint_annos'].keys()[i]
anno_keypoints = np.reshape(anno['keypoint_annos'][anno_key], (14, 3))
visible = anno_keypoints[:, 2] == 1
bbox = anno['human_annos'][anno_key]
scale = np.float32((bbox[3]-bbox[1])*(bbox[2]-bbox[0]))
if np.sum(visible) == 0:
for j in range(predict_count):
oks[i, j] = 0
else:
# for every predicted human
for j in range(predict_count):
predict_key = predict.keys()[j]
predict_keypoints = np.reshape(predict[predict_key], (14, 3))
dis = np.sum((anno_keypoints[visible, :2] \
- predict_keypoints[visible, :2])**2, axis=1)
oks[i, j] = np.mean(np.exp(-dis/2/delta[visible]**2/(scale+1)))
return oks | [
"def",
"compute_oks",
"(",
"anno",
",",
"predict",
",",
"delta",
")",
":",
"anno_count",
"=",
"len",
"(",
"anno",
"[",
"'keypoint_annos'",
"]",
".",
"keys",
"(",
")",
")",
"predict_count",
"=",
"len",
"(",
"predict",
".",
"keys",
"(",
")",
")",
"oks",
"=",
"np",
".",
"zeros",
"(",
"(",
"anno_count",
",",
"predict_count",
")",
")",
"if",
"predict_count",
"==",
"0",
":",
"return",
"oks",
".",
"T",
"# for every human keypoint annotation",
"for",
"i",
"in",
"range",
"(",
"anno_count",
")",
":",
"anno_key",
"=",
"anno",
"[",
"'keypoint_annos'",
"]",
".",
"keys",
"(",
")",
"[",
"i",
"]",
"anno_keypoints",
"=",
"np",
".",
"reshape",
"(",
"anno",
"[",
"'keypoint_annos'",
"]",
"[",
"anno_key",
"]",
",",
"(",
"14",
",",
"3",
")",
")",
"visible",
"=",
"anno_keypoints",
"[",
":",
",",
"2",
"]",
"==",
"1",
"bbox",
"=",
"anno",
"[",
"'human_annos'",
"]",
"[",
"anno_key",
"]",
"scale",
"=",
"np",
".",
"float32",
"(",
"(",
"bbox",
"[",
"3",
"]",
"-",
"bbox",
"[",
"1",
"]",
")",
"*",
"(",
"bbox",
"[",
"2",
"]",
"-",
"bbox",
"[",
"0",
"]",
")",
")",
"if",
"np",
".",
"sum",
"(",
"visible",
")",
"==",
"0",
":",
"for",
"j",
"in",
"range",
"(",
"predict_count",
")",
":",
"oks",
"[",
"i",
",",
"j",
"]",
"=",
"0",
"else",
":",
"# for every predicted human",
"for",
"j",
"in",
"range",
"(",
"predict_count",
")",
":",
"predict_key",
"=",
"predict",
".",
"keys",
"(",
")",
"[",
"j",
"]",
"predict_keypoints",
"=",
"np",
".",
"reshape",
"(",
"predict",
"[",
"predict_key",
"]",
",",
"(",
"14",
",",
"3",
")",
")",
"dis",
"=",
"np",
".",
"sum",
"(",
"(",
"anno_keypoints",
"[",
"visible",
",",
":",
"2",
"]",
"-",
"predict_keypoints",
"[",
"visible",
",",
":",
"2",
"]",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"oks",
"[",
"i",
",",
"j",
"]",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"exp",
"(",
"-",
"dis",
"/",
"2",
"/",
"delta",
"[",
"visible",
"]",
"**",
"2",
"/",
"(",
"scale",
"+",
"1",
")",
")",
")",
"return",
"oks"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/keypoint_eval/keypoint_eval.py#L104-L131 |
|
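In formula form, the OKS that compute_oks assigns to a ground-truth/prediction pair, over the set V of visible keypoints with squared Euclidean distances d_i^2, the (already doubled) per-keypoint constants \delta_i from load_annotations, and bounding-box area s = (x_2 - x_1)(y_2 - y_1), is

\mathrm{OKS} = \frac{1}{|V|} \sum_{i \in V} \exp\!\left(-\frac{d_i^2}{2\,\delta_i^2\,(s + 1)}\right)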
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/keypoint_eval/keypoint_eval.py | python | keypoint_eval | (predictions, annotations, return_dict) | return return_dict | Evaluate predicted_file and return mAP. | Evaluate predicted_file and return mAP. | [
"Evaluate",
"predicted_file",
"and",
"return",
"mAP",
"."
] | def keypoint_eval(predictions, annotations, return_dict):
"""Evaluate predicted_file and return mAP."""
oks_all = np.zeros((0))
oks_num = 0
# Construct set to speed up id searching.
prediction_id_set = set(predictions['image_ids'])
# for every annotation in our test/validation set
for image_id in annotations['image_ids']:
# if the image in the predictions, then compute oks
if image_id in prediction_id_set:
oks = compute_oks(anno=annotations['annos'][image_id], \
predict=predictions['annos'][image_id]['keypoint_annos'], \
delta=annotations['delta'])
# view pairs with max OKSs as match ones, add to oks_all
oks_all = np.concatenate((oks_all, np.max(oks, axis=1)), axis=0)
# accumulate total num by max(gtN,pN)
oks_num += np.max(oks.shape)
else:
# otherwise report warning
return_dict['warning'].append(image_id+' is not in the prediction JSON file.')
# number of humans in ground truth annotations
gt_n = len(annotations['annos'][image_id]['human_annos'].keys())
# fill 0 in oks scores
oks_all = np.concatenate((oks_all, np.zeros((gt_n))), axis=0)
# accumulate total num by ground truth number
oks_num += gt_n
# compute mAP by APs under different oks thresholds
average_precision = []
for threshold in np.linspace(0.5, 0.95, 10):
average_precision.append(np.sum(oks_all > threshold)/np.float32(oks_num))
return_dict['score'] = np.mean(average_precision)
return return_dict | [
"def",
"keypoint_eval",
"(",
"predictions",
",",
"annotations",
",",
"return_dict",
")",
":",
"oks_all",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
")",
")",
"oks_num",
"=",
"0",
"# Construct set to speed up id searching.",
"prediction_id_set",
"=",
"set",
"(",
"predictions",
"[",
"'image_ids'",
"]",
")",
"# for every annotation in our test/validation set",
"for",
"image_id",
"in",
"annotations",
"[",
"'image_ids'",
"]",
":",
"# if the image in the predictions, then compute oks",
"if",
"image_id",
"in",
"prediction_id_set",
":",
"oks",
"=",
"compute_oks",
"(",
"anno",
"=",
"annotations",
"[",
"'annos'",
"]",
"[",
"image_id",
"]",
",",
"predict",
"=",
"predictions",
"[",
"'annos'",
"]",
"[",
"image_id",
"]",
"[",
"'keypoint_annos'",
"]",
",",
"delta",
"=",
"annotations",
"[",
"'delta'",
"]",
")",
"# view pairs with max OKSs as match ones, add to oks_all",
"oks_all",
"=",
"np",
".",
"concatenate",
"(",
"(",
"oks_all",
",",
"np",
".",
"max",
"(",
"oks",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"0",
")",
"# accumulate total num by max(gtN,pN)",
"oks_num",
"+=",
"np",
".",
"max",
"(",
"oks",
".",
"shape",
")",
"else",
":",
"# otherwise report warning",
"return_dict",
"[",
"'warning'",
"]",
".",
"append",
"(",
"image_id",
"+",
"' is not in the prediction JSON file.'",
")",
"# number of humen in ground truth annotations",
"gt_n",
"=",
"len",
"(",
"annotations",
"[",
"'annos'",
"]",
"[",
"image_id",
"]",
"[",
"'human_annos'",
"]",
".",
"keys",
"(",
")",
")",
"# fill 0 in oks scores",
"oks_all",
"=",
"np",
".",
"concatenate",
"(",
"(",
"oks_all",
",",
"np",
".",
"zeros",
"(",
"(",
"gt_n",
")",
")",
")",
",",
"axis",
"=",
"0",
")",
"# accumulate total num by ground truth number",
"oks_num",
"+=",
"gt_n",
"# compute mAP by APs under different oks thresholds",
"average_precision",
"=",
"[",
"]",
"for",
"threshold",
"in",
"np",
".",
"linspace",
"(",
"0.5",
",",
"0.95",
",",
"10",
")",
":",
"average_precision",
".",
"append",
"(",
"np",
".",
"sum",
"(",
"oks_all",
">",
"threshold",
")",
"/",
"np",
".",
"float32",
"(",
"oks_num",
")",
")",
"return_dict",
"[",
"'score'",
"]",
"=",
"np",
".",
"mean",
"(",
"average_precision",
")",
"return",
"return_dict"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/keypoint_eval/keypoint_eval.py#L134-L170 |
|
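A minimal driver for keypoint_eval, sketched on the assumption that the predictions and annotations dicts come from load_predictions and load_annotations (defined elsewhere in keypoint_eval.py and visible in main below); the file names are the parser defaults used there.

return_dict = {'error': None, 'warning': [], 'score': None}
annotations = load_annotations(anno_file='keypoint_annotations_example.json',
                               return_dict=return_dict)
predictions = load_predictions(prediction_file='keypoint_predictions_example.json',
                               return_dict=return_dict)
return_dict = keypoint_eval(predictions, annotations, return_dict)
print(return_dict['score'])  # mean AP over OKS thresholds 0.50, 0.55, ..., 0.95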
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/keypoint_eval/keypoint_eval.py | python | main | () | The evaluator. | The evaluator. | [
"The",
"evaluator",
"."
] | def main():
"""The evaluator."""
# Arguments parser
parser = argparse.ArgumentParser()
parser.add_argument('--submit', help='prediction json file', type=str,
default='keypoint_predictions_example.json')
parser.add_argument('--ref', help='annotation json file', type=str,
default='keypoint_annotations_example.json')
args = parser.parse_args()
# Initialize return_dict
return_dict = dict()
return_dict['error'] = None
return_dict['warning'] = []
return_dict['score'] = None
# Load annotation JSON file
start_time = time.time()
annotations = load_annotations(anno_file=args.ref,
return_dict=return_dict)
print 'Complete reading annotation JSON file in %.2f seconds.' %(time.time() - start_time)
# Load prediction JSON file
start_time = time.time()
predictions = load_predictions(prediction_file=args.submit,
return_dict=return_dict)
print 'Complete reading prediction JSON file in %.2f seconds.' %(time.time() - start_time)
# Keypoint evaluation
start_time = time.time()
return_dict = keypoint_eval(predictions=predictions,
annotations=annotations,
return_dict=return_dict)
print 'Complete evaluation in %.2f seconds.' %(time.time() - start_time)
# Print return_dict and final score
pprint.pprint(return_dict)
print 'Score: ', '%.8f' % return_dict['score'] | [
"def",
"main",
"(",
")",
":",
"# Arguments parser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--submit'",
",",
"help",
"=",
"'prediction json file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'keypoint_predictions_example.json'",
")",
"parser",
".",
"add_argument",
"(",
"'--ref'",
",",
"help",
"=",
"'annotation json file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'keypoint_annotations_example.json'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Initialize return_dict",
"return_dict",
"=",
"dict",
"(",
")",
"return_dict",
"[",
"'error'",
"]",
"=",
"None",
"return_dict",
"[",
"'warning'",
"]",
"=",
"[",
"]",
"return_dict",
"[",
"'score'",
"]",
"=",
"None",
"# Load annotation JSON file",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"annotations",
"=",
"load_annotations",
"(",
"anno_file",
"=",
"args",
".",
"ref",
",",
"return_dict",
"=",
"return_dict",
")",
"print",
"'Complete reading annotation JSON file in %.2f seconds.'",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"# Load prediction JSON file",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"predictions",
"=",
"load_predictions",
"(",
"prediction_file",
"=",
"args",
".",
"submit",
",",
"return_dict",
"=",
"return_dict",
")",
"print",
"'Complete reading prediction JSON file in %.2f seconds.'",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"# Keypoint evaluation",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"return_dict",
"=",
"keypoint_eval",
"(",
"predictions",
"=",
"predictions",
",",
"annotations",
"=",
"annotations",
",",
"return_dict",
"=",
"return_dict",
")",
"print",
"'Complete evaluation in %.2f seconds.'",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"# Print return_dict and final score",
"pprint",
".",
"pprint",
"(",
"return_dict",
")",
"print",
"'Score: '",
",",
"'%.8f'",
"%",
"return_dict",
"[",
"'score'",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/keypoint_eval/keypoint_eval.py#L173-L211 |
||
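The same pipeline can also be driven programmatically; a sketch that simulates the command line main expects (the JSON file names are the argparse defaults above):

import sys
sys.argv = ['keypoint_eval.py',
            '--submit', 'keypoint_predictions_example.json',
            '--ref', 'keypoint_annotations_example.json']
main()  # prints load/eval timing, the full return_dict, and the final score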
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/run_evaluations.py | python | compute_m1 | (json_predictions_file, reference_file) | return m1_score | Compute m1_score | Compute m1_score | [
"Compute",
"m1_score"
] | def compute_m1(json_predictions_file, reference_file):
"""Compute m1_score"""
m1_score = {}
m1_score['error'] = 0
try:
coco = COCO(reference_file)
coco_res = coco.loadRes(json_predictions_file)
# create coco_eval object.
coco_eval = COCOEvalCap(coco, coco_res)
# evaluate results
coco_eval.evaluate()
except Exception:
m1_score['error'] = 1
else:
# print output evaluation scores
for metric, score in coco_eval.eval.items():
print '%s: %.3f'%(metric, score)
m1_score[metric] = score
return m1_score | [
"def",
"compute_m1",
"(",
"json_predictions_file",
",",
"reference_file",
")",
":",
"m1_score",
"=",
"{",
"}",
"m1_score",
"[",
"'error'",
"]",
"=",
"0",
"try",
":",
"coco",
"=",
"COCO",
"(",
"reference_file",
")",
"coco_res",
"=",
"coco",
".",
"loadRes",
"(",
"json_predictions_file",
")",
"# create coco_eval object.",
"coco_eval",
"=",
"COCOEvalCap",
"(",
"coco",
",",
"coco_res",
")",
"# evaluate results",
"coco_eval",
".",
"evaluate",
"(",
")",
"except",
"Exception",
":",
"m1_score",
"[",
"'error'",
"]",
"=",
"1",
"else",
":",
"# print output evaluation scores",
"for",
"metric",
",",
"score",
"in",
"coco_eval",
".",
"eval",
".",
"items",
"(",
")",
":",
"print",
"'%s: %.3f'",
"%",
"(",
"metric",
",",
"score",
")",
"m1_score",
"[",
"metric",
"]",
"=",
"score",
"return",
"m1_score"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/run_evaluations.py#L29-L49 |
|
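A usage sketch for compute_m1; both paths are hypothetical, and the metric keys in the result are whatever COCOEvalCap.eval provides in the reference toolkit:

m1 = compute_m1('my_caption_predictions.json',  # hypothetical prediction file
                'caption_reference.json')       # hypothetical reference file
if m1['error']:
    print('COCO loading or evaluation failed')
else:
    print(m1)  # {'error': 0, <metric>: <score>, ...}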
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/run_evaluations.py | python | main | () | The evaluator. | The evaluator. | [
"The",
"evaluator",
"."
] | def main():
"""The evaluator."""
parser = argparse.ArgumentParser()
parser.add_argument("-submit", "--submit", type=str, required=True,
help=' JSON containing submit sentences.')
parser.add_argument("-ref", "--ref", type=str,
help=' JSON references.')
args = parser.parse_args()
json_predictions_file = args.submit
reference_file = args.ref
print compute_m1(json_predictions_file, reference_file) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"-submit\"",
",",
"\"--submit\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"' JSON containing submit sentences.'",
")",
"parser",
".",
"add_argument",
"(",
"\"-ref\"",
",",
"\"--ref\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"' JSON references.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"json_predictions_file",
"=",
"args",
".",
"submit",
"reference_file",
"=",
"args",
".",
"ref",
"print",
"compute_m1",
"(",
"json_predictions_file",
",",
"reference_file",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/run_evaluations.py#L52-L63 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | precook | (s, n=4, out=False) | return counts | Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occurring ngrams | Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occurring ngrams | [
"Takes",
"a",
"string",
"as",
"input",
"and",
"returns",
"an",
"object",
"that",
"can",
"be",
"given",
"to",
"either",
"cook_refs",
"or",
"cook_test",
".",
"This",
"is",
"optional",
":",
"cook_refs",
"and",
"cook_test",
"can",
"take",
"string",
"arguments",
"as",
"well",
".",
":",
"param",
"s",
":",
"string",
":",
"sentence",
"to",
"be",
"converted",
"into",
"ngrams",
":",
"param",
"n",
":",
"int",
":",
"number",
"of",
"ngrams",
"for",
"which",
"representation",
"is",
"calculated",
":",
"return",
":",
"term",
"frequency",
"vector",
"for",
"occuring",
"ngrams"
] | def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in xrange(1,n+1):
for i in xrange(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts | [
"def",
"precook",
"(",
"s",
",",
"n",
"=",
"4",
",",
"out",
"=",
"False",
")",
":",
"words",
"=",
"s",
".",
"split",
"(",
")",
"counts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"k",
"in",
"xrange",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"words",
")",
"-",
"k",
"+",
"1",
")",
":",
"ngram",
"=",
"tuple",
"(",
"words",
"[",
"i",
":",
"i",
"+",
"k",
"]",
")",
"counts",
"[",
"ngram",
"]",
"+=",
"1",
"return",
"counts"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L11-L26 |
|
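A deterministic sketch of the n-gram counting above; counts is a defaultdict keyed by token tuples:

counts = precook('the cat sat on the mat', n=2)
counts[('the',)]        # 2  -- the unigram 'the' occurs twice
counts[('the', 'cat')]  # 1  -- one occurrence of this bigram
len(counts)             # 10 -- 5 distinct unigrams + 5 distinct bigrams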
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | cook_refs | (refs, n=4) | return [precook(ref, n) for ref in refs] | Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict) | Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict) | [
"Takes",
"a",
"list",
"of",
"reference",
"sentences",
"for",
"a",
"single",
"segment",
"and",
"returns",
"an",
"object",
"that",
"encapsulates",
"everything",
"that",
"BLEU",
"needs",
"to",
"know",
"about",
"them",
".",
":",
"param",
"refs",
":",
"list",
"of",
"string",
":",
"reference",
"sentences",
"for",
"some",
"image",
":",
"param",
"n",
":",
"int",
":",
"number",
"of",
"ngrams",
"for",
"which",
"(",
"ngram",
")",
"representation",
"is",
"calculated",
":",
"return",
":",
"result",
"(",
"list",
"of",
"dict",
")"
] | def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs] | [
"def",
"cook_refs",
"(",
"refs",
",",
"n",
"=",
"4",
")",
":",
"## lhuang: oracle will call with \"average\"",
"return",
"[",
"precook",
"(",
"ref",
",",
"n",
")",
"for",
"ref",
"in",
"refs",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L28-L36 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | cook_test | (test, n=4) | return precook(test, n, True) | Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict) | Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict) | [
"Takes",
"a",
"test",
"sentence",
"and",
"returns",
"an",
"object",
"that",
"encapsulates",
"everything",
"that",
"BLEU",
"needs",
"to",
"know",
"about",
"it",
".",
":",
"param",
"test",
":",
"list",
"of",
"string",
":",
"hypothesis",
"sentence",
"for",
"some",
"image",
":",
"param",
"n",
":",
"int",
":",
"number",
"of",
"ngrams",
"for",
"which",
"(",
"ngram",
")",
"representation",
"is",
"calculated",
":",
"return",
":",
"result",
"(",
"dict",
")"
] | def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True) | [
"def",
"cook_test",
"(",
"test",
",",
"n",
"=",
"4",
")",
":",
"return",
"precook",
"(",
"test",
",",
"n",
",",
"True",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L38-L45 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | CiderScorer.copy | (self) | return new | copy the refs. | copy the refs. | [
"copy",
"the",
"refs",
"."
] | def copy(self):
''' copy the refs.'''
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new | [
"def",
"copy",
"(",
"self",
")",
":",
"new",
"=",
"CiderScorer",
"(",
"n",
"=",
"self",
".",
"n",
")",
"new",
".",
"ctest",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"ctest",
")",
"new",
".",
"crefs",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"crefs",
")",
"return",
"new"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L51-L56 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | CiderScorer.__init__ | (self, test=None, refs=None, n=4, sigma=6.0) | singular instance | singular instance | [
"singular",
"instance"
] | def __init__(self, test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.document_frequency = defaultdict(float)
self.cook_append(test, refs)
self.ref_len = None | [
"def",
"__init__",
"(",
"self",
",",
"test",
"=",
"None",
",",
"refs",
"=",
"None",
",",
"n",
"=",
"4",
",",
"sigma",
"=",
"6.0",
")",
":",
"self",
".",
"n",
"=",
"n",
"self",
".",
"sigma",
"=",
"sigma",
"self",
".",
"crefs",
"=",
"[",
"]",
"self",
".",
"ctest",
"=",
"[",
"]",
"self",
".",
"document_frequency",
"=",
"defaultdict",
"(",
"float",
")",
"self",
".",
"cook_append",
"(",
"test",
",",
"refs",
")",
"self",
".",
"ref_len",
"=",
"None"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L58-L66 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | CiderScorer.cook_append | (self, test, refs) | called by constructor and __iadd__ to avoid creating new instances. | called by constructor and __iadd__ to avoid creating new instances. | [
"called",
"by",
"constructor",
"and",
"__iadd__",
"to",
"avoid",
"creating",
"new",
"instances",
"."
] | def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) ## N.B.: -1
else:
self.ctest.append(None) | [
"def",
"cook_append",
"(",
"self",
",",
"test",
",",
"refs",
")",
":",
"if",
"refs",
"is",
"not",
"None",
":",
"self",
".",
"crefs",
".",
"append",
"(",
"cook_refs",
"(",
"refs",
")",
")",
"if",
"test",
"is",
"not",
"None",
":",
"self",
".",
"ctest",
".",
"append",
"(",
"cook_test",
"(",
"test",
")",
")",
"## N.B.: -1",
"else",
":",
"self",
".",
"ctest",
".",
"append",
"(",
"None",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L68-L76 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | CiderScorer.__iadd__ | (self, other) | return self | add an instance (e.g., from another sentence). | add an instance (e.g., from another sentence). | [
"add",
"an",
"instance",
"(",
"e",
".",
"g",
".",
"from",
"another",
"sentence",
")",
"."
] | def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self | [
"def",
"__iadd__",
"(",
"self",
",",
"other",
")",
":",
"if",
"type",
"(",
"other",
")",
"is",
"tuple",
":",
"## avoid creating new CiderScorer instances",
"self",
".",
"cook_append",
"(",
"other",
"[",
"0",
"]",
",",
"other",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"ctest",
".",
"extend",
"(",
"other",
".",
"ctest",
")",
"self",
".",
"crefs",
".",
"extend",
"(",
"other",
".",
"crefs",
")",
"return",
"self"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L82-L92 |
|
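A usage sketch of the tuple form, which is how Cider.compute_score below feeds the scorer without building a new instance per sentence:

scorer = CiderScorer(test='the cat sat on the mat',
                     refs=['a cat sat on the mat', 'the cat is on the mat'])
scorer += ('a dog in the yard', ['the dog stands in the yard'])
# compute_score(), defined further down in cider_scorer.py (not shown in these
# rows), then returns the corpus CIDEr plus the per-sentence score array.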
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py | python | CiderScorer.compute_doc_freq | (self) | | Compute document frequency for reference data.
This will be used to compute idf (inverse document frequency) later
The document frequency is stored in the object
:return: None | Compute document frequency for reference data.
This will be used to compute idf (inverse document frequency) later
The document frequency is stored in the object
:return: None | [
"Compute",
"term",
"frequency",
"for",
"reference",
"data",
".",
"This",
"will",
"be",
"used",
"to",
"compute",
"idf",
"(",
"inverse",
"document",
"frequency",
"later",
")",
"The",
"term",
"frequency",
"is",
"stored",
"in",
"the",
"object",
":",
"return",
":",
"None"
] | def compute_doc_freq(self):
'''
Compute document frequency for reference data.
This will be used to compute idf (inverse document frequency) later
The document frequency is stored in the object
:return: None
'''
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):
self.document_frequency[ngram] += 1 | [
"def",
"compute_doc_freq",
"(",
"self",
")",
":",
"for",
"refs",
"in",
"self",
".",
"crefs",
":",
"# refs, k ref captions of one image",
"for",
"ngram",
"in",
"set",
"(",
"[",
"ngram",
"for",
"ref",
"in",
"refs",
"for",
"(",
"ngram",
",",
"count",
")",
"in",
"ref",
".",
"iteritems",
"(",
")",
"]",
")",
":",
"self",
".",
"document_frequency",
"[",
"ngram",
"]",
"+=",
"1"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider_scorer.py#L93-L103 |
||
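A small sketch of what ends up in document_frequency: each n-gram is counted once per image whose references contain it, not once per reference:

scorer = CiderScorer(test='the cat', refs=['the cat sat', 'a cat'])
scorer += ('a dog', ['the dog ran'])
scorer.compute_doc_freq()
scorer.document_frequency[('the',)]  # 2.0 -- in the references of both images
scorer.document_frequency[('cat',)]  # 1.0 -- two refs of one image still count once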
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider.py | python | Cider.compute_score | (self, gts, res) | return score, scores | Main function to compute CIDEr score
:param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
:return: cider (float) : computed CIDEr score for the corpus | Main function to compute CIDEr score
:param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
:return: cider (float) : computed CIDEr score for the corpus | [
"Main",
"function",
"to",
"compute",
"CIDEr",
"score",
":",
"param",
"hypo_for_image",
"(",
"dict",
")",
":",
"dictionary",
"with",
"key",
"<image",
">",
"and",
"value",
"<tokenized",
"hypothesis",
"/",
"candidate",
"sentence",
">",
"ref_for_image",
"(",
"dict",
")",
":",
"dictionary",
"with",
"key",
"<image",
">",
"and",
"value",
"<tokenized",
"reference",
"sentence",
">",
":",
"return",
":",
"cider",
"(",
"float",
")",
":",
"computed",
"CIDEr",
"score",
"for",
"the",
"corpus"
] | def compute_score(self, gts, res):
"""
Main function to compute CIDEr score
:param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
:return: cider (float) : computed CIDEr score for the corpus
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
cider_scorer += (hypo[0], ref)
(score, scores) = cider_scorer.compute_score()
return score, scores | [
"def",
"compute_score",
"(",
"self",
",",
"gts",
",",
"res",
")",
":",
"assert",
"(",
"gts",
".",
"keys",
"(",
")",
"==",
"res",
".",
"keys",
"(",
")",
")",
"imgIds",
"=",
"gts",
".",
"keys",
"(",
")",
"cider_scorer",
"=",
"CiderScorer",
"(",
"n",
"=",
"self",
".",
"_n",
",",
"sigma",
"=",
"self",
".",
"_sigma",
")",
"for",
"id",
"in",
"imgIds",
":",
"hypo",
"=",
"res",
"[",
"id",
"]",
"ref",
"=",
"gts",
"[",
"id",
"]",
"# Sanity check.",
"assert",
"(",
"type",
"(",
"hypo",
")",
"is",
"list",
")",
"assert",
"(",
"len",
"(",
"hypo",
")",
"==",
"1",
")",
"assert",
"(",
"type",
"(",
"ref",
")",
"is",
"list",
")",
"assert",
"(",
"len",
"(",
"ref",
")",
">",
"0",
")",
"cider_scorer",
"+=",
"(",
"hypo",
"[",
"0",
"]",
",",
"ref",
")",
"(",
"score",
",",
"scores",
")",
"=",
"cider_scorer",
".",
"compute_score",
"(",
")",
"return",
"score",
",",
"scores"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/cider/cider.py#L24-L51 |
|
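A usage sketch, assuming the usual Cider(n=4, sigma=6.0) constructor (not shown in these rows). With a single image id the IDF statistics are degenerate, so a realistic call passes the whole corpus at once:

gts = {'img1': ['a cat sat on the mat', 'the cat is on the mat']}  # references
res = {'img1': ['the cat sat on the mat']}                         # one candidate each
score, per_image = Cider().compute_score(gts, res)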
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | precook | (s, n=4, out=False) | return (len(words), counts) | Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well. | Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well. | [
"Takes",
"a",
"string",
"as",
"input",
"and",
"returns",
"an",
"object",
"that",
"can",
"be",
"given",
"to",
"either",
"cook_refs",
"or",
"cook_test",
".",
"This",
"is",
"optional",
":",
"cook_refs",
"and",
"cook_test",
"can",
"take",
"string",
"arguments",
"as",
"well",
"."
] | def precook(s, n=4, out=False):
"""Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well."""
words = s.split()
counts = defaultdict(int)
for k in xrange(1,n+1):
for i in xrange(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return (len(words), counts) | [
"def",
"precook",
"(",
"s",
",",
"n",
"=",
"4",
",",
"out",
"=",
"False",
")",
":",
"words",
"=",
"s",
".",
"split",
"(",
")",
"counts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"k",
"in",
"xrange",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"words",
")",
"-",
"k",
"+",
"1",
")",
":",
"ngram",
"=",
"tuple",
"(",
"words",
"[",
"i",
":",
"i",
"+",
"k",
"]",
")",
"counts",
"[",
"ngram",
"]",
"+=",
"1",
"return",
"(",
"len",
"(",
"words",
")",
",",
"counts",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L23-L33 |
|
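Unlike the CIDEr precook above, this variant also returns the token count, which the scorer later needs for the brevity statistics; a quick sketch:

length, counts = precook('the cat sat on the mat', n=2)
length            # 6 -- number of tokens
counts[('the',)]  # 2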
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | cook_refs | (refs, eff=None, n=4) | return (reflen, maxcounts) | Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them. | Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them. | [
"Takes",
"a",
"list",
"of",
"reference",
"sentences",
"for",
"a",
"single",
"segment",
"and",
"returns",
"an",
"object",
"that",
"encapsulates",
"everything",
"that",
"BLEU",
"needs",
"to",
"know",
"about",
"them",
"."
] | def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
reflen = []
maxcounts = {}
for ref in refs:
rl, counts = precook(ref, n)
reflen.append(rl)
for (ngram,count) in counts.iteritems():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
# Calculate effective reference sentence length.
if eff == "shortest":
reflen = min(reflen)
elif eff == "average":
reflen = float(sum(reflen))/len(reflen)
## lhuang: N.B.: leave reflen computation to the very end!!
## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
return (reflen, maxcounts) | [
"def",
"cook_refs",
"(",
"refs",
",",
"eff",
"=",
"None",
",",
"n",
"=",
"4",
")",
":",
"## lhuang: oracle will call with \"average\"",
"reflen",
"=",
"[",
"]",
"maxcounts",
"=",
"{",
"}",
"for",
"ref",
"in",
"refs",
":",
"rl",
",",
"counts",
"=",
"precook",
"(",
"ref",
",",
"n",
")",
"reflen",
".",
"append",
"(",
"rl",
")",
"for",
"(",
"ngram",
",",
"count",
")",
"in",
"counts",
".",
"iteritems",
"(",
")",
":",
"maxcounts",
"[",
"ngram",
"]",
"=",
"max",
"(",
"maxcounts",
".",
"get",
"(",
"ngram",
",",
"0",
")",
",",
"count",
")",
"# Calculate effective reference sentence length.",
"if",
"eff",
"==",
"\"shortest\"",
":",
"reflen",
"=",
"min",
"(",
"reflen",
")",
"elif",
"eff",
"==",
"\"average\"",
":",
"reflen",
"=",
"float",
"(",
"sum",
"(",
"reflen",
")",
")",
"/",
"len",
"(",
"reflen",
")",
"## lhuang: N.B.: leave reflen computaiton to the very end!!",
"## lhuang: N.B.: in case of \"closest\", keep a list of reflens!! (bad design)",
"return",
"(",
"reflen",
",",
"maxcounts",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L35-L58 |
|
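A sketch of the eff modes; with eff=None the reference lengths stay a list so that 'closest' can be resolved per test sentence in cook_test below:

reflen, maxcounts = cook_refs(['the cat sat on the mat', 'a cat on a mat'])
reflen               # [6, 5]
maxcounts[('a',)]    # 2 -- max count of each n-gram over all references
reflen_avg, _ = cook_refs(['the cat sat on the mat', 'a cat on a mat'], eff='average')
reflen_avg           # 5.5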
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | cook_test | (test, (reflen, refmaxcounts), eff=None, n=4) | return result | Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it. | Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it. | [
"Takes",
"a",
"test",
"sentence",
"and",
"returns",
"an",
"object",
"that",
"encapsulates",
"everything",
"that",
"BLEU",
"needs",
"to",
"know",
"about",
"it",
"."
] | def cook_test(test, (reflen, refmaxcounts), eff=None, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
testlen, counts = precook(test, n, True)
result = {}
# Calculate effective reference sentence length.
if eff == "closest":
result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1]
else: ## i.e., "average" or "shortest" or None
result["reflen"] = reflen
result["testlen"] = testlen
result["guess"] = [max(0,testlen-k+1) for k in xrange(1,n+1)]
result['correct'] = [0]*n
for (ngram, count) in counts.iteritems():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result | [
"def",
"cook_test",
"(",
"test",
",",
"(",
"reflen",
",",
"refmaxcounts",
")",
",",
"eff",
"=",
"None",
",",
"n",
"=",
"4",
")",
":",
"testlen",
",",
"counts",
"=",
"precook",
"(",
"test",
",",
"n",
",",
"True",
")",
"result",
"=",
"{",
"}",
"# Calculate effective reference sentence length.",
"if",
"eff",
"==",
"\"closest\"",
":",
"result",
"[",
"\"reflen\"",
"]",
"=",
"min",
"(",
"(",
"abs",
"(",
"l",
"-",
"testlen",
")",
",",
"l",
")",
"for",
"l",
"in",
"reflen",
")",
"[",
"1",
"]",
"else",
":",
"## i.e., \"average\" or \"shortest\" or None",
"result",
"[",
"\"reflen\"",
"]",
"=",
"reflen",
"result",
"[",
"\"testlen\"",
"]",
"=",
"testlen",
"result",
"[",
"\"guess\"",
"]",
"=",
"[",
"max",
"(",
"0",
",",
"testlen",
"-",
"k",
"+",
"1",
")",
"for",
"k",
"in",
"xrange",
"(",
"1",
",",
"n",
"+",
"1",
")",
"]",
"result",
"[",
"'correct'",
"]",
"=",
"[",
"0",
"]",
"*",
"n",
"for",
"(",
"ngram",
",",
"count",
")",
"in",
"counts",
".",
"iteritems",
"(",
")",
":",
"result",
"[",
"\"correct\"",
"]",
"[",
"len",
"(",
"ngram",
")",
"-",
"1",
"]",
"+=",
"min",
"(",
"refmaxcounts",
".",
"get",
"(",
"ngram",
",",
"0",
")",
",",
"count",
")",
"return",
"result"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L60-L83 |
|
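A sketch of pairing cook_test with cook_refs (Python 2 only, since the tuple-parameter signature above was removed in Python 3):

refstats = cook_refs(['the cat sat on the mat'])
result = cook_test('the cat sat', refstats, eff='closest')
# {'reflen': 6, 'testlen': 3, 'guess': [3, 2, 1, 0], 'correct': [3, 2, 1, 0]}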
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.copy | (self) | return new | copy the refs. | copy the refs. | [
"copy",
"the",
"refs",
"."
] | def copy(self):
''' copy the refs.'''
new = BleuScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
new._score = None
return new | [
"def",
"copy",
"(",
"self",
")",
":",
"new",
"=",
"BleuScorer",
"(",
"n",
"=",
"self",
".",
"n",
")",
"new",
".",
"ctest",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"ctest",
")",
"new",
".",
"crefs",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"crefs",
")",
"new",
".",
"_score",
"=",
"None",
"return",
"new"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L92-L98 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.__init__ | (self, test=None, refs=None, n=4, special_reflen=None) | singular instance | singular instance | [
"singular",
"instance"
] | def __init__(self, test=None, refs=None, n=4, special_reflen=None):
''' singular instance '''
self.n = n
self.crefs = []
self.ctest = []
self.cook_append(test, refs)
self.special_reflen = special_reflen | [
"def",
"__init__",
"(",
"self",
",",
"test",
"=",
"None",
",",
"refs",
"=",
"None",
",",
"n",
"=",
"4",
",",
"special_reflen",
"=",
"None",
")",
":",
"self",
".",
"n",
"=",
"n",
"self",
".",
"crefs",
"=",
"[",
"]",
"self",
".",
"ctest",
"=",
"[",
"]",
"self",
".",
"cook_append",
"(",
"test",
",",
"refs",
")",
"self",
".",
"special_reflen",
"=",
"special_reflen"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L100-L107 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.cook_append | (self, test, refs) | called by constructor and __iadd__ to avoid creating new instances. | called by constructor and __iadd__ to avoid creating new instances. | [
"called",
"by",
"constructor",
"and",
"__iadd__",
"to",
"avoid",
"creating",
"new",
"instances",
"."
] | def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
cooked_test = cook_test(test, self.crefs[-1])
self.ctest.append(cooked_test) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
self._score = None | [
"def",
"cook_append",
"(",
"self",
",",
"test",
",",
"refs",
")",
":",
"if",
"refs",
"is",
"not",
"None",
":",
"self",
".",
"crefs",
".",
"append",
"(",
"cook_refs",
"(",
"refs",
")",
")",
"if",
"test",
"is",
"not",
"None",
":",
"cooked_test",
"=",
"cook_test",
"(",
"test",
",",
"self",
".",
"crefs",
"[",
"-",
"1",
"]",
")",
"self",
".",
"ctest",
".",
"append",
"(",
"cooked_test",
")",
"## N.B.: -1",
"else",
":",
"self",
".",
"ctest",
".",
"append",
"(",
"None",
")",
"# lens of crefs and ctest have to match",
"self",
".",
"_score",
"=",
"None"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L109-L120 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.score_ratio | (self, option=None) | return (self.fscore(option=option), self.ratio(option=option)) | return (bleu, len_ratio) pair | return (bleu, len_ratio) pair | [
"return",
"(",
"bleu",
"len_ratio",
")",
"pair"
] | def score_ratio(self, option=None):
'''return (bleu, len_ratio) pair'''
return (self.fscore(option=option), self.ratio(option=option)) | [
"def",
"score_ratio",
"(",
"self",
",",
"option",
"=",
"None",
")",
":",
"return",
"(",
"self",
".",
"fscore",
"(",
"option",
"=",
"option",
")",
",",
"self",
".",
"ratio",
"(",
"option",
"=",
"option",
")",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L126-L128 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.rescore | (self, new_test) | return self.retest(new_test).compute_score() | replace test(s) with new test(s), and returns the new score. | replace test(s) with new test(s), and returns the new score. | [
"replace",
"test",
"(",
"s",
")",
"with",
"new",
"test",
"(",
"s",
")",
"and",
"returns",
"the",
"new",
"score",
"."
] | def rescore(self, new_test):
''' replace test(s) with new test(s), and returns the new score.'''
return self.retest(new_test).compute_score() | [
"def",
"rescore",
"(",
"self",
",",
"new_test",
")",
":",
"return",
"self",
".",
"retest",
"(",
"new_test",
")",
".",
"compute_score",
"(",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L152-L155 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py | python | BleuScorer.__iadd__ | (self, other) | return self | add an instance (e.g., from another sentence). | add an instance (e.g., from another sentence). | [
"add",
"an",
"instance",
"(",
"e",
".",
"g",
".",
"from",
"another",
"sentence",
")",
"."
] | def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new BleuScorer instances
self.cook_append(other[0], other[1])
else:
assert self.compatible(other), "incompatible BLEUs."
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
self._score = None ## need to recompute
return self | [
"def",
"__iadd__",
"(",
"self",
",",
"other",
")",
":",
"if",
"type",
"(",
"other",
")",
"is",
"tuple",
":",
"## avoid creating new BleuScorer instances",
"self",
".",
"cook_append",
"(",
"other",
"[",
"0",
"]",
",",
"other",
"[",
"1",
"]",
")",
"else",
":",
"assert",
"self",
".",
"compatible",
"(",
"other",
")",
",",
"\"incompatible BLEUs.\"",
"self",
".",
"ctest",
".",
"extend",
"(",
"other",
".",
"ctest",
")",
"self",
".",
"crefs",
".",
"extend",
"(",
"other",
".",
"crefs",
")",
"self",
".",
"_score",
"=",
"None",
"## need to recompute",
"return",
"self"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/bleu/bleu_scorer.py#L161-L173 |
|
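A usage sketch mirroring the CIDEr scorer: accumulate (test, refs) tuples, then score once:

scorer = BleuScorer(test='the cat sat on the mat', refs=['a cat sat on the mat'], n=4)
scorer += ('there is a dog', ['a dog is in the yard', 'the dog stands in the yard'])
# compute_score(), defined further down in bleu_scorer.py (not shown in these
# rows), then yields corpus-level BLEU-1..BLEU-4.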
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py | python | my_lcs | (string, sub) | return lengths[len(string)][len(sub)] | Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS | Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (int): length of the longest common subsequence between the two strings | [
"Calculates",
"longest",
"common",
"subsequence",
"for",
"a",
"pair",
"of",
"tokenized",
"strings",
":",
"param",
"string",
":",
"list",
"of",
"str",
":",
"tokens",
"from",
"a",
"string",
"split",
"using",
"whitespace",
":",
"param",
"sub",
":",
"list",
"of",
"str",
":",
"shorter",
"string",
"also",
"split",
"using",
"whitespace",
":",
"returns",
":",
"length",
"(",
"list",
"of",
"int",
")",
":",
"length",
"of",
"the",
"longest",
"common",
"subsequence",
"between",
"the",
"two",
"strings"
] | def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if(len(string)< len(sub)):
sub, string = string, sub
lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
for j in range(1,len(sub)+1):
for i in range(1,len(string)+1):
if(string[i-1] == sub[j-1]):
lengths[i][j] = lengths[i-1][j-1] + 1
else:
lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
return lengths[len(string)][len(sub)] | [
"def",
"my_lcs",
"(",
"string",
",",
"sub",
")",
":",
"if",
"(",
"len",
"(",
"string",
")",
"<",
"len",
"(",
"sub",
")",
")",
":",
"sub",
",",
"string",
"=",
"string",
",",
"sub",
"lengths",
"=",
"[",
"[",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sub",
")",
"+",
"1",
")",
"]",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"string",
")",
"+",
"1",
")",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"sub",
")",
"+",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"string",
")",
"+",
"1",
")",
":",
"if",
"(",
"string",
"[",
"i",
"-",
"1",
"]",
"==",
"sub",
"[",
"j",
"-",
"1",
"]",
")",
":",
"lengths",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"lengths",
"[",
"i",
"-",
"1",
"]",
"[",
"j",
"-",
"1",
"]",
"+",
"1",
"else",
":",
"lengths",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"max",
"(",
"lengths",
"[",
"i",
"-",
"1",
"]",
"[",
"j",
"]",
",",
"lengths",
"[",
"i",
"]",
"[",
"j",
"-",
"1",
"]",
")",
"return",
"lengths",
"[",
"len",
"(",
"string",
")",
"]",
"[",
"len",
"(",
"sub",
")",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py#L13-L34 |
|
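Quick checks for my_lcs; the arguments are swapped internally when the second list is longer, so argument order does not matter:

my_lcs('the cat sat on the mat'.split(), 'the mat'.split())  # 2
my_lcs('a b c d'.split(), 'b d'.split())                     # 2
my_lcs('a b'.split(), 'c d'.split())                         # 0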
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py | python | Rouge.calc_score | (self, candidate, refs) | return score | Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: float (ROUGE-L score for the candidate evaluated against references) | Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: float (ROUGE-L score for the candidate evaluated against references) | [
"Compute",
"ROUGE",
"-",
"L",
"score",
"given",
"one",
"candidate",
"and",
"references",
"for",
"an",
"image",
":",
"param",
"candidate",
":",
"str",
":",
"candidate",
"sentence",
"to",
"be",
"evaluated",
":",
"param",
"refs",
":",
"list",
"of",
"str",
":",
"COCO",
"reference",
"sentences",
"for",
"the",
"particular",
"image",
"to",
"be",
"evaluated",
":",
"returns",
"score",
":",
"int",
"(",
"ROUGE",
"-",
"L",
"score",
"for",
"the",
"candidate",
"evaluated",
"against",
"references",
")"
] | def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: float (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if(prec_max!=0 and rec_max !=0):
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score | [
"def",
"calc_score",
"(",
"self",
",",
"candidate",
",",
"refs",
")",
":",
"assert",
"(",
"len",
"(",
"candidate",
")",
"==",
"1",
")",
"assert",
"(",
"len",
"(",
"refs",
")",
">",
"0",
")",
"prec",
"=",
"[",
"]",
"rec",
"=",
"[",
"]",
"# split into tokens",
"token_c",
"=",
"candidate",
"[",
"0",
"]",
".",
"split",
"(",
"\" \"",
")",
"for",
"reference",
"in",
"refs",
":",
"# split into tokens",
"token_r",
"=",
"reference",
".",
"split",
"(",
"\" \"",
")",
"# compute the longest common subsequence",
"lcs",
"=",
"my_lcs",
"(",
"token_r",
",",
"token_c",
")",
"prec",
".",
"append",
"(",
"lcs",
"/",
"float",
"(",
"len",
"(",
"token_c",
")",
")",
")",
"rec",
".",
"append",
"(",
"lcs",
"/",
"float",
"(",
"len",
"(",
"token_r",
")",
")",
")",
"prec_max",
"=",
"max",
"(",
"prec",
")",
"rec_max",
"=",
"max",
"(",
"rec",
")",
"if",
"(",
"prec_max",
"!=",
"0",
"and",
"rec_max",
"!=",
"0",
")",
":",
"score",
"=",
"(",
"(",
"1",
"+",
"self",
".",
"beta",
"**",
"2",
")",
"*",
"prec_max",
"*",
"rec_max",
")",
"/",
"float",
"(",
"rec_max",
"+",
"self",
".",
"beta",
"**",
"2",
"*",
"prec_max",
")",
"else",
":",
"score",
"=",
"0.0",
"return",
"score"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py#L45-L75 |
|
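A worked example, assuming the constructor sets self.beta = 1.2 as in the reference ROUGE-L implementation (the constructor is not shown in these rows):

rouge = Rouge()
score = rouge.calc_score(['the cat sat'], ['the cat sat on the mat'])
# lcs = 3, prec_max = 3/3 = 1.0, rec_max = 3/6 = 0.5
# score = (1 + 1.2**2) * 1.0 * 0.5 / (0.5 + 1.2**2 * 1.0) ~= 0.629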
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py | python | Rouge.compute_score | (self, gts, res) | return average_score, np.array(score) | Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images) | Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images) | [
"Computes",
"Rouge",
"-",
"L",
"score",
"given",
"a",
"set",
"of",
"reference",
"and",
"candidate",
"sentences",
"for",
"the",
"dataset",
"Invoked",
"by",
"evaluate_captions",
".",
"py",
":",
"param",
"hypo_for_image",
":",
"dict",
":",
"candidate",
"/",
"test",
"sentences",
"with",
"image",
"name",
"key",
"and",
"tokenized",
"sentences",
"as",
"values",
":",
"param",
"ref_for_image",
":",
"dict",
":",
"reference",
"MS",
"-",
"COCO",
"sentences",
"with",
"image",
"name",
"key",
"and",
"tokenized",
"sentences",
"as",
"values",
":",
"returns",
":",
"average_score",
":",
"float",
"(",
"mean",
"ROUGE",
"-",
"L",
"score",
"computed",
"by",
"averaging",
"scores",
"for",
"all",
"the",
"images",
")"
] | def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score) | [
"def",
"compute_score",
"(",
"self",
",",
"gts",
",",
"res",
")",
":",
"assert",
"(",
"gts",
".",
"keys",
"(",
")",
"==",
"res",
".",
"keys",
"(",
")",
")",
"imgIds",
"=",
"gts",
".",
"keys",
"(",
")",
"score",
"=",
"[",
"]",
"for",
"id",
"in",
"imgIds",
":",
"hypo",
"=",
"res",
"[",
"id",
"]",
"ref",
"=",
"gts",
"[",
"id",
"]",
"score",
".",
"append",
"(",
"self",
".",
"calc_score",
"(",
"hypo",
",",
"ref",
")",
")",
"# Sanity check.",
"assert",
"(",
"type",
"(",
"hypo",
")",
"is",
"list",
")",
"assert",
"(",
"len",
"(",
"hypo",
")",
"==",
"1",
")",
"assert",
"(",
"type",
"(",
"ref",
")",
"is",
"list",
")",
"assert",
"(",
"len",
"(",
"ref",
")",
">",
"0",
")",
"average_score",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"array",
"(",
"score",
")",
")",
"return",
"average_score",
",",
"np",
".",
"array",
"(",
"score",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxevalcap/rouge/rouge.py#L77-L102 |
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.__init__ | (self, annotation_file=None) | Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return: | Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return: | [
"Constructor",
"of",
"Microsoft",
"COCO",
"helper",
"class",
"for",
"reading",
"and",
"visualizing",
"annotations",
".",
":",
"param",
"annotation_file",
"(",
"str",
")",
":",
"location",
"of",
"annotation",
"file",
":",
"param",
"image_folder",
"(",
"str",
")",
":",
"location",
"to",
"the",
"folder",
"that",
"hosts",
"images",
".",
":",
"return",
":"
] | def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = []
self.cats = []
self.image2hash = {}
if not annotation_file == None:
print('loading annotations into memory...')
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print( datetime.datetime.utcnow() - time_t)
self.dataset = dataset
self.createIndex() | [
"def",
"__init__",
"(",
"self",
",",
"annotation_file",
"=",
"None",
")",
":",
"# load dataset",
"self",
".",
"dataset",
"=",
"{",
"}",
"self",
".",
"anns",
"=",
"[",
"]",
"self",
".",
"imgToAnns",
"=",
"{",
"}",
"self",
".",
"catToImgs",
"=",
"{",
"}",
"self",
".",
"imgs",
"=",
"[",
"]",
"self",
".",
"cats",
"=",
"[",
"]",
"self",
".",
"image2hash",
"=",
"{",
"}",
"if",
"not",
"annotation_file",
"==",
"None",
":",
"print",
"(",
"'loading annotations into memory...'",
")",
"time_t",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"dataset",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"annotation_file",
",",
"'r'",
")",
")",
"print",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"time_t",
")",
"self",
".",
"dataset",
"=",
"dataset",
"self",
".",
"createIndex",
"(",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L65-L87 |
||
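A loading sketch with hypothetical file names; loadRes is the companion results loader already used by compute_m1 above:

coco = COCO('caption_reference.json')                    # prints load timing
coco_res = coco.loadRes('my_caption_predictions.json')   # predictions as a COCO object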
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.info | (self) | Print information about the annotation file.
:return: | Print information about the annotation file.
:return: | [
"Print",
"information",
"about",
"the",
"annotation",
"file",
".",
":",
"return",
":"
] | def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print( '%s: %s'%(key, value)) | [
"def",
"info",
"(",
"self",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"datset",
"[",
"'info'",
"]",
".",
"items",
"(",
")",
":",
"print",
"(",
"'%s: %s'",
"%",
"(",
"key",
",",
"value",
")",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L129-L135 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.getAnnIds | (self, imgIds=[], catIds=[], areaRng=[], iscrowd=None) | return ids | Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids | Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids | [
"Get",
"ann",
"ids",
"that",
"satisfy",
"given",
"filter",
"conditions",
".",
"default",
"skips",
"that",
"filter",
":",
"param",
"imgIds",
"(",
"int",
"array",
")",
":",
"get",
"anns",
"for",
"given",
"imgs",
"catIds",
"(",
"int",
"array",
")",
":",
"get",
"anns",
"for",
"given",
"cats",
"areaRng",
"(",
"float",
"array",
")",
":",
"get",
"anns",
"for",
"given",
"area",
"range",
"(",
"e",
".",
"g",
".",
"[",
"0",
"inf",
"]",
")",
"iscrowd",
"(",
"boolean",
")",
":",
"get",
"anns",
"for",
"given",
"crowd",
"label",
"(",
"False",
"or",
"True",
")",
":",
"return",
":",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"array",
"of",
"ann",
"ids"
] | def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids | [
"def",
"getAnnIds",
"(",
"self",
",",
"imgIds",
"=",
"[",
"]",
",",
"catIds",
"=",
"[",
"]",
",",
"areaRng",
"=",
"[",
"]",
",",
"iscrowd",
"=",
"None",
")",
":",
"imgIds",
"=",
"imgIds",
"if",
"type",
"(",
"imgIds",
")",
"==",
"list",
"else",
"[",
"imgIds",
"]",
"catIds",
"=",
"catIds",
"if",
"type",
"(",
"catIds",
")",
"==",
"list",
"else",
"[",
"catIds",
"]",
"if",
"len",
"(",
"imgIds",
")",
"==",
"len",
"(",
"catIds",
")",
"==",
"len",
"(",
"areaRng",
")",
"==",
"0",
":",
"anns",
"=",
"self",
".",
"dataset",
"[",
"'annotations'",
"]",
"else",
":",
"if",
"not",
"len",
"(",
"imgIds",
")",
"==",
"0",
":",
"anns",
"=",
"sum",
"(",
"[",
"self",
".",
"imgToAnns",
"[",
"imgId",
"]",
"for",
"imgId",
"in",
"imgIds",
"if",
"imgId",
"in",
"self",
".",
"imgToAnns",
"]",
",",
"[",
"]",
")",
"else",
":",
"anns",
"=",
"self",
".",
"dataset",
"[",
"'annotations'",
"]",
"anns",
"=",
"anns",
"if",
"len",
"(",
"catIds",
")",
"==",
"0",
"else",
"[",
"ann",
"for",
"ann",
"in",
"anns",
"if",
"ann",
"[",
"'category_id'",
"]",
"in",
"catIds",
"]",
"anns",
"=",
"anns",
"if",
"len",
"(",
"areaRng",
")",
"==",
"0",
"else",
"[",
"ann",
"for",
"ann",
"in",
"anns",
"if",
"ann",
"[",
"'area'",
"]",
">",
"areaRng",
"[",
"0",
"]",
"and",
"ann",
"[",
"'area'",
"]",
"<",
"areaRng",
"[",
"1",
"]",
"]",
"if",
"self",
".",
"dataset",
"[",
"'type'",
"]",
"==",
"'instances'",
":",
"if",
"not",
"iscrowd",
"==",
"None",
":",
"ids",
"=",
"[",
"ann",
"[",
"'id'",
"]",
"for",
"ann",
"in",
"anns",
"if",
"ann",
"[",
"'iscrowd'",
"]",
"==",
"iscrowd",
"]",
"else",
":",
"ids",
"=",
"[",
"ann",
"[",
"'id'",
"]",
"for",
"ann",
"in",
"anns",
"]",
"else",
":",
"ids",
"=",
"[",
"ann",
"[",
"'id'",
"]",
"for",
"ann",
"in",
"anns",
"]",
"return",
"ids"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L137-L165 |
|
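A filtering sketch with hypothetical ids; scalars are wrapped into lists, the filters apply in sequence, and iscrowd only matters for 'instances'-type datasets:

ann_ids = coco.getAnnIds(imgIds=42)
ann_ids = coco.getAnnIds(imgIds=[42], catIds=[3], areaRng=[0, 1e5], iscrowd=None)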
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.getCatIds | (self, catNms=[], supNms=[], catIds=[]) | return ids | filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids | filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids | [
"filtering",
"parameters",
".",
"default",
"skips",
"that",
"filter",
".",
":",
"param",
"catNms",
"(",
"str",
"array",
")",
":",
"get",
"cats",
"for",
"given",
"cat",
"names",
":",
"param",
"supNms",
"(",
"str",
"array",
")",
":",
"get",
"cats",
"for",
"given",
"supercategory",
"names",
":",
"param",
"catIds",
"(",
"int",
"array",
")",
":",
"get",
"cats",
"for",
"given",
"cat",
"ids",
":",
"return",
":",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"array",
"of",
"cat",
"ids"
] | def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids | [
"def",
"getCatIds",
"(",
"self",
",",
"catNms",
"=",
"[",
"]",
",",
"supNms",
"=",
"[",
"]",
",",
"catIds",
"=",
"[",
"]",
")",
":",
"catNms",
"=",
"catNms",
"if",
"type",
"(",
"catNms",
")",
"==",
"list",
"else",
"[",
"catNms",
"]",
"supNms",
"=",
"supNms",
"if",
"type",
"(",
"supNms",
")",
"==",
"list",
"else",
"[",
"supNms",
"]",
"catIds",
"=",
"catIds",
"if",
"type",
"(",
"catIds",
")",
"==",
"list",
"else",
"[",
"catIds",
"]",
"if",
"len",
"(",
"catNms",
")",
"==",
"len",
"(",
"supNms",
")",
"==",
"len",
"(",
"catIds",
")",
"==",
"0",
":",
"cats",
"=",
"self",
".",
"dataset",
"[",
"'categories'",
"]",
"else",
":",
"cats",
"=",
"self",
".",
"dataset",
"[",
"'categories'",
"]",
"cats",
"=",
"cats",
"if",
"len",
"(",
"catNms",
")",
"==",
"0",
"else",
"[",
"cat",
"for",
"cat",
"in",
"cats",
"if",
"cat",
"[",
"'name'",
"]",
"in",
"catNms",
"]",
"cats",
"=",
"cats",
"if",
"len",
"(",
"supNms",
")",
"==",
"0",
"else",
"[",
"cat",
"for",
"cat",
"in",
"cats",
"if",
"cat",
"[",
"'supercategory'",
"]",
"in",
"supNms",
"]",
"cats",
"=",
"cats",
"if",
"len",
"(",
"catIds",
")",
"==",
"0",
"else",
"[",
"cat",
"for",
"cat",
"in",
"cats",
"if",
"cat",
"[",
"'id'",
"]",
"in",
"catIds",
"]",
"ids",
"=",
"[",
"cat",
"[",
"'id'",
"]",
"for",
"cat",
"in",
"cats",
"]",
"return",
"ids"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L167-L187 |
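
A hedged sketch of getCatIds; the category and supercategory names below are examples only, and the three filters may be combined:

animal_ids = coco.getCatIds(supNms=['animal'])  # every category under a supercategory
dog_id = coco.getCatIds(catNms=['dog'])[0]      # a single category looked up by name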
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.getImgIds | (self, imgIds=[], catIds=[]) | return list(ids) | Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids | Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids | [
"Get",
"img",
"ids",
"that",
"satisfy",
"given",
"filter",
"conditions",
".",
":",
"param",
"imgIds",
"(",
"int",
"array",
")",
":",
"get",
"imgs",
"for",
"given",
"ids",
":",
"param",
"catIds",
"(",
"int",
"array",
")",
":",
"get",
"imgs",
"with",
"all",
"given",
"cats",
":",
"return",
":",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"array",
"of",
"img",
"ids"
] | def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids) | [
"def",
"getImgIds",
"(",
"self",
",",
"imgIds",
"=",
"[",
"]",
",",
"catIds",
"=",
"[",
"]",
")",
":",
"imgIds",
"=",
"imgIds",
"if",
"type",
"(",
"imgIds",
")",
"==",
"list",
"else",
"[",
"imgIds",
"]",
"catIds",
"=",
"catIds",
"if",
"type",
"(",
"catIds",
")",
"==",
"list",
"else",
"[",
"catIds",
"]",
"if",
"len",
"(",
"imgIds",
")",
"==",
"len",
"(",
"catIds",
")",
"==",
"0",
":",
"ids",
"=",
"self",
".",
"imgs",
".",
"keys",
"(",
")",
"else",
":",
"ids",
"=",
"set",
"(",
"imgIds",
")",
"for",
"catId",
"in",
"catIds",
":",
"if",
"len",
"(",
"ids",
")",
"==",
"0",
":",
"ids",
"=",
"set",
"(",
"self",
".",
"catToImgs",
"[",
"catId",
"]",
")",
"else",
":",
"ids",
"&=",
"set",
"(",
"self",
".",
"catToImgs",
"[",
"catId",
"]",
")",
"return",
"list",
"(",
"ids",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L189-L208 |
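
Note that catIds filters conjunctively: the loop intersects the per-category image sets, so an image must contain all given categories. A hedged sketch with example names:

img_ids = coco.getImgIds(catIds=coco.getCatIds(catNms=['dog', 'ball']))  # imgs with BOTH cats
imgs = coco.loadImgs(img_ids)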
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.loadAnns | (self, ids=[]) | Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects | Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects | [
"Load",
"anns",
"with",
"the",
"specified",
"ids",
".",
":",
"param",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"ids",
"specifying",
"anns",
":",
"return",
":",
"anns",
"(",
"object",
"array",
")",
":",
"loaded",
"ann",
"objects"
] | def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]] | [
"def",
"loadAnns",
"(",
"self",
",",
"ids",
"=",
"[",
"]",
")",
":",
"if",
"type",
"(",
"ids",
")",
"==",
"list",
":",
"return",
"[",
"self",
".",
"anns",
"[",
"id",
"]",
"for",
"id",
"in",
"ids",
"]",
"elif",
"type",
"(",
"ids",
")",
"==",
"int",
":",
"return",
"[",
"self",
".",
"anns",
"[",
"ids",
"]",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L210-L219 |
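
loadAnns (like loadCats and loadImgs below) dispatches on the argument type and always wraps the result in a list; any type other than list or int falls through and returns None:

anns = coco.loadAnns([1, 2, 3])  # list of ids -> list of three anns
ann = coco.loadAnns(1)           # single int  -> one-element list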
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.loadCats | (self, ids=[]) | Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects | Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects | [
"Load",
"cats",
"with",
"the",
"specified",
"ids",
".",
":",
"param",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"ids",
"specifying",
"cats",
":",
"return",
":",
"cats",
"(",
"object",
"array",
")",
":",
"loaded",
"cat",
"objects"
] | def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]] | [
"def",
"loadCats",
"(",
"self",
",",
"ids",
"=",
"[",
"]",
")",
":",
"if",
"type",
"(",
"ids",
")",
"==",
"list",
":",
"return",
"[",
"self",
".",
"cats",
"[",
"id",
"]",
"for",
"id",
"in",
"ids",
"]",
"elif",
"type",
"(",
"ids",
")",
"==",
"int",
":",
"return",
"[",
"self",
".",
"cats",
"[",
"ids",
"]",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L221-L230 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.loadImgs | (self, ids=[]) | | | Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects | Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects | [
"Load",
"anns",
"with",
"the",
"specified",
"ids",
".",
":",
"param",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"ids",
"specifying",
"img",
":",
"return",
":",
"imgs",
"(",
"object",
"array",
")",
":",
"loaded",
"img",
"objects"
] | def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]] | [
"def",
"loadImgs",
"(",
"self",
",",
"ids",
"=",
"[",
"]",
")",
":",
"if",
"type",
"(",
"ids",
")",
"==",
"list",
":",
"return",
"[",
"self",
".",
"imgs",
"[",
"id",
"]",
"for",
"id",
"in",
"ids",
"]",
"elif",
"type",
"(",
"ids",
")",
"==",
"int",
":",
"return",
"[",
"self",
".",
"imgs",
"[",
"ids",
"]",
"]"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L232-L241 |
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.showAnns | (self, anns) | Display the specified annotations.
:param anns (array of object): annotations to display
:return: None | Display the specified annotations.
:param anns (array of object): annotations to display
:return: None | [
"Display",
"the",
"specified",
"annotations",
".",
":",
"param",
"anns",
"(",
"array",
"of",
"object",
")",
":",
"annotations",
"to",
"display",
":",
"return",
":",
"None"
] | def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)//2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if self.dataset['type'] == 'captions':
for ann in anns:
print( ann['caption']) | [
"def",
"showAnns",
"(",
"self",
",",
"anns",
")",
":",
"if",
"len",
"(",
"anns",
")",
"==",
"0",
":",
"return",
"0",
"if",
"self",
".",
"dataset",
"[",
"'type'",
"]",
"==",
"'instances'",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"polygons",
"=",
"[",
"]",
"color",
"=",
"[",
"]",
"for",
"ann",
"in",
"anns",
":",
"c",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"1",
",",
"3",
")",
")",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"if",
"type",
"(",
"ann",
"[",
"'segmentation'",
"]",
")",
"==",
"list",
":",
"# polygon",
"for",
"seg",
"in",
"ann",
"[",
"'segmentation'",
"]",
":",
"poly",
"=",
"np",
".",
"array",
"(",
"seg",
")",
".",
"reshape",
"(",
"(",
"len",
"(",
"seg",
")",
"/",
"2",
",",
"2",
")",
")",
"polygons",
".",
"append",
"(",
"Polygon",
"(",
"poly",
",",
"True",
",",
"alpha",
"=",
"0.4",
")",
")",
"color",
".",
"append",
"(",
"c",
")",
"else",
":",
"# mask",
"mask",
"=",
"COCO",
".",
"decodeMask",
"(",
"ann",
"[",
"'segmentation'",
"]",
")",
"img",
"=",
"np",
".",
"ones",
"(",
"(",
"mask",
".",
"shape",
"[",
"0",
"]",
",",
"mask",
".",
"shape",
"[",
"1",
"]",
",",
"3",
")",
")",
"if",
"ann",
"[",
"'iscrowd'",
"]",
"==",
"1",
":",
"color_mask",
"=",
"np",
".",
"array",
"(",
"[",
"2.0",
",",
"166.0",
",",
"101.0",
"]",
")",
"/",
"255",
"if",
"ann",
"[",
"'iscrowd'",
"]",
"==",
"0",
":",
"color_mask",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"1",
",",
"3",
")",
")",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"img",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"color_mask",
"[",
"i",
"]",
"ax",
".",
"imshow",
"(",
"np",
".",
"dstack",
"(",
"(",
"img",
",",
"mask",
"*",
"0.5",
")",
")",
")",
"p",
"=",
"PatchCollection",
"(",
"polygons",
",",
"facecolors",
"=",
"color",
",",
"edgecolors",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
",",
"linewidths",
"=",
"3",
",",
"alpha",
"=",
"0.4",
")",
"ax",
".",
"add_collection",
"(",
"p",
")",
"if",
"self",
".",
"dataset",
"[",
"'type'",
"]",
"==",
"'captions'",
":",
"for",
"ann",
"in",
"anns",
":",
"print",
"(",
"ann",
"[",
"'caption'",
"]",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L243-L278 |
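
A hedged display sketch for showAnns; reading the image from the 'file_name' field is an assumption, and img_id is hypothetical:

import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img_info = coco.loadImgs(img_id)[0]
plt.imshow(mpimg.imread(img_info['file_name']))
coco.showAnns(coco.loadAnns(coco.getAnnIds(imgIds=[img_id])))
plt.show()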
||
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.loadRes | (self, resFile) | return res | change by ZhengHe
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object | change by ZhengHe
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object | [
"change",
"by",
"ZhengHe",
"Load",
"result",
"file",
"and",
"return",
"a",
"result",
"api",
"object",
".",
":",
"param",
"resFile",
"(",
"str",
")",
":",
"file",
"name",
"of",
"result",
"file",
":",
"return",
":",
"res",
"(",
"obj",
")",
":",
"result",
"api",
"object"
] | def loadRes(self, resFile):
"""
change by ZhengHe
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
res.dataset['info'] = copy.deepcopy(self.dataset['info'])
res.dataset['type'] = copy.deepcopy(self.dataset['type'])
res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
# str to hex int for image_id
imgdict = {}
def get_image_dict(img_name):
# image_hash = int(int(hashlib.sha256(img_name).hexdigest(), 16) % sys.maxint)
image_hash = self.image2hash[img_name]
if image_hash in imgdict:
assert imgdict[image_hash] == img_name, 'hash collision: {0}: {1}'.format(image_hash, img_name)
else:
imgdict[image_hash] = img_name
image_dict = {"id": image_hash,
"width": 0,
"height": 0,
"file_name": img_name,
"license": '',
"url": img_name,
"date_captured": '',
}
return image_hash
print ('Loading and preparing results... ')
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, 'results is not an array of objects'
# annsImgIds = [ann['image_id'] for ann in anns]
# change by ZhengHe
annsImgIds = []
for ann in anns:
assert ann['image_id'] != '','image_id must have a name'
assert ann['caption'] != '', 'caption must not be empty'
w = jieba.cut(ann['caption'].strip().replace('。',''), cut_all=False)
p = ' '.join(w)
ann['caption'] = p
ann['image_id'] = get_image_dict(ann['image_id'])
annsImgIds.append((ann['image_id']))
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
ann['area']=sum(ann['segmentation']['counts'][2:-1:2])
ann['bbox'] = []
ann['id'] = id
ann['iscrowd'] = 0
print( 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()))
res.dataset['annotations'] = anns
res.createIndex()
return res | [
"def",
"loadRes",
"(",
"self",
",",
"resFile",
")",
":",
"res",
"=",
"COCO",
"(",
")",
"res",
".",
"dataset",
"[",
"'images'",
"]",
"=",
"[",
"img",
"for",
"img",
"in",
"self",
".",
"dataset",
"[",
"'images'",
"]",
"]",
"res",
".",
"dataset",
"[",
"'info'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"dataset",
"[",
"'info'",
"]",
")",
"res",
".",
"dataset",
"[",
"'type'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"dataset",
"[",
"'type'",
"]",
")",
"res",
".",
"dataset",
"[",
"'licenses'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"dataset",
"[",
"'licenses'",
"]",
")",
"# str to hex int for image_id",
"imgdict",
"=",
"{",
"}",
"def",
"get_image_dict",
"(",
"img_name",
")",
":",
"# image_hash = int(int(hashlib.sha256(img_name).hexdigest(), 16) % sys.maxint)",
"image_hash",
"=",
"self",
".",
"image2hash",
"[",
"img_name",
"]",
"if",
"image_hash",
"in",
"imgdict",
":",
"assert",
"imgdict",
"[",
"image_hash",
"]",
"==",
"img_name",
",",
"'hash colision: {0}: {1}'",
".",
"format",
"(",
"image_hash",
",",
"img_name",
")",
"else",
":",
"imgdict",
"[",
"image_hash",
"]",
"=",
"img_name",
"image_dict",
"=",
"{",
"\"id\"",
":",
"image_hash",
",",
"\"width\"",
":",
"0",
",",
"\"height\"",
":",
"0",
",",
"\"file_name\"",
":",
"img_name",
",",
"\"license\"",
":",
"''",
",",
"\"url\"",
":",
"img_name",
",",
"\"date_captured\"",
":",
"''",
",",
"}",
"return",
"image_hash",
"print",
"(",
"'Loading and preparing results... '",
")",
"time_t",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"anns",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"resFile",
")",
")",
"assert",
"type",
"(",
"anns",
")",
"==",
"list",
",",
"'results in not an array of objects'",
"# annsImgIds = [ann['image_id'] for ann in anns]",
"# change by ZhengHe",
"annsImgIds",
"=",
"[",
"]",
"for",
"ann",
"in",
"anns",
":",
"assert",
"ann",
"[",
"'image_id'",
"]",
"!=",
"''",
",",
"'image_id must have a name'",
"assert",
"ann",
"[",
"'caption'",
"]",
"!=",
"''",
",",
"'caption must be a string'",
"w",
"=",
"jieba",
".",
"cut",
"(",
"ann",
"[",
"'caption'",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'。','",
"'",
"),",
" ",
"c",
"t_all=F",
"a",
"lse)",
"",
"p",
"=",
"' '",
".",
"join",
"(",
"w",
")",
"ann",
"[",
"'caption'",
"]",
"=",
"p",
"ann",
"[",
"'image_id'",
"]",
"=",
"get_image_dict",
"(",
"ann",
"[",
"'image_id'",
"]",
")",
"annsImgIds",
".",
"append",
"(",
"(",
"ann",
"[",
"'image_id'",
"]",
")",
")",
"assert",
"set",
"(",
"annsImgIds",
")",
"==",
"(",
"set",
"(",
"annsImgIds",
")",
"&",
"set",
"(",
"self",
".",
"getImgIds",
"(",
")",
")",
")",
",",
"'Results do not correspond to current coco set'",
"if",
"'caption'",
"in",
"anns",
"[",
"0",
"]",
":",
"imgIds",
"=",
"set",
"(",
"[",
"img",
"[",
"'id'",
"]",
"for",
"img",
"in",
"res",
".",
"dataset",
"[",
"'images'",
"]",
"]",
")",
"&",
"set",
"(",
"[",
"ann",
"[",
"'image_id'",
"]",
"for",
"ann",
"in",
"anns",
"]",
")",
"res",
".",
"dataset",
"[",
"'images'",
"]",
"=",
"[",
"img",
"for",
"img",
"in",
"res",
".",
"dataset",
"[",
"'images'",
"]",
"if",
"img",
"[",
"'id'",
"]",
"in",
"imgIds",
"]",
"for",
"id",
",",
"ann",
"in",
"enumerate",
"(",
"anns",
")",
":",
"ann",
"[",
"'id'",
"]",
"=",
"id",
"elif",
"'bbox'",
"in",
"anns",
"[",
"0",
"]",
"and",
"not",
"anns",
"[",
"0",
"]",
"[",
"'bbox'",
"]",
"==",
"[",
"]",
":",
"res",
".",
"dataset",
"[",
"'categories'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"dataset",
"[",
"'categories'",
"]",
")",
"for",
"id",
",",
"ann",
"in",
"enumerate",
"(",
"anns",
")",
":",
"bb",
"=",
"ann",
"[",
"'bbox'",
"]",
"x1",
",",
"x2",
",",
"y1",
",",
"y2",
"=",
"[",
"bb",
"[",
"0",
"]",
",",
"bb",
"[",
"0",
"]",
"+",
"bb",
"[",
"2",
"]",
",",
"bb",
"[",
"1",
"]",
",",
"bb",
"[",
"1",
"]",
"+",
"bb",
"[",
"3",
"]",
"]",
"ann",
"[",
"'segmentation'",
"]",
"=",
"[",
"[",
"x1",
",",
"y1",
",",
"x1",
",",
"y2",
",",
"x2",
",",
"y2",
",",
"x2",
",",
"y1",
"]",
"]",
"ann",
"[",
"'area'",
"]",
"=",
"bb",
"[",
"2",
"]",
"*",
"bb",
"[",
"3",
"]",
"ann",
"[",
"'id'",
"]",
"=",
"id",
"ann",
"[",
"'iscrowd'",
"]",
"=",
"0",
"elif",
"'segmentation'",
"in",
"anns",
"[",
"0",
"]",
":",
"res",
".",
"dataset",
"[",
"'categories'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"dataset",
"[",
"'categories'",
"]",
")",
"for",
"id",
",",
"ann",
"in",
"enumerate",
"(",
"anns",
")",
":",
"ann",
"[",
"'area'",
"]",
"=",
"sum",
"(",
"ann",
"[",
"'segmentation'",
"]",
"[",
"'counts'",
"]",
"[",
"2",
":",
"-",
"1",
":",
"2",
"]",
")",
"ann",
"[",
"'bbox'",
"]",
"=",
"[",
"]",
"ann",
"[",
"'id'",
"]",
"=",
"id",
"ann",
"[",
"'iscrowd'",
"]",
"=",
"0",
"print",
"(",
"'DONE (t=%0.2fs)'",
"%",
"(",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"time_t",
")",
".",
"total_seconds",
"(",
")",
")",
")",
"res",
".",
"dataset",
"[",
"'annotations'",
"]",
"=",
"anns",
"res",
".",
"createIndex",
"(",
")",
"return",
"res"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L280-L360 |
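
A hedged sketch for this caption-oriented loadRes: the result file is a JSON list of {"image_id", "caption"} objects, and each caption is re-segmented with jieba before indexing (both file names are hypothetical):

coco = COCO('caption_validation_annotations.json')
coco_res = coco.loadRes('caption_results.json')  # [{"image_id": "...jpg", "caption": "..."}, ...]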
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.decodeMask | (R) | return M.reshape((R['size']), order='F') | Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask | Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask | [
"Decode",
"binary",
"mask",
"M",
"encoded",
"via",
"run",
"-",
"length",
"encoding",
".",
":",
"param",
"R",
"(",
"object",
"RLE",
")",
":",
"run",
"-",
"length",
"encoding",
"of",
"binary",
"mask",
":",
"return",
":",
"M",
"(",
"bool",
"2D",
"array",
")",
":",
"decoded",
"binary",
"mask"
] | def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
R['counts'][pos]
M[n] = val
n += 1
return M.reshape((R['size']), order='F') | [
"def",
"decodeMask",
"(",
"R",
")",
":",
"N",
"=",
"len",
"(",
"R",
"[",
"'counts'",
"]",
")",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"R",
"[",
"'size'",
"]",
"[",
"0",
"]",
"*",
"R",
"[",
"'size'",
"]",
"[",
"1",
"]",
",",
")",
")",
"n",
"=",
"0",
"val",
"=",
"1",
"for",
"pos",
"in",
"range",
"(",
"N",
")",
":",
"val",
"=",
"not",
"val",
"for",
"c",
"in",
"range",
"(",
"R",
"[",
"'counts'",
"]",
"[",
"pos",
"]",
")",
":",
"R",
"[",
"'counts'",
"]",
"[",
"pos",
"]",
"M",
"[",
"n",
"]",
"=",
"val",
"n",
"+=",
"1",
"return",
"M",
".",
"reshape",
"(",
"(",
"R",
"[",
"'size'",
"]",
")",
",",
"order",
"=",
"'F'",
")"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L364-L380 |
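
A small worked example of decodeMask; counts always begin with the number of leading zeros of the mask flattened in column-major (Fortran) order:

R = {'size': [2, 3], 'counts': [2, 3, 1]}  # 2 zeros, 3 ones, 1 zero
M = COCO.decodeMask(R)
# M == [[0., 1., 1.],
#       [0., 1., 0.]]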
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.encodeMask | (M) | return {'size': [h, w],
'counts': counts_list ,
} | Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask | Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask | [
"Encode",
"binary",
"mask",
"M",
"using",
"run",
"-",
"length",
"encoding",
".",
":",
"param",
"M",
"(",
"bool",
"2D",
"array",
")",
":",
"binary",
"mask",
"to",
"encode",
":",
"return",
":",
"R",
"(",
"object",
"RLE",
")",
":",
"run",
"-",
"length",
"encoding",
"of",
"binary",
"mask"
] | def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
} | [
"def",
"encodeMask",
"(",
"M",
")",
":",
"[",
"h",
",",
"w",
"]",
"=",
"M",
".",
"shape",
"M",
"=",
"M",
".",
"flatten",
"(",
"order",
"=",
"'F'",
")",
"N",
"=",
"len",
"(",
"M",
")",
"counts_list",
"=",
"[",
"]",
"pos",
"=",
"0",
"# counts",
"counts_list",
".",
"append",
"(",
"1",
")",
"diffs",
"=",
"np",
".",
"logical_xor",
"(",
"M",
"[",
"0",
":",
"N",
"-",
"1",
"]",
",",
"M",
"[",
"1",
":",
"N",
"]",
")",
"for",
"diff",
"in",
"diffs",
":",
"if",
"diff",
":",
"pos",
"+=",
"1",
"counts_list",
".",
"append",
"(",
"1",
")",
"else",
":",
"counts_list",
"[",
"pos",
"]",
"+=",
"1",
"# if array starts from 1. start with 0 counts for 0",
"if",
"M",
"[",
"0",
"]",
"==",
"1",
":",
"counts_list",
"=",
"[",
"0",
"]",
"+",
"counts_list",
"return",
"{",
"'size'",
":",
"[",
"h",
",",
"w",
"]",
",",
"'counts'",
":",
"counts_list",
",",
"}"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L383-L408 |
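
encodeMask inverts decodeMask, so a round trip recovers the mask (note the column-major flattening):

import numpy as np

M = np.array([[0, 1, 1],
              [0, 1, 0]], dtype=bool)
R = COCO.encodeMask(M)                  # {'size': [2, 3], 'counts': [2, 3, 1]}
assert (COCO.decodeMask(R) == M).all()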
|
AIChallenger/AI_Challenger_2017 | 52014e0defbbdd85bf94ab05d308300d5764022f | Evaluation/caption_eval/coco_caption/pycxtools/coco.py | python | COCO.segToMask | ( S, h, w ) | return M | Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask | Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask | [
"Convert",
"polygon",
"segmentation",
"to",
"binary",
"mask",
".",
":",
"param",
"S",
"(",
"float",
"array",
")",
":",
"polygon",
"segmentation",
"mask",
":",
"param",
"h",
"(",
"int",
")",
":",
"target",
"mask",
"height",
":",
"param",
"w",
"(",
"int",
")",
":",
"target",
"mask",
"width",
":",
"return",
":",
"M",
"(",
"bool",
"2D",
"array",
")",
":",
"binary",
"mask"
] | def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M | [
"def",
"segToMask",
"(",
"S",
",",
"h",
",",
"w",
")",
":",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"h",
",",
"w",
")",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"s",
"in",
"S",
":",
"N",
"=",
"len",
"(",
"s",
")",
"rr",
",",
"cc",
"=",
"polygon",
"(",
"np",
".",
"array",
"(",
"s",
"[",
"1",
":",
"N",
":",
"2",
"]",
")",
",",
"np",
".",
"array",
"(",
"s",
"[",
"0",
":",
"N",
":",
"2",
"]",
")",
")",
"# (y, x)",
"M",
"[",
"rr",
",",
"cc",
"]",
"=",
"1",
"return",
"M"
] | https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Evaluation/caption_eval/coco_caption/pycxtools/coco.py#L411-L424 |
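
A hedged sketch for segToMask, assuming the `polygon` helper used above is skimage.draw.polygon; each segment lists vertices as [x0, y0, x1, y1, ...]:

S = [[1, 1, 4, 1, 4, 4, 1, 4]]   # one square polygon
M = COCO.segToMask(S, h=6, w=6)  # 6x6 boolean mask, True inside the square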
|
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/table.py | python | Box.__init__ | (self, pos) | pos: (x1, x2, y1, y2) | pos: (x1, x2, y1, y2) | [
"pos",
":",
"(",
"x1",
"x2",
"y1",
"y2",
")"
] | def __init__(self, pos):
"""pos: (x1, x2, y1, y2)"""
self.set_pos(pos) | [
"def",
"__init__",
"(",
"self",
",",
"pos",
")",
":",
"self",
".",
"set_pos",
"(",
"pos",
")"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/table.py#L30-L32 |
||
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/eval.py | python | eval_relations | (gt:List[List], res:List[List], cmp_blank=True) | return precision, recall | Evaluate results
Args:
gt: a list of list of Relation
res: a list of list of Relation | Evaluate results | [
"Evaluate",
"results"
] | def eval_relations(gt:List[List], res:List[List], cmp_blank=True):
"""Evaluate results
Args:
gt: a list of list of Relation
res: a list of list of Relation
"""
# TODO: decide how to compute the overall recall and precision
assert len(gt) == len(res)
tot_prec = 0
tot_recall = 0
total = 0
# print("evaluating result...")
# for _gt, _res in tqdm(zip(gt, res)):
# for _gt, _res in tqdm(zip(gt, res), total=len(gt), desc='eval'):
idx, t = 0, len(gt)
for _gt, _res in zip(gt, res):
idx += 1
print('Eval %d/%d (%d%%)' % (idx, t, idx / t * 100), ' ' * 45, end='\r')
corr = compare_rel(_gt, _res, cmp_blank)
precision = corr / len(_res) if len(_res) != 0 else 0
recall = corr / len(_gt) if len(_gt) != 0 else 0
tot_prec += precision
tot_recall += recall
total += 1
# print()
precision = tot_prec / total
recall = tot_recall / total
# print("Test on %d instances. Precision: %.2f, Recall: %.2f" % (
# total, precision, recall))
return precision, recall | [
"def",
"eval_relations",
"(",
"gt",
":",
"List",
"[",
"List",
"]",
",",
"res",
":",
"List",
"[",
"List",
"]",
",",
"cmp_blank",
"=",
"True",
")",
":",
"#TODO to know how to calculate the total recall and prec",
"assert",
"len",
"(",
"gt",
")",
"==",
"len",
"(",
"res",
")",
"tot_prec",
"=",
"0",
"tot_recall",
"=",
"0",
"total",
"=",
"0",
"# print(\"evaluating result...\")",
"# for _gt, _res in tqdm(zip(gt, res)):",
"# for _gt, _res in tqdm(zip(gt, res), total=len(gt), desc='eval'):",
"idx",
",",
"t",
"=",
"0",
",",
"len",
"(",
"gt",
")",
"for",
"_gt",
",",
"_res",
"in",
"zip",
"(",
"gt",
",",
"res",
")",
":",
"idx",
"+=",
"1",
"print",
"(",
"'Eval %d/%d (%d%%)'",
"%",
"(",
"idx",
",",
"t",
",",
"idx",
"/",
"t",
"*",
"100",
")",
",",
"' '",
"*",
"45",
",",
"end",
"=",
"'\\r'",
")",
"corr",
"=",
"compare_rel",
"(",
"_gt",
",",
"_res",
",",
"cmp_blank",
")",
"precision",
"=",
"corr",
"/",
"len",
"(",
"_res",
")",
"if",
"len",
"(",
"_res",
")",
"!=",
"0",
"else",
"0",
"recall",
"=",
"corr",
"/",
"len",
"(",
"_gt",
")",
"if",
"len",
"(",
"_gt",
")",
"!=",
"0",
"else",
"0",
"tot_prec",
"+=",
"precision",
"tot_recall",
"+=",
"recall",
"total",
"+=",
"1",
"# print()",
"precision",
"=",
"tot_prec",
"/",
"total",
"recall",
"=",
"tot_recall",
"/",
"total",
"# print(\"Test on %d instances. Precision: %.2f, Recall: %.2f\" % (",
"# total, precision, recall))",
"return",
"precision",
",",
"recall"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/eval.py#L30-L64 |
|
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/eval.py | python | Table2Relations | (t:Table) | return ret | Convert a Table object to a List of Relation. | Convert a Table object to a List of Relation. | [
"Convert",
"a",
"Table",
"object",
"to",
"a",
"List",
"of",
"Relation",
"."
] | def Table2Relations(t:Table):
"""Convert a Table object to a List of Relation.
"""
ret = []
cl = t.coo2cell_id
# remove duplicates with pair set
used = set()
# look right
for r in range(t.row_n):
for cFrom in range(t.col_n - 1):
cTo = cFrom + 1
loop = True
while loop and cTo < t.col_n:
fid, tid = cl[r][cFrom], cl[r][cTo]
if fid != -1 and tid != -1 and fid != tid:
if (fid, tid) not in used:
ret.append(Relation(
from_text=t.cells[fid].text,
to_text=t.cells[tid].text,
direction=DIR_HORIZ,
from_id=fid,
to_id=tid,
no_blanks=cTo - cFrom - 1
))
used.add((fid, tid))
loop = False
else:
if fid != -1 and tid != -1 and fid == tid:
cFrom = cTo
cTo += 1
# look down
for c in range(t.col_n):
for rFrom in range(t.row_n - 1):
rTo = rFrom + 1
loop = True
while loop and rTo < t.row_n:
fid, tid = cl[rFrom][c], cl[rTo][c]
if fid != -1 and tid != -1 and fid != tid:
if (fid, tid) not in used:
ret.append(Relation(
from_text=t.cells[fid].text,
to_text=t.cells[tid].text,
direction=DIR_VERT,
from_id=fid,
to_id=tid,
no_blanks=rTo - rFrom - 1
))
used.add((fid, tid))
loop = False
else:
if fid != -1 and tid != -1 and fid == tid:
rFrom = rTo
rTo += 1
return ret | [
"def",
"Table2Relations",
"(",
"t",
":",
"Table",
")",
":",
"ret",
"=",
"[",
"]",
"cl",
"=",
"t",
".",
"coo2cell_id",
"# remove duplicates with pair set",
"used",
"=",
"set",
"(",
")",
"# look right",
"for",
"r",
"in",
"range",
"(",
"t",
".",
"row_n",
")",
":",
"for",
"cFrom",
"in",
"range",
"(",
"t",
".",
"col_n",
"-",
"1",
")",
":",
"cTo",
"=",
"cFrom",
"+",
"1",
"loop",
"=",
"True",
"while",
"loop",
"and",
"cTo",
"<",
"t",
".",
"col_n",
":",
"fid",
",",
"tid",
"=",
"cl",
"[",
"r",
"]",
"[",
"cFrom",
"]",
",",
"cl",
"[",
"r",
"]",
"[",
"cTo",
"]",
"if",
"fid",
"!=",
"-",
"1",
"and",
"tid",
"!=",
"-",
"1",
"and",
"fid",
"!=",
"tid",
":",
"if",
"(",
"fid",
",",
"tid",
")",
"not",
"in",
"used",
":",
"ret",
".",
"append",
"(",
"Relation",
"(",
"from_text",
"=",
"t",
".",
"cells",
"[",
"fid",
"]",
".",
"text",
",",
"to_text",
"=",
"t",
".",
"cells",
"[",
"tid",
"]",
".",
"text",
",",
"direction",
"=",
"DIR_HORIZ",
",",
"from_id",
"=",
"fid",
",",
"to_id",
"=",
"tid",
",",
"no_blanks",
"=",
"cTo",
"-",
"cFrom",
"-",
"1",
")",
")",
"used",
".",
"add",
"(",
"(",
"fid",
",",
"tid",
")",
")",
"loop",
"=",
"False",
"else",
":",
"if",
"fid",
"!=",
"-",
"1",
"and",
"tid",
"!=",
"-",
"1",
"and",
"fid",
"==",
"tid",
":",
"cFrom",
"=",
"cTo",
"cTo",
"+=",
"1",
"# look down",
"for",
"c",
"in",
"range",
"(",
"t",
".",
"col_n",
")",
":",
"for",
"rFrom",
"in",
"range",
"(",
"t",
".",
"row_n",
"-",
"1",
")",
":",
"rTo",
"=",
"rFrom",
"+",
"1",
"loop",
"=",
"True",
"while",
"loop",
"and",
"rTo",
"<",
"t",
".",
"row_n",
":",
"fid",
",",
"tid",
"=",
"cl",
"[",
"rFrom",
"]",
"[",
"c",
"]",
",",
"cl",
"[",
"rTo",
"]",
"[",
"c",
"]",
"if",
"fid",
"!=",
"-",
"1",
"and",
"tid",
"!=",
"-",
"1",
"and",
"fid",
"!=",
"tid",
":",
"if",
"(",
"fid",
",",
"tid",
")",
"not",
"in",
"used",
":",
"ret",
".",
"append",
"(",
"Relation",
"(",
"from_text",
"=",
"t",
".",
"cells",
"[",
"fid",
"]",
".",
"text",
",",
"to_text",
"=",
"t",
".",
"cells",
"[",
"tid",
"]",
".",
"text",
",",
"direction",
"=",
"DIR_VERT",
",",
"from_id",
"=",
"fid",
",",
"to_id",
"=",
"tid",
",",
"no_blanks",
"=",
"rTo",
"-",
"rFrom",
"-",
"1",
")",
")",
"used",
".",
"add",
"(",
"(",
"fid",
",",
"tid",
")",
")",
"loop",
"=",
"False",
"else",
":",
"if",
"fid",
"!=",
"-",
"1",
"and",
"tid",
"!=",
"-",
"1",
"and",
"fid",
"==",
"tid",
":",
"rFrom",
"=",
"rTo",
"rTo",
"+=",
"1",
"return",
"ret"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/eval.py#L89-L145 |
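
A hedged sketch of inspecting the output; attribute access on Relation is an assumption based on how the instances are constructed above:

rels = Table2Relations(table)  # `table` built e.g. by json2Table below
for rel in rels[:5]:
    axis = 'horizontal' if rel.direction == DIR_HORIZ else 'vertical'
    print(rel.from_text, '->', rel.to_text, axis, 'blanks skipped:', rel.no_blanks)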
|
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/eval.py | python | json2Table | (json_obj, tid="", splitted_content=False) | return Table(row_n + 1, col_n + 1, cells, tid) | Construct a Table object from json object
Args:
json_obj: a json object
Returns:
a Table object | Construct a Table object from json object | [
"Construct",
"a",
"Table",
"object",
"from",
"json",
"object"
] | def json2Table(json_obj, tid="", splitted_content=False):
"""Construct a Table object from json object
Args:
json_obj: a json object
Returns:
a Table object
"""
jo = json_obj["cells"]
row_n, col_n = 0, 0
cells = []
for co in jo:
content = co["content"]
if content is None: continue
if splitted_content:
content = " ".join(content)
else:
content = content.strip()
if content == "": continue
start_row = co["start_row"]
end_row = co["end_row"]
start_col = co["start_col"]
end_col = co["end_col"]
row_n = max(row_n, end_row)
col_n = max(col_n, end_col)
cell = Chunk(content, (start_row, end_row, start_col, end_col))
cells.append(cell)
return Table(row_n + 1, col_n + 1, cells, tid) | [
"def",
"json2Table",
"(",
"json_obj",
",",
"tid",
"=",
"\"\"",
",",
"splitted_content",
"=",
"False",
")",
":",
"jo",
"=",
"json_obj",
"[",
"\"cells\"",
"]",
"row_n",
",",
"col_n",
"=",
"0",
",",
"0",
"cells",
"=",
"[",
"]",
"for",
"co",
"in",
"jo",
":",
"content",
"=",
"co",
"[",
"\"content\"",
"]",
"if",
"content",
"is",
"None",
":",
"continue",
"if",
"splitted_content",
":",
"content",
"=",
"\" \"",
".",
"join",
"(",
"content",
")",
"else",
":",
"content",
"=",
"content",
".",
"strip",
"(",
")",
"if",
"content",
"==",
"\"\"",
":",
"continue",
"start_row",
"=",
"co",
"[",
"\"start_row\"",
"]",
"end_row",
"=",
"co",
"[",
"\"end_row\"",
"]",
"start_col",
"=",
"co",
"[",
"\"start_col\"",
"]",
"end_col",
"=",
"co",
"[",
"\"end_col\"",
"]",
"row_n",
"=",
"max",
"(",
"row_n",
",",
"end_row",
")",
"col_n",
"=",
"max",
"(",
"col_n",
",",
"end_col",
")",
"cell",
"=",
"Chunk",
"(",
"content",
",",
"(",
"start_row",
",",
"end_row",
",",
"start_col",
",",
"end_col",
")",
")",
"cells",
".",
"append",
"(",
"cell",
")",
"return",
"Table",
"(",
"row_n",
"+",
"1",
",",
"col_n",
"+",
"1",
",",
"cells",
",",
"tid",
")"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/eval.py#L147-L174 |
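
A hedged end-to-end sketch tying json2Table to Table2Relations; the structure-file path mimics the SciTSR layout but is hypothetical:

import json

with open('SciTSR/test/structure/0001.json', encoding='utf-8') as f:
    table = json2Table(json.load(f), tid='0001', splitted_content=True)
rels = Table2Relations(table)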
|
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/model.py | python | Attention.forward | (self, x, y, mask) | return x | Shapes:
mask: [nodes/edges, edges/nodes]
q: [nodes/edges, h]
k: [edges/nodes, h]
v: [edges/nodes, h]
score: [nodes/edges, edges/nodes]
x_atten: [nodes/edges, h] | Shapes:
mask: [nodes/edges, edges/nodes]
q: [nodes/edges, h]
k: [edges/nodes, h]
v: [edges/nodes, h]
score: [nodes/edges, edges/nodes]
x_atten: [nodes/edges, h] | [
"Shapes",
":",
"mask",
":",
"[",
"nodes",
"/",
"edges",
"edges",
"/",
"nodes",
"]",
"q",
":",
"[",
"nodes",
"/",
"edges",
"h",
"]",
"k",
":",
"[",
"edges",
"/",
"nodes",
"h",
"]",
"v",
":",
"[",
"edges",
"/",
"nodes",
"h",
"]",
"score",
":",
"[",
"nodes",
"/",
"edges",
"edges",
"/",
"nodes",
"]",
"x_atten",
":",
"[",
"nodes",
"/",
"edges",
"h",
"]"
] | def forward(self, x, y, mask):
"""
Shapes:
mask: [nodes/edges, edges/nodes]
q: [nodes/edges, h]
k: [edges/nodes, h]
v: [edges/nodes, h]
score: [nodes/edges, edges/nodes]
x_atten: [nodes/edges, h]
"""
q = self.linear_q(x)
k = self.linear_k(y)
v = self.linear_v(y)
score = torch.mm(q, k.t()) / math.sqrt(self.size)
score = self.masked_softmax(score, mask, dim=1)
x_atten = torch.mm(score, v)
# dropout
x_atten = self.dropout(x_atten)
x = self.layer_norm_1(x + x_atten)
x_linear = self.feed_forward(x)
# dropout
x_linear = self.dropout(x_linear)
x = self.layer_norm_2(x + x_linear)
return x | [
"def",
"forward",
"(",
"self",
",",
"x",
",",
"y",
",",
"mask",
")",
":",
"q",
"=",
"self",
".",
"linear_q",
"(",
"x",
")",
"k",
"=",
"self",
".",
"linear_k",
"(",
"y",
")",
"v",
"=",
"self",
".",
"linear_v",
"(",
"y",
")",
"score",
"=",
"torch",
".",
"mm",
"(",
"q",
",",
"k",
".",
"t",
"(",
")",
")",
"/",
"math",
".",
"sqrt",
"(",
"self",
".",
"size",
")",
"score",
"=",
"self",
".",
"masked_softmax",
"(",
"score",
",",
"mask",
",",
"dim",
"=",
"1",
")",
"x_atten",
"=",
"torch",
".",
"mm",
"(",
"score",
",",
"v",
")",
"# dropout",
"x_atten",
"=",
"self",
".",
"dropout",
"(",
"x_atten",
")",
"x",
"=",
"self",
".",
"layer_norm_1",
"(",
"x",
"+",
"x_atten",
")",
"x_linear",
"=",
"self",
".",
"feed_forward",
"(",
"x",
")",
"# dropout",
"x_linear",
"=",
"self",
".",
"dropout",
"(",
"x_linear",
")",
"x",
"=",
"self",
".",
"layer_norm_2",
"(",
"x",
"+",
"x_linear",
")",
"return",
"x"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/model.py#L38-L61 |
|
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/graph.py | python | Vertex.__init__ | (self, vid: int, chunk: Chunk, tab_h, tab_w) | Args:
vid: Vertex id
chunk: the chunk to extract features
tab_h: height of the table (y-axis)
tab_w: width of the table (x-axis) | Args:
vid: Vertex id
chunk: the chunk to extract features
tab_h: height of the table (y-axis)
tab_w: width of the table (x-axis) | [
"Args",
":",
"vid",
":",
"Vertex",
"id",
"chunk",
":",
"the",
"chunk",
"to",
"extract",
"features",
"tab_h",
":",
"height",
"of",
"the",
"table",
"(",
"y",
"-",
"axis",
")",
"tab_w",
":",
"width",
"of",
"the",
"table",
"(",
"x",
"-",
"axis",
")"
] | def __init__(self, vid: int, chunk: Chunk, tab_h, tab_w):
"""
Args:
vid: Vertex id
chunk: the chunk to extract features
tab_h: height of the table (y-axis)
tab_w: width of the table (x-axis)
"""
self.vid = vid
self.tab_h = tab_h
self.tab_w = tab_w
self.chunk = chunk
self.features = self.get_features() | [
"def",
"__init__",
"(",
"self",
",",
"vid",
":",
"int",
",",
"chunk",
":",
"Chunk",
",",
"tab_h",
",",
"tab_w",
")",
":",
"self",
".",
"vid",
"=",
"vid",
"self",
".",
"tab_h",
"=",
"tab_h",
"self",
".",
"tab_w",
"=",
"tab_w",
"self",
".",
"chunk",
"=",
"chunk",
"self",
".",
"features",
"=",
"self",
".",
"get_features",
"(",
")"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/graph.py#L15-L27 |
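
A hedged construction sketch; the Chunk signature is assumed from its use elsewhere in the repo, and the table extents are arbitrary:

chunk = Chunk('0.95', (0, 0, 2, 2))  # text plus position tuple (assumed)
v = Vertex(vid=0, chunk=chunk, tab_h=100.0, tab_w=250.0)
print(v.features)                    # feature vector derived from the chunk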
||
Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/train.py | python | patch_chunks | (dataset_folder) | return 1 | To patch the all chunk files of the train & test dataset that have the problem of duplicate last character
of the last cell in all chunk files
:param dataset_folder: train dataset path
:return: 1 | To patch the all chunk files of the train & test dataset that have the problem of duplicate last character
of the last cell in all chunk files
:param dataset_folder: train dataset path
:return: 1 | [
"To",
"patch",
"the",
"all",
"chunk",
"files",
"of",
"the",
"train",
"&",
"test",
"dataset",
"that",
"have",
"the",
"problem",
"of",
"duplicate",
"last",
"character",
"of",
"the",
"last",
"cell",
"in",
"all",
"chunk",
"files",
":",
"param",
"dataset_folder",
":",
"train",
"dataset",
"path",
":",
"return",
":",
"1"
] | def patch_chunks(dataset_folder):
"""
To patch the all chunk files of the train & test dataset that have the problem of duplicate last character
of the last cell in all chunk files
:param dataset_folder: train dataset path
:return: 1
"""
import os
import shutil
from pathlib import Path
shutil.move(os.path.join(dataset_folder, "chunk"), os.path.join(dataset_folder, "chunk-old"))
dir_ = Path(os.path.join(dataset_folder, "chunk-old"))
os.makedirs(os.path.join(dataset_folder, "chunk"), exist_ok=True)
for chunk_path in dir_.iterdir():
# print(chunk_path)
with open(str(chunk_path), encoding="utf-8") as f:
chunks = json.load(f)['chunks']
chunks[-1]['text'] = chunks[-1]['text'][:-1]
with open(str(chunk_path).replace("chunk-old", "chunk"), "w", encoding="utf-8") as ofile:
json.dump({"chunks": chunks}, ofile)
print("Input files patched, ready for the use")
return 1 | [
"def",
"patch_chunks",
"(",
"dataset_folder",
")",
":",
"import",
"os",
"import",
"shutil",
"from",
"pathlib",
"import",
"Path",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_folder",
",",
"\"chunk\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"dataset_folder",
",",
"\"chunk-old\"",
")",
")",
"dir_",
"=",
"Path",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_folder",
",",
"\"chunk-old\"",
")",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_folder",
",",
"\"chunk\"",
")",
",",
"exist_ok",
"=",
"True",
")",
"for",
"chunk_path",
"in",
"dir_",
".",
"iterdir",
"(",
")",
":",
"# print(chunk_path)",
"with",
"open",
"(",
"str",
"(",
"chunk_path",
")",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"chunks",
"=",
"json",
".",
"load",
"(",
"f",
")",
"[",
"'chunks'",
"]",
"chunks",
"[",
"-",
"1",
"]",
"[",
"'text'",
"]",
"=",
"chunks",
"[",
"-",
"1",
"]",
"[",
"'text'",
"]",
"[",
":",
"-",
"1",
"]",
"with",
"open",
"(",
"str",
"(",
"chunk_path",
")",
".",
"replace",
"(",
"\"chunk-old\"",
",",
"\"chunk\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"ofile",
":",
"json",
".",
"dump",
"(",
"{",
"\"chunks\"",
":",
"chunks",
"}",
",",
"ofile",
")",
"print",
"(",
"\"Input files patched, ready for the use\"",
")",
"return",
"1"
] | https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/train.py#L146-L170 |
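
A one-line usage sketch (the dataset path is hypothetical); note the original directory is kept as chunk-old, so the call is not idempotent:

patch_chunks('/data/SciTSR/train')  # rewrites chunk/ with the duplicated last character removed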