import os
import sys

import mpi4py.rc
mpi4py.rc.initialize = False  # defer MPI_Init: do not initialize MPI automatically on import
here = os.path.dirname(os.path.abspath(__file__))
top = os.path.dirname(os.path.dirname(os.path.dirname(here)))
sys.path.append(top)
BNAME = os.path.splitext(os.path.basename(__file__))[0]
from deephyper.benchmarks import util
timer = util.Timer()
timer.start('module loading')
from deephyper.benchmarks.util import TerminateOnTimeOut
print("using python:", sys.executable)
print("using deephyper lib:", os.path.abspath(util.__file__))
print("importing keras...")
from keras.models import Sequential, Model
from keras.layers.embeddings import Embedding
from keras.layers import Input, Activation, Dense, Permute, Dropout, add, dot, concatenate
from keras.layers import LSTM
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping
from functools import reduce
import tarfile
import numpy as np
import re
from keras import layers
from deephyper.benchmarks import keras_cmdline
import hashlib
'''
Five digits inverted:
+ One layer LSTM (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
'''
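# A minimal sketch (assumption, not code from this benchmark) of the model the
# docstring describes: a one-layer LSTM with 128 hidden units in the classic
# Keras addition_rnn encoder/decoder arrangement. MAXLEN, N_CHARS, and DIGITS
# are hypothetical placeholders for the padded input length, vocabulary size,
# and number of digits.
from keras.models import Sequential
from keras import layers

MAXLEN, N_CHARS, DIGITS = 7, 12, 5
sketch = Sequential()
sketch.add(layers.LSTM(128, input_shape=(MAXLEN, N_CHARS)))   # encoder: read the input sequence
sketch.add(layers.RepeatVector(DIGITS + 1))                   # one copy of the encoding per output step
sketch.add(layers.LSTM(128, return_sequences=True))           # decoder
sketch.add(layers.TimeDistributed(layers.Dense(N_CHARS, activation='softmax')))
sketch.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])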
import os
from pprint import pprint
import sys
here = os.path.dirname(os.path.abspath(__file__))
top = os.path.dirname(os.path.dirname(os.path.dirname(here)))
sys.path.append(top)
BNAME = os.path.splitext(os.path.basename(__file__))[0]
from deephyper.benchmarks import util
timer = util.Timer()
timer.start('module loading')
from deephyper.benchmarks.util import TerminateOnTimeOut
import numpy as np
from six.moves import range
from keras.models import Sequential
from keras import layers
from keras.models import load_model
from deephyper.benchmarks import keras_cmdline
from keras.callbacks import EarlyStopping
from numpy.random import seed
from tensorflow import set_random_seed
timer.end()
seed(1)             # fix the numpy RNG seed for reproducibility
set_random_seed(2)  # fix the tensorflow RNG seed for reproducibility
challenges = {
    # QA1 with 10,000 samples
    'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
    # QA2 with 10,000 samples
    'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
timer.start('stage in')
if param_dict['data_source']:
    data_source = param_dict['data_source']
else:
    data_source = os.path.dirname(os.path.abspath(__file__))
    data_source = os.path.join(data_source, 'data')
try:
    paths = util.stage_in(['babi-tasks-v1-2.tar.gz'],
                          source=data_source,
                          dest=param_dict['stage_in_destination'])
    path = paths['babi-tasks-v1-2.tar.gz']
except:
    print('Error downloading dataset, please download it manually:\n'
          '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
          '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
    raise
print('Extracting stories for the challenge:', challenge_type)
with tarfile.open(path) as tar:
    train_stories = get_stories(tar.extractfile(challenge.format('train')))
    test_stories = get_stories(tar.extractfile(challenge.format('test')))
timer.end()
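# `get_stories` is called above but not defined in this snippet. A minimal
# sketch of it and its `tokenize` helper, assuming the benchmark mirrors the
# Keras babi_memnn example it is derived from (supporting-fact filtering
# omitted for brevity):
import re
from functools import reduce

def tokenize(sent):
    '''Return the tokens of a sentence, including punctuation.'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]

def get_stories(f, max_length=None):
    '''Parse a bAbI-format file into (story, question, answer) triples.'''
    data = []
    story = []
    for line in f.readlines():
        line = line.decode('utf-8').strip()
        nid, line = line.split(' ', 1)
        if int(nid) == 1:
            story = []                    # line id 1 starts a new story
        if '\t' in line:                  # question<TAB>answer<TAB>supporting ids
            q, a, _ = line.split('\t')
            data.append(([x for x in story if x], tokenize(q), a))
            story.append('')
        else:                             # ordinary sentence: extend the story
            story.append(tokenize(line))
    flatten = lambda story: reduce(lambda x, y: x + y, story)
    return [(flatten(s), q, a) for s, q, a in data
            if not max_length or len(flatten(s)) <= max_length]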
timer.start('preprocessing')
# Tail of the training call; the head is reconstructed here (assumption) as a
# standard fit_generator invocation over an ImageDataGenerator flow.
model.fit_generator(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
                    steps_per_epoch=steps_per_epoch, verbose=1,
                    validation_data=datagen.flow(x_test, y_test, batch_size=BATCH_SIZE),
                    validation_steps=10,
                    workers=1)
# disabled alternatives:
# validation_split=0.30,
# validation_data=(x_test, y_test),
timer.end()
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
if model_path:
    timer.start('model save')
    model.save(model_path)
    util.save_meta_data(param_dict, model_mda_path)
    timer.end()
end_time = time.time()
# DeepHyper minimizes the objective, so report the negated test accuracy.
print('OUTPUT:', -score[1])
return -score[1]
def run(param_dict):
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    pprint(param_dict)
    timer.start('stage in')
    if param_dict['data_source']:
        data_source = param_dict['data_source']
    else:
        data_source = os.path.dirname(os.path.abspath(__file__))
        data_source = os.path.join(data_source, 'data')
    paths = util.stage_in(['dataset'], source=data_source,
                          dest=param_dict['stage_in_destination'])
    path = paths['dataset']
    data = np.loadtxt(path)        # two-column text file: x in column 0, y in column 1
    training_x = data[:, 0]
    training_y = data[:, 1]
    n_pt = len(training_x)
    timer.end()

    timer.start('preprocessing')
    penalty = param_dict['penalty']
    epochs = param_dict['epochs']
    if not isinstance(epochs, int):
        print("converting epochs to int:", epochs)
        epochs = int(epochs)
    lr = param_dict['lr']
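    # A hypothetical continuation (assumption; the snippet ends here before the
    # real model is built): close the preprocessing timer, then fit a
    # one-parameter linear model to the staged-in data, using `penalty` as an
    # L2 weight regularizer and `lr` as the SGD learning rate.
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.regularizers import l2
    from keras.optimizers import SGD

    timer.end()
    model = Sequential()
    model.add(Dense(1, input_dim=1, kernel_regularizer=l2(penalty)))
    model.compile(loss='mse', optimizer=SGD(lr=lr))
    model.fit(training_x.reshape(-1, 1), training_y, epochs=epochs, verbose=0)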
def augment_parser(parser):
    parser.add_argument('--data_aug', action='store', dest='data_aug',
                        nargs='?', const=1, type=util.str2bool, default=False,
                        help='boolean. Whether to apply data augmentation?')
    parser.add_argument('--num_conv', action='store', dest='num_conv',
                        nargs='?', const=2, type=int, default=2,
                        help='number of convolution layers')
    parser.add_argument('--dim_capsule', action='store', dest='dim_capsule',
                        nargs='?', const=2, type=int, default=16,
                        help='dimension of capsule')
    parser.add_argument('--routings', action='store', dest='routings',
                        nargs='?', const=2, type=int, default=3,
                        help='number of routing iterations')
    parser.add_argument('--share_weights', action='store', dest='share_weights',
                        nargs='?', const=1, type=util.str2bool, default=True,
                        help='boolean. Whether to share transform weights across capsules?')

    # optimizer parameters
    parser.add_argument('--learning_rate', action='store', dest='lr',
                        nargs='?', const=1, type=float, default=7.543875,
                        help='float >= 0. Learning rate')
    parser.add_argument('--momentum', action='store', dest='momentum',
                        nargs='?', const=1, type=float, default=0.0,
                        help='float >= 0. Parameter updates momentum')
    parser.add_argument('--decay', action='store', dest='decay',
                        nargs='?', const=1, type=float, default=0.0,
                        help='float >= 0. Learning rate decay over each update')
    parser.add_argument('--nesterov', action='store', dest='nesterov',
                        nargs='?', const=1, type=util.str2bool, default=False,
                        help='boolean. Whether to apply Nesterov momentum?')
    parser.add_argument('--rho', action='store', dest='rho',
                        nargs='?', const=1, type=float, default=0.9,
                        help='float >= 0')
    parser.add_argument('--epsilon', action='store', dest='epsilon',
                        nargs='?', const=1, type=float, default=1e-08,
                        help='float >= 0')
    parser.add_argument('--beta1', action='store', dest='beta1',
                        nargs='?', const=1, type=float, default=0.9,
                        help='float >= 0')
    parser.add_argument('--beta2', action='store', dest='beta2',
                        nargs='?', const=1, type=float, default=0.999,
                        help='float >= 0')
    # model and data I/O options
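
# A minimal sketch (assumption) of how these benchmark pieces are usually wired
# together: build the shared command-line parser, let augment_parser add the
# benchmark-specific options, then call run(). `keras_cmdline.create_parser` is
# assumed here; only fill_missing_defaults appears in this snippet.
if __name__ == '__main__':
    parser = keras_cmdline.create_parser()
    augment_parser(parser)
    param_dict = vars(parser.parse_args())
    run(param_dict)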