Dependencies #24

Open · wants to merge 4 commits into base: main
3 changes: 0 additions & 3 deletions .gitignore
@@ -127,6 +127,3 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
-
-# Working folder
-working/
1 change: 0 additions & 1 deletion environment.yml
@@ -3,7 +3,6 @@ channels:
 - conda-forge
 - defaults
 dependencies:
-- deepchem=2.4.0
 - numpy=1.19.2
 - pandas=1.1.3
 - pip=20.3.3
137 changes: 52 additions & 85 deletions gandy/models/dcgan.py
@@ -17,8 +17,7 @@
 # deep learning imports
 import deepchem
 import tensorflow as tf
-from tensorflow.keras.layers import Concatenate, Dense, Input
-from tensorflow.keras.layers import Dropout
+from tensorflow.keras.layers import Concatenate, Dense, Input, Flatten, Dropout, BatchNormalization
 
 # typing imports
 from typing import Tuple, Type
@@ -59,12 +58,12 @@ def __init__(self, xshape, yshape, noise_shape, **kwargs):
         # base hyperparameters for generator and discriminator
 
         Base_hyperparams = dict(layer_dimensions=[128],
-                                dropout=0.05,
+                                dropout=0.0,
                                 activation='relu',
                                 use_bias=True,
                                 kernel_initializer="glorot_uniform",
                                 bias_initializer="zeros",
-                                kernel_regularizer='l2',
+                                kernel_regularizer=None,
                                 bias_regularizer=None,
                                 activity_regularizer=None,
                                 kernel_constraint=None,
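
The relaxed defaults above (no dropout, no kernel regularizer) remain overridable per network through the generator and discriminator hyperparameter dictionaries. A minimal sketch, assuming only the keyword names shown in Base_hyperparams; how the dict reaches the builders is class plumbing, not shown here:

    # hypothetical override restoring the old regularized behaviour
    discriminator_hyperparameters = dict(layer_dimensions=[128, 128],
                                         activation='relu',
                                         dropout=0.05,
                                         kernel_regularizer='l2')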
@@ -102,6 +101,38 @@ def __init__(self, xshape, yshape, noise_shape, **kwargs):
         # Deepchem init function + class attributes.
         super(DCGAN, self).__init__(**kwargs)
 
+    @classmethod
+    def _create_DNN(cls, input_layer, kwargs):
+
+        # get hyperparameters from kwargs
+        layer_dimensions = kwargs.get('layer_dimensions', [128])
+        dropout = kwargs.get('dropout', 0.05)
+        batch_norm = kwargs.get('batch_norm', True)
+        # every other kwarg is for the layers
+        layer_kwargs = {key: kwargs[key] for key in kwargs.keys()
+                        - {'layer_dimensions', 'dropout', 'batch_norm'}}
+        # handle activation, which may be a function that cannot
+        # be passed to Dense
+        activation = layer_kwargs.pop('activation')
+
+        # build first layer of network
+        dnn = Flatten()(input_layer)
+        # build subsequent layers
+        for layer_dim in layer_dimensions:
+            if isinstance(activation, str):
+                dnn = Dense(layer_dim, activation=activation, **layer_kwargs)(dnn)
+            else:
+                dnn = Dense(layer_dim, **layer_kwargs)(dnn)
+
+            if batch_norm:
+                dnn = BatchNormalization()(dnn)
+
+            if not isinstance(activation, str):
+                dnn = activation(dnn)
+            dnn = Dropout(dropout)(dnn)
+
+        return dnn
+
     def create_generator(self):
         """
         Create the generator as a keras model.
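
The new _create_DNN helper centralizes the Dense → BatchNormalization → activation → Dropout stack that the four network builders below now share. A minimal usage sketch, assuming the hyperparameter keys above and that gandy.models.dcgan is importable; the output head stays the caller's responsibility:

    import tensorflow as tf
    from tensorflow.keras.layers import Dense, Input

    from gandy.models.dcgan import DCGAN

    hyperparams = dict(layer_dimensions=[128, 64],
                       dropout=0.0,
                       batch_norm=True,
                       activation='relu')
    noise_in = Input(shape=(10,))
    body = DCGAN._create_DNN(noise_in, hyperparams)  # hidden stack only
    out = Dense(1, activation='sigmoid')(body)       # caller adds the head
    model = tf.keras.Model(inputs=[noise_in], outputs=[out])

Note the ordering: a string activation is applied inside Dense, before batch normalization, while a callable activation (e.g. tf.keras.layers.LeakyReLU()) is applied after it.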
@@ -145,29 +176,15 @@ def create_generator(self):
 
         """
         # adapted from deepchem tutorial 14:
-
         kwargs = self.generator_hyperparameters
 
-        # get hyperparameters from kwargs
-        layer_dimensions = kwargs.get('layer_dimensions', [128])
-        dropout = kwargs.get('dropout', 0.05)
-        # every other kwarg is for the layers
-        layer_kwargs = {key: kwargs[key] for key in kwargs.keys()
-                        - {'layer_dimensions', 'dropout'}}
-
         # construct input
         noise_in = Input(shape=self.get_noise_input_shape())
-        # build first layer of network
-        gen = Dense(layer_dimensions[0], **layer_kwargs)(noise_in)
-        # adding dropout to the weights
-        gen = Dropout(dropout)(gen)
-        # build subsequent layers
-        for layer_dim in layer_dimensions[1:]:
-            gen = Dense(layer_dim, **layer_kwargs)(gen)
-            gen = Dropout(dropout)(gen)
-
-
+        gen = DCGAN._create_DNN(noise_in, kwargs)
 
         # generator outputs
-        gen = Dense(self.yshape[0], **layer_kwargs)(gen)
+        gen = Dense(self.yshape[0])(gen)
 
         # final construction of Keras model
         generator = tf.keras.Model(inputs=[noise_in],
@@ -221,29 +238,14 @@ def create_discriminator(self):
 
         kwargs = self.discriminator_hyperparameters
 
-        # get hyperparameters from kwargs
-        layer_dimensions = kwargs.get('layer_dimensions', [128])
-        dropout = kwargs.get('dropout', 0.05)
-        # every other kwarg is for the layers
-        layer_kwargs = {key: kwargs[key] for key in kwargs.keys()
-                        - {'layer_dimensions', 'dropout'}}
-
         # construct input
         data_in = Input(shape=self.yshape)
-        # build first layer of network
-        discrim = Dense(layer_dimensions[0], **layer_kwargs)(data_in)
-        # adding dropout to the weights
-        discrim = Dropout(dropout)(discrim)
-        # build subsequent layers
-        for layer_dim in layer_dimensions[1:]:
-            discrim = Dense(layer_dim, **layer_kwargs)(discrim)
-            discrim = Dropout(dropout)(discrim)
-
-        # To maintain the interpretation of a probability,
-        # the final activation function is not a kwarg
-        final_layer_kwargs = layer_kwargs.copy()
-        final_layer_kwargs.update(activation='sigmoid')
-        discrim_prob = Dense(1, **final_layer_kwargs)(discrim)
+        # the body of the model
+        discrim = DCGAN._create_DNN(data_in, kwargs)
 
+        # last layer
+        discrim_prob = Dense(1, activation='sigmoid')(discrim)
 
         # final construction of Keras model
         discriminator = tf.keras.Model(inputs=[data_in],
@@ -462,29 +464,16 @@ def create_generator(self):
 
         kwargs = self.generator_hyperparameters
 
-        # get hyperparameters from kwargs
-        layer_dimensions = kwargs.get('layer_dimensions', [128])
-        dropout = kwargs.get('dropout', 0.05)
-        # every other kwarg is for the layers
-        layer_kwargs = {key: kwargs[key] for key in kwargs.keys()
-                        - {'layer_dimensions', 'dropout'}}
-
         # construct input
         noise_in = Input(shape=self.get_noise_input_shape())
         conditional_in = Input(shape=self.xshape)
         gen_input = Concatenate()([noise_in, conditional_in])
 
-        # build first layer of network
-        gen = Dense(layer_dimensions[0], **layer_kwargs)(gen_input)
-        # adding dropout to the weights
-        gen = Dropout(dropout)(gen)
-        # build subsequent layers
-        for layer_dim in layer_dimensions[1:]:
-            gen = Dense(layer_dim, **layer_kwargs)(gen)
-            gen = Dropout(dropout)(gen)
+        # get the body
+        gen = DCGAN._create_DNN(gen_input, kwargs)
 
         # generator outputs
-        gen = Dense(self.yshape[0], **layer_kwargs)(gen)
+        gen = Dense(self.yshape[0])(gen)
 
         # final construction of Keras model
         generator = tf.keras.Model(inputs=[noise_in, conditional_in],
@@ -535,39 +524,17 @@ def create_discriminator(self):
 
         """
         # adapted from deepchem tutorial 14:
-
         kwargs = self.discriminator_hyperparameters
 
-        # get hyperparameters from kwargs
-        layer_dimensions = kwargs.get('layer_dimensions', [128])
-        dropout = kwargs.get('dropout', 0.05)
-        # every other kwarg is for the layers
-        layer_kwargs = {key: kwargs[key] for key in kwargs.keys()
-                        - {'layer_dimensions', 'dropout'}}
-        # removing activation to implemetn LeakyReLU
-        # layer_kwargs.update(activation=None)
-
         # construct input
         data_in = Input(shape=self.yshape)
         conditional_in = Input(shape=self.xshape,)
         discrim_input = Concatenate()([data_in, conditional_in])
 
-        # build first layer of network
-        discrim = Dense(layer_dimensions[0], **layer_kwargs)(discrim_input)
-        # discrim = LeakyReLU()(discrim)
-        # adding dropout to the weights
-        discrim = Dropout(dropout)(discrim)
-        # build subsequent layers
-        for layer_dim in layer_dimensions[1:]:
-            discrim = Dense(layer_dim, **layer_kwargs)(discrim)
-            # discrim = LeakyReLU()(discrim)
-            discrim = Dropout(dropout)(discrim)
-
-        # To maintain the interpretation of a probability,
-        # the final activation function is not a kwarg
-        final_layer_kwargs = layer_kwargs.copy()
-        final_layer_kwargs.update(activation='sigmoid')
-        discrim_prob = Dense(1, **final_layer_kwargs)(discrim)
+        # body
+        discrim = DCGAN._create_DNN(discrim_input, kwargs)
 
+        discrim_prob = Dense(1, activation='sigmoid')(discrim)
 
         # final construction of Keras model
         discriminator = tf.keras.Model(inputs=[data_in, conditional_in],
2 changes: 1 addition & 1 deletion gandy/models/gans.py
@@ -165,7 +165,7 @@ def _train(self,
         # train GAN on data
         # self.model = deepchem GAN instance
         # generator + discriminator losses
-        losses = self._model.fit_gan(self.iterbatches(Xs, Ys, batches))
+        losses = self._model.fit_gan(self.iterbatches(Xs, Ys, batches), **kwargs)
         # compute metric here
         if metric is not None:
             losses[0] = metric(losses[0])  # gen
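
Forwarding **kwargs lets callers tune deepchem's GAN training loop without modifying _train. A hedged sketch of the call as it now runs inside _train; generator_steps and checkpoint_interval are fit_gan options in deepchem 2.x (verify against the pinned version):

    # inside GAN._train, extra keyword arguments flow straight to deepchem
    losses = self._model.fit_gan(
        self.iterbatches(Xs, Ys, batches),
        generator_steps=0.5,     # update the generator every other batch
        checkpoint_interval=0,   # assumed option to skip checkpoint writing
    )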
2 changes: 1 addition & 1 deletion gandy/models/models.py
@@ -208,7 +208,7 @@ def train(self,
         if session is not None:
             sname = session
         else:
-            sname = 'Starttime: ' + str(time.clock())
+            sname = 'Starttime: ' + str(time.time())
         metric = self._get_metric(metric)
 
         Xs_, Ys_ = self.check(Xs, Ys)
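
time.clock() was removed in Python 3.8, which makes time.time() the portable replacement for stamping a default session name:

    import time

    # wall-clock seconds since the epoch, as a float
    sname = 'Starttime: ' + str(time.time())  # e.g. 'Starttime: 1714766400.12'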
2 changes: 1 addition & 1 deletion gandy/tests/test_models/test_models.py
@@ -161,7 +161,7 @@ def test_train(self, mocked__build):
         )
         subject._get_metric = mocked__get_metric
         # run the train and check proper calls
-        with unittest.mock.patch('time.time', return_value='thetime'
+        with unittest.mock.patch('time.time', return_value='thetime'
                                  ) as mocked_time:
             # first specify a session name
             subject.train(Xs_in, Ys_in,
Empty file removed working/Evan/notes.md
Empty file removed working/Kyle/notes.md
Empty file removed working/Sam/notes.md
Empty file removed working/Yu-Chi/notes.md
Empty file removed working/Yuxuan/notes.md