ValueError: None values not supported. #2

Open
sirwh opened this issue Jan 21, 2020 · 0 comments

sirwh commented Jan 21, 2020

Hi, I ran the code in a Jupyter notebook as follows:
!python3 train.py dummy.csv --num_class 5 --epochs 200 --batch_size 64

I got the following error:

Traceback (most recent call last):
  File "train.py", line 219, in <module>
    class_weight=params['class_weight']
  File "/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 1732, in fit_generator
    initial_epoch=initial_epoch)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training_generator.py", line 42, in fit_generator
    model._make_train_function()
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 316, in _make_train_function
    loss=self.total_loss)
  File "/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/keras/optimizers.py", line 622, in get_updates
    p_t = p - lr_t * m_t / (u_t + self.epsilon)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py", line 888, in binary_op_wrapper
    y, dtype_hint=x.dtype.base_dtype, name="y")
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1145, in convert_to_tensor_v2
    as_ref=False)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1224, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py", line 305, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py", line 246, in constant
    allow_broadcast=True)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py", line 284, in _constant_impl
    allow_broadcast=allow_broadcast))
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/tensor_util.py", line 454, in make_tensor_proto
    raise ValueError("None values not supported.")
ValueError: None values not supported.
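
Looking at the failing line, the expression is u_t + self.epsilon inside what looks like the Adamax update (the u_t = K.maximum(...) line above it), and u_t is a tensor, so the None value appears to be self.epsilon. My guess, not verified against train.py, is that the optimizer is constructed with an explicit epsilon=None, which older Keras releases silently replaced with K.epsilon() but the 2.3.x releases keep as-is:

# Hypothetical minimal sketch of the suspected cause -- I have not checked
# how train.py actually builds its optimizer.
from keras import backend as K
from keras.optimizers import Adamax

opt_bad = Adamax(epsilon=None)         # on Keras 2.3.x self.epsilon stays None -> fails later in _make_train_function()
opt_ok = Adamax(epsilon=K.epsilon())   # an explicit float (or simply omitting the argument) avoids the None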

I also tried running the code step by step. After building trainX and trainY, instead of using a generator, I simply ran
model.fit(x=trainX, y=trainY, epochs=1, verbose=1)
but the error is the same:

ValueError                                Traceback (most recent call last)
<ipython-input-8-57446bf71ca4> in <module>
----> 1 model.fit(x=trainX, y=trainY, epochs=1, verbose=1)

/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
   1211         else:
   1212             fit_inputs = x + y + sample_weights
-> 1213         self._make_train_function()
   1214         fit_function = self.train_function
   1215 

/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in _make_train_function(self)
    314                     training_updates = self.optimizer.get_updates(
    315                         params=self._collected_trainable_weights,
--> 316                         loss=self.total_loss)
    317                 updates = self.updates + training_updates
    318 

/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/usr/local/lib/python3.6/dist-packages/keras/optimizers.py in get_updates(self, loss, params)
    620             m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    621             u_t = K.maximum(self.beta_2 * u, K.abs(g))
--> 622             p_t = p - lr_t * m_t / (u_t + self.epsilon)
    623 
    624             self.updates.append(K.update(m, m_t))

/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
    886         try:
    887           y = ops.convert_to_tensor_v2(
--> 888               y, dtype_hint=x.dtype.base_dtype, name="y")
    889         except TypeError:
    890           # If the RHS is not a tensor, it might be a tensor aware object

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in convert_to_tensor_v2(value, dtype, dtype_hint, name)
   1143       name=name,
   1144       preferred_dtype=dtype_hint,
-> 1145       as_ref=False)
   1146 
   1147 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx, accept_symbolic_tensors, accept_composite_tensors)
   1222 
   1223     if ret is None:
-> 1224       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1225 
   1226     if ret is NotImplemented:

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
    303                                          as_ref=False):
    304   _ = as_ref
--> 305   return constant(v, dtype=dtype, name=name)
    306 
    307 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
    244   """
    245   return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 246                         allow_broadcast=True)
    247 
    248 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    282       tensor_util.make_tensor_proto(
    283           value, dtype=dtype, shape=shape, verify_shape=verify_shape,
--> 284           allow_broadcast=allow_broadcast))
    285   dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
    286   const_tensor = g.create_op(

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape, allow_broadcast)
    452   else:
    453     if values is None:
--> 454       raise ValueError("None values not supported.")
    455     # if dtype is provided, forces numpy array to be the type
    456     # provided if possible.

ValueError: None values not supported.
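
Since both attempts fail inside _make_train_function(), before a single batch is consumed, I don't think trainX/trainY are the problem; the None seems to live in the optimizer itself. If it helps narrow things down, I can print the optimizer state from the compiled model in the step-by-step session, e.g. (I have not captured this output yet):

print(type(model.optimizer).__name__)   # the traceback suggests this is Adamax
print(model.optimizer.epsilon)          # if this prints None, that is the value the update chokes on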

Could you please help?
