---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in apply_op(self, op_type_name, name, **keywords)
434 preferred_dtype=default_dtype,
--> 435 as_ref=input_arg.is_ref)
436 if input_arg.number_attr and len(
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_n_to_tensor(values, dtype, name, as_ref, preferred_dtype)
801 as_ref=as_ref,
--> 802 preferred_dtype=preferred_dtype))
803 return ret
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
740 if ret is None:
--> 741 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
742
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
613 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
--> 614 % (dtype.name, t.dtype.name, str(t)))
615 return t
ValueError: Tensor conversion requested dtype float32 for Tensor with dtype float64: 'Tensor("bidirectional_rnn/fw/fw/while/Identity_3:0", shape=(?, 20), dtype=float64)'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-9-feef21b21ace> in <module>()
8 inputs=encoder_inputs_embedded,
9 sequence_length=encoder_inputs_length,
---> 10 dtype=tf.float64, time_major=True)
11 )
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length, initial_state_fw, initial_state_bw, dtype, parallel_iterations, swap_memory, time_major, scope)
373 initial_state=initial_state_fw, dtype=dtype,
374 parallel_iterations=parallel_iterations, swap_memory=swap_memory,
--> 375 time_major=time_major, scope=fw_scope)
376
377 # Backward direction
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in dynamic_rnn(cell, inputs, sequence_length, initial_state, dtype, parallel_iterations, swap_memory, time_major, scope)
572 swap_memory=swap_memory,
573 sequence_length=sequence_length,
--> 574 dtype=dtype)
575
576 # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in _dynamic_rnn_loop(cell, inputs, initial_state, parallel_iterations, swap_memory, sequence_length, dtype)
735 loop_vars=(time, output_ta, state),
736 parallel_iterations=parallel_iterations,
--> 737 swap_memory=swap_memory)
738
739 # Unpack final output if not using output tuples.
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name)
2768 context = WhileContext(parallel_iterations, back_prop, swap_memory, name)
2769 ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, context)
--> 2770 result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
2771 return result
2772
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py in BuildLoop(self, pred, body, loop_vars, shape_invariants)
2597 self.Enter()
2598 original_body_result, exit_vars = self._BuildLoop(
-> 2599 pred, body, original_loop_vars, loop_vars, shape_invariants)
2600 finally:
2601 self.Exit()
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py in _BuildLoop(self, pred, body, original_loop_vars, loop_vars, shape_invariants)
2547 structure=original_loop_vars,
2548 flat_sequence=vars_for_body_with_tensor_arrays)
-> 2549 body_result = body(*packed_vars_for_body)
2550 if not nest.is_sequence(body_result):
2551 body_result = [body_result]
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in _time_step(time, output_ta_t, state)
718 call_cell=call_cell,
719 state_size=state_size,
--> 720 skip_conditionals=True)
721 else:
722 (output, new_state) = call_cell()
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in _rnn_step(time, sequence_length, min_sequence_length, max_sequence_length, zero_output, state, call_cell, state_size, skip_conditionals)
204 # steps. This is faster when max_seq_len is equal to the number of unrolls
205 # (which is typical for dynamic_rnn).
--> 206 new_output, new_state = call_cell()
207 nest.assert_same_structure(state, new_state)
208 new_state = nest.flatten(new_state)
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py in <lambda>()
706
707 input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
--> 708 call_cell = lambda: cell(input_t, state)
709
710 if sequence_length is not None:
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py in __call__(self, inputs, state, scope)
178 with vs.variable_scope(vs.get_variable_scope(),
179 custom_getter=self._rnn_get_variable):
--> 180 return super(RNNCell, self).__call__(inputs, state)
181
182 def _rnn_get_variable(self, getter, *args, **kwargs):
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/base.py in __call__(self, inputs, *args, **kwargs)
439 # Check input assumptions set after layer building, e.g. input shape.
440 self._assert_input_compatibility(inputs)
--> 441 outputs = self.call(inputs, *args, **kwargs)
442
443 # Apply activity regularization.
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py in call(self, inputs, state)
540 self._num_unit_shards))
541 # i = input_gate, j = new_input, f = forget_gate, o = output_gate
--> 542 lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
543 i, j, f, o = array_ops.split(
544 value=lstm_matrix, num_or_size_splits=4, axis=1)
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py in _linear(args, output_size, bias, bias_initializer, kernel_initializer)
1019 res = math_ops.matmul(args[0], weights)
1020 else:
--> 1021 res = math_ops.matmul(array_ops.concat(args, 1), weights)
1022 if not bias:
1023 return res
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py in concat(values, axis, name)
1046 return gen_array_ops._concat_v2(values=values,
1047 axis=axis,
-> 1048 name=name)
1049
1050
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py in _concat_v2(values, axis, name)
493 """
494 result = _op_def_lib.apply_op("ConcatV2", values=values, axis=axis,
--> 495 name=name)
496 return result
497
/Users/lipingzhang/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in apply_op(self, op_type_name, name, **keywords)
461 (prefix, dtype.name))
462 else:
--> 463 raise TypeError("%s that don't all match." % prefix)
464 else:
465 raise TypeError("%s that are invalid." % prefix)
TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, float64] that don't all match.