The following issues were found:
keras/utils/multi_gpu_utils.py
81 issues
Line: 17
Column: 1
# ==============================================================================
"""Utilities for multi-gpu training."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.training import Model
from keras.layers.core.lambda_layer import Lambda
from keras.layers.merge import concatenate
Reported by Pylint.
Line: 200
Column: 1
  # Relocate the model definition under CPU device scope if needed
  if cpu_relocation:
    from keras.models import clone_model  # pylint: disable=g-import-not-at-top
    with tf.device('/cpu:0'):
      model = clone_model(model)

  all_outputs = [[] for _ in range(len(model.outputs))]
Reported by Pylint.
Line: 25
Column: 1
def _get_available_devices():
  return [x.name for x in backend.get_session().list_devices()]


def _normalize_device_name(name):
  name = '/' + name.lower().split('device:')[1]
  return name
Reported by Pylint.
Line: 29
Column: 1
def _normalize_device_name(name):
  name = '/' + name.lower().split('device:')[1]
  return name


def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.
Reported by Pylint.
Line: 30
Column: 1
def _normalize_device_name(name):
  name = '/' + name.lower().split('device:')[1]
  return name


def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.
Reported by Pylint.
Line: 33
Column: 1
  return name


def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.

  Specifically, this function implements single-machine
  multi-GPU data parallelism. It works in the following way:
Reported by Pylint.
Line: 34
Column: 1
def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.

  Specifically, this function implements single-machine
  multi-GPU data parallelism. It works in the following way:

  - Divide the model's input(s) into multiple sub-batches.
Reported by Pylint.
Line: 146
Column: 1
  Raises:
    ValueError: if the `gpus` argument does not match available devices.
  """
  if isinstance(gpus, (list, tuple)):
    if len(gpus) <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `len(gpus) >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = len(gpus)
Reported by Pylint.
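Context for the entries above: `multi_gpu_model` replicates the model once per GPU, splits each incoming batch into `gpus` sub-batches, and merges the per-replica outputs. A minimal usage sketch, assuming a host with at least 2 GPUs and this (deprecated) utility still importable from the module under report:

import tensorflow.compat.v2 as tf
from keras.utils.multi_gpu_utils import multi_gpu_model

model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(32,))])
# Each batch of, say, 64 samples is split into two sub-batches of 32, one per
# GPU; passing gpus=1 would trigger the ValueError shown in the excerpt above.
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.compile(loss='mse', optimizer='sgd')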
keras/engine/training_utils.py
81 issues
Line: 17
Column: 1
# ==============================================================================
"""Training-related utilities."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.utils import generic_utils
Reported by Pylint.
Line: 24
Column: 1
def slice_arrays(arrays, indices, contiguous=True):
  """Slices batches out of provided arrays (workaround for eager tensors).

  Unfortunately eager tensors don't have the same slicing behavior as
  Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
  hence we cannot use `generic_utils.slice_arrays` directly
  and we have to implement this workaround based on `concat`. This has a
Reported by Pylint.
Line: 41
Column: 1
  Returns:
    Slice of data (either single array or list of arrays).
  """
  converted_to_list = False
  if not isinstance(arrays, list):
    converted_to_list = True
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
Reported by Pylint.
Line: 42
Column: 1
    Slice of data (either single array or list of arrays).
  """
  converted_to_list = False
  if not isinstance(arrays, list):
    converted_to_list = True
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
Reported by Pylint.
Line: 43
Column: 1
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tf.is_tensor(x) for x in arrays):
if not contiguous:
entries = [[x[i:i + 1] for i in indices] for x in arrays]
slices = [tf.concat(x, axis=0) for x in entries]
Reported by Pylint.
Line: 44
Column: 1
  converted_to_list = False
  if not isinstance(arrays, list):
    converted_to_list = True
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [tf.concat(x, axis=0) for x in entries]
    else:
Reported by Pylint.
Line: 45
Column: 1
  if not isinstance(arrays, list):
    converted_to_list = True
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [tf.concat(x, axis=0) for x in entries]
    else:
      slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
Reported by Pylint.
Line: 46
Column: 1
    converted_to_list = True
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [tf.concat(x, axis=0) for x in entries]
    else:
      slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
  else:
Reported by Pylint.
Line: 47
Column: 1
    arrays = [arrays]
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [tf.concat(x, axis=0) for x in entries]
    else:
      slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
  else:
    slices = generic_utils.slice_arrays(arrays, indices)
Reported by Pylint.
Line: 48
Column: 1
  if any(tf.is_tensor(x) for x in arrays):
    if not contiguous:
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [tf.concat(x, axis=0) for x in entries]
    else:
      slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
  else:
    slices = generic_utils.slice_arrays(arrays, indices)
Reported by Pylint.
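Context for the `slice_arrays` entries: eager tensors do not accept a Python list of indices the way NumPy arrays do, so non-contiguous selection is emulated by gathering length-1 slices and concatenating them. A standalone sketch of the same trick:

import tensorflow.compat.v2 as tf

x = tf.constant([[0.], [1.], [2.], [3.], [4.]])
indices = [0, 2, 4]  # non-contiguous rows; x[indices] is invalid on a tensor

rows = [x[i:i + 1] for i in indices]  # one length-1 slice per index
sliced = tf.concat(rows, axis=0)      # rows 0, 2 and 4; shape (3, 1)

For the contiguous case the function instead takes the single slice `x[indices[0]:indices[-1] + 1]`, which eager tensors do support.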
keras/optimizer_v2/utils.py
80 issues
Line: 17
Column: 1
# ==============================================================================
"""Optimizer utilities."""

import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging


def all_reduce_sum_gradients(grads_and_vars):
  """Returns all-reduced gradients aggregated via summation.
Reported by Pylint.
Line: 18
Column: 1
"""Optimizer utilities."""
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
def all_reduce_sum_gradients(grads_and_vars):
"""Returns all-reduced gradients aggregated via summation.
Reported by Pylint.
Line: 38
Column: 3
      reduced = tf.distribute.get_replica_context().all_reduce(
          tf.distribute.ReduceOp.SUM, grads)
    else:
      # TODO(b/183257003): Remove this branch
      reduced = tf.distribute.get_replica_context().merge_call(
          _all_reduce_sum_fn, args=(filtered_grads_and_vars,))
  else:
    reduced = []
  # Copy 'reduced' but add None gradients back in
Reported by Pylint.
Line: 22
Column: 1
def all_reduce_sum_gradients(grads_and_vars):
  """Returns all-reduced gradients aggregated via summation.

  Args:
    grads_and_vars: List of (gradient, variable) pairs.

  Returns:
Reported by Pylint.
Line: 30
Column: 1
  Returns:
    List of (gradient, variable) pairs where gradients have been all-reduced.
  """
  grads_and_vars = list(grads_and_vars)
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  if filtered_grads_and_vars:
    if strategy_supports_no_merge_call():
      grads = [pair[0] for pair in filtered_grads_and_vars]
      reduced = tf.distribute.get_replica_context().all_reduce(
Reported by Pylint.
Line: 31
Column: 1
    List of (gradient, variable) pairs where gradients have been all-reduced.
  """
  grads_and_vars = list(grads_and_vars)
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  if filtered_grads_and_vars:
    if strategy_supports_no_merge_call():
      grads = [pair[0] for pair in filtered_grads_and_vars]
      reduced = tf.distribute.get_replica_context().all_reduce(
          tf.distribute.ReduceOp.SUM, grads)
Reported by Pylint.
Line: 32
Column: 1
"""
grads_and_vars = list(grads_and_vars)
filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
if filtered_grads_and_vars:
if strategy_supports_no_merge_call():
grads = [pair[0] for pair in filtered_grads_and_vars]
reduced = tf.distribute.get_replica_context().all_reduce(
tf.distribute.ReduceOp.SUM, grads)
else:
Reported by Pylint.
Line: 33
Column: 1
  grads_and_vars = list(grads_and_vars)
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  if filtered_grads_and_vars:
    if strategy_supports_no_merge_call():
      grads = [pair[0] for pair in filtered_grads_and_vars]
      reduced = tf.distribute.get_replica_context().all_reduce(
          tf.distribute.ReduceOp.SUM, grads)
    else:
      # TODO(b/183257003): Remove this branch
Reported by Pylint.
Line: 34
Column: 1
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  if filtered_grads_and_vars:
    if strategy_supports_no_merge_call():
      grads = [pair[0] for pair in filtered_grads_and_vars]
      reduced = tf.distribute.get_replica_context().all_reduce(
          tf.distribute.ReduceOp.SUM, grads)
    else:
      # TODO(b/183257003): Remove this branch
      reduced = tf.distribute.get_replica_context().merge_call(
Reported by Pylint.
Line: 35
Column: 1
  if filtered_grads_and_vars:
    if strategy_supports_no_merge_call():
      grads = [pair[0] for pair in filtered_grads_and_vars]
      reduced = tf.distribute.get_replica_context().all_reduce(
          tf.distribute.ReduceOp.SUM, grads)
    else:
      # TODO(b/183257003): Remove this branch
      reduced = tf.distribute.get_replica_context().merge_call(
          _all_reduce_sum_fn, args=(filtered_grads_and_vars,))
Reported by Pylint.
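Context for the `all_reduce_sum_gradients` entries: the function drops `(None, variable)` pairs, sum-reduces the remaining gradients across replicas, then re-inserts the `None` gradients so the output lines up pairwise with the input. A single-process sketch of that bookkeeping (the all-reduce itself is stubbed out, and `filter_empty_gradients` is approximated by a list comprehension):

import tensorflow.compat.v2 as tf

v1, v2, v3 = (tf.Variable(0.0) for _ in range(3))
grads_and_vars = [(tf.constant(1.0), v1), (None, v2), (tf.constant(3.0), v3)]

# Drop pairs whose gradient is None before reducing.
filtered = [(g, v) for g, v in grads_and_vars if g is not None]
reduced = [g for g, _ in filtered]  # stand-in for the cross-replica SUM

# Re-insert None gradients so positions match the original input.
it = iter(reduced)
result = [(next(it) if g is not None else None, v) for g, v in grads_and_vars]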
keras/activations.py
80 issues
Line: 17
Column: 1
# ==============================================================================
"""Built-in activation functions."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.layers import advanced_activations
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
Reported by Pylint.
Line: 23
Column: 1
from keras.layers import advanced_activations
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
Reported by Pylint.
Line: 40
Column: 1
@keras_export('keras.activations.softmax')
@tf.__internal__.dispatch.add_dispatch_support
def softmax(x, axis=-1):
  """Softmax converts a vector of values to a probability distribution.

  The elements of the output vector are in range (0, 1) and sum to 1.

  Each vector is handled independently. The `axis` argument sets which axis
Reported by Pylint.
Line: 41
Column: 1
@keras_export('keras.activations.softmax')
@tf.__internal__.dispatch.add_dispatch_support
def softmax(x, axis=-1):
  """Softmax converts a vector of values to a probability distribution.

  The elements of the output vector are in range (0, 1) and sum to 1.

  Each vector is handled independently. The `axis` argument sets which axis
  of the input the function is applied along.
Reported by Pylint.
Line: 78
Column: 1
  >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)
  """
  if x.shape.rank > 1:
    if isinstance(axis, int):
      output = tf.nn.softmax(x, axis=axis)
    else:
      # nn.softmax does not support tuple axis.
      e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
Reported by Pylint.
Line: 79
Column: 1
  >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)
  """
  if x.shape.rank > 1:
    if isinstance(axis, int):
      output = tf.nn.softmax(x, axis=axis)
    else:
      # nn.softmax does not support tuple axis.
      e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
      s = tf.reduce_sum(e, axis=axis, keepdims=True)
Reported by Pylint.
Line: 80
Column: 1
"""
if x.shape.rank > 1:
if isinstance(axis, int):
output = tf.nn.softmax(x, axis=axis)
else:
# nn.softmax does not support tuple axis.
e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
s = tf.reduce_sum(e, axis=axis, keepdims=True)
output = e / s
Reported by Pylint.
Line: 81
Column: 1
  if x.shape.rank > 1:
    if isinstance(axis, int):
      output = tf.nn.softmax(x, axis=axis)
    else:
      # nn.softmax does not support tuple axis.
      e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
      s = tf.reduce_sum(e, axis=axis, keepdims=True)
      output = e / s
  else:
Reported by Pylint.
Line: 83
Column: 1
      output = tf.nn.softmax(x, axis=axis)
    else:
      # nn.softmax does not support tuple axis.
      e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
      s = tf.reduce_sum(e, axis=axis, keepdims=True)
      output = e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D. '
                     f'Received input: {x}')
Reported by Pylint.
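Context for the `softmax` entries: `tf.nn.softmax` only accepts an integer axis, so the tuple-axis branch computes the softmax identity by hand, subtracting the max for numerical stability. The same computation in isolation:

import tensorflow.compat.v2 as tf

x = tf.random.normal((2, 3, 4))
axis = (1, 2)  # reduce over both trailing axes jointly
e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
output = e / tf.reduce_sum(e, axis=axis, keepdims=True)
# Each (3, 4) slice now sums to 1: tf.reduce_sum(output, axis=axis) ~ [1., 1.]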
keras/layers/noise.py
80 issues
Line: 17
Column: 1
# ==============================================================================
"""Layers that operate regularization via the addition of noise."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import backend
from keras.engine.base_layer import Layer
Reported by Pylint.
Line: 24
Column: 1
from keras import backend
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
  """Apply additive zero-centered Gaussian noise.
Reported by Pylint.
Line: 60
Column: 3
    self.supports_masking = True
    self.stddev = stddev

  def call(self, inputs, training=None):

    def noised():
      return inputs + backend.random_normal(
          shape=tf.shape(inputs),
          mean=0.,
Reported by Pylint.
Line: 111
Column: 3
    self.supports_masking = True
    self.rate = rate

  def call(self, inputs, training=None):
    if 0 < self.rate < 1:

      def noised():
        stddev = np.sqrt(self.rate / (1.0 - self.rate))
        return inputs * backend.random_normal(
Reported by Pylint.
Line: 175
Column: 3
  def _get_noise_shape(self, inputs):
    return self.noise_shape if self.noise_shape else tf.shape(inputs)

  def call(self, inputs, training=None):
    if 0. < self.rate < 1.:
      noise_shape = self._get_noise_shape(inputs)

      def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):  # pylint: disable=missing-docstring
        alpha = 1.6732632423543772848170429916717
Reported by Pylint.
Line: 29
Column: 1
@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
  """Apply additive zero-centered Gaussian noise.

  This is useful to mitigate overfitting
  (you could see it as a form of random data augmentation).
  Gaussian Noise (GS) is a natural choice as corruption process
  for real valued inputs.
Reported by Pylint.
Line: 55
Column: 1
    Same shape as input.
  """

  def __init__(self, stddev, **kwargs):
    super(GaussianNoise, self).__init__(**kwargs)
    self.supports_masking = True
    self.stddev = stddev

  def call(self, inputs, training=None):
Reported by Pylint.
Line: 56
Column: 1
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
Reported by Pylint.
Line: 56
Column: 5
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
Reported by Pylint.
Line: 57
Column: 1
  def __init__(self, stddev, **kwargs):
    super(GaussianNoise, self).__init__(**kwargs)
    self.supports_masking = True
    self.stddev = stddev

  def call(self, inputs, training=None):

    def noised():
Reported by Pylint.
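Context for the `GaussianNoise` entries: the `training` argument gates the `noised()` branch, so noise is only injected during training and the layer is the identity at inference. A usage sketch:

import tensorflow as tf

layer = tf.keras.layers.GaussianNoise(stddev=0.1)
x = tf.ones((2, 4))
noisy = layer(x, training=True)   # x plus zero-centered noise, stddev 0.1
clean = layer(x, training=False)  # pass-through: equal to x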
keras/layers/core/dense.py
78 issues
Line: 16
Column: 1
# limitations under the License.
# ==============================================================================
"""Contains the Dense layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import activations
from keras import backend as K
from keras import constraints
from keras import initializers
Reported by Pylint.
Line: 25
Column: 1
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Dense')
Reported by Pylint.
Line: 27
Column: 1
from keras.engine.input_spec import InputSpec
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.
Reported by Pylint.
Line: 143
Column: 5
                       'should be defined. Found None. '
                       f'Full input shape received: {input_shape}')
    self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
Reported by Pylint.
Line: 152
Column: 7
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
Reported by Pylint.
Line: 161
Column: 7
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:
      inputs = tf.cast(inputs, dtype=self._compute_dtype_object)
Reported by Pylint.
Line: 164
Column: 3
      self.bias = None
    self.built = True

  def call(self, inputs):
    if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:
      inputs = tf.cast(inputs, dtype=self._compute_dtype_object)

    if isinstance(inputs, tf.RaggedTensor) and inputs.shape[-1] is not None:
      # In case we encounter a RaggedTensor with a fixed last dimension (last
Reported by Pylint.
Line: 31
Column: 1
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function
Reported by Pylint.
Line: 32
Column: 1
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function
  passed as the `activation` argument, `kernel` is a weights matrix
Reported by Pylint.
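Context for the `Dense` entries: the docstring formula `output = activation(dot(input, kernel) + bias)` can be checked directly against the built weights. A sketch:

import tensorflow as tf

layer = tf.keras.layers.Dense(3, activation='relu')
x = tf.random.normal((2, 4))
y = layer(x)  # first call builds kernel (4, 3) and bias (3,)

# The same computation spelled out with the layer's own weights;
# y and manual agree up to float rounding.
manual = tf.nn.relu(tf.matmul(x, layer.kernel) + layer.bias)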
keras/saving/saved_model/json_utils.py
78 issues
Line: 24
Column: 1
input if the given shape is a tuple.
"""
import tensorflow.compat.v2 as tf
import collections
import enum
import json
import numpy as np
Reported by Pylint.
Line: 31
Column: 1
import json
import numpy as np
import wrapt
from tensorflow.python.framework import type_spec


class Encoder(json.JSONEncoder):
  """JSON encoder and decoder that handles TensorShapes and tuples."""
Reported by Pylint.
Line: 37
Column: 3
class Encoder(json.JSONEncoder):
  """JSON encoder and decoder that handles TensorShapes and tuples."""

  def default(self, obj):  # pylint: disable=method-hidden
    """Encodes objects for types that aren't handled by the default encoder."""
    if isinstance(obj, tf.TensorShape):
      items = obj.as_list() if obj.rank is not None else None
      return {'class_name': 'TensorShape', 'items': items}
    return get_json_type(obj)
Reported by Pylint.
Line: 44
Column: 3
      return {'class_name': 'TensorShape', 'items': items}
    return get_json_type(obj)

  def encode(self, obj):
    return super(Encoder, self).encode(_encode_tuple(obj))


def _encode_tuple(x):
  if isinstance(x, tuple):
Reported by Pylint.
Line: 135
Column: 7
      return {'class_name': 'TypeSpec', 'type_spec': type_spec_name,
              'serialized': obj._serialize()}  # pylint: disable=protected-access
    except ValueError:
      raise ValueError(
          f'Unable to serialize {obj} to JSON, because the TypeSpec '
          f'class {type(obj)} has not been registered.')
  if isinstance(obj, enum.Enum):
    return obj.value
Reported by Pylint.
Line: 26
Column: 1
import tensorflow.compat.v2 as tf
import collections
import enum
import json
import numpy as np
import wrapt
from tensorflow.python.framework import type_spec
Reported by Pylint.
Line: 27
Column: 1
import tensorflow.compat.v2 as tf
import collections
import enum
import json
import numpy as np
import wrapt
from tensorflow.python.framework import type_spec
Reported by Pylint.
Line: 28
Column: 1
import collections
import enum
import json
import numpy as np
import wrapt
from tensorflow.python.framework import type_spec
Reported by Pylint.
Line: 35
Column: 1
class Encoder(json.JSONEncoder):
  """JSON encoder and decoder that handles TensorShapes and tuples."""

  def default(self, obj):  # pylint: disable=method-hidden
    """Encodes objects for types that aren't handled by the default encoder."""
    if isinstance(obj, tf.TensorShape):
      items = obj.as_list() if obj.rank is not None else None
Reported by Pylint.
Line: 37
Column: 1
class Encoder(json.JSONEncoder):
  """JSON encoder and decoder that handles TensorShapes and tuples."""

  def default(self, obj):  # pylint: disable=method-hidden
    """Encodes objects for types that aren't handled by the default encoder."""
    if isinstance(obj, tf.TensorShape):
      items = obj.as_list() if obj.rank is not None else None
      return {'class_name': 'TensorShape', 'items': items}
    return get_json_type(obj)
Reported by Pylint.
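Context for the `json_utils.Encoder` entries: `default` turns a `TensorShape` into a tagged dict so it survives a JSON round trip. A usage sketch, assuming the import path matches the file under report:

import json
import tensorflow.compat.v2 as tf
from keras.saving.saved_model import json_utils

s = json.dumps(tf.TensorShape([None, 32]), cls=json_utils.Encoder)
# s == '{"class_name": "TensorShape", "items": [null, 32]}'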
keras/distribute/dataset_creator_model_fit_ps_only_test.py
78 issues
Line: 21
Column: 1
from keras import callbacks as callbacks_lib
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
import tensorflow.compat.v2 as tf


@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.parameter_server_strategies_multi_worker,
Reported by Pylint.
Line: 51
Column: 9
    model = self._model_fit(strategy, use_dataset_creator=use_dataset_creator)
    strategy = model.distribute_strategy
    self.assertIs(
        strategy._cluster_coordinator,
        tf.distribute.experimental.coordinator.ClusterCoordinator(strategy))

  def testModelFitErrorOnBatchLevelCallbacks(self, strategy,
                                             use_dataset_creator):
Reported by Pylint.
Line: 29
Column: 1
        strategy=strategy_combinations.parameter_server_strategies_multi_worker,
        use_dataset_creator=[True, False],
        mode="eager"))
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    test_base.DatasetCreatorModelFitTestBase):

  def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
Reported by Pylint.
Line: 32
Column: 3
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    test_base.DatasetCreatorModelFitTestBase):

  def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)
Reported by Pylint.
Line: 32
Column: 1
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    test_base.DatasetCreatorModelFitTestBase):

  def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)
Reported by Pylint.
Line: 33
Column: 1
    test_base.DatasetCreatorModelFitTestBase):

  def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)
Reported by Pylint.
Line: 36
Column: 1
    with self.assertRaisesRegex(
        ValueError, "When using `Model` with `ParameterServerStrategy`, "
        "`run_eagerly` is not supported."):
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)

  def testModelPredict(self, strategy, use_dataset_creator):
    if use_dataset_creator:
      self.skipTest("Unused option.")
Reported by Pylint.
Line: 39
Column: 3
      self._model_fit(
          strategy, run_eagerly=True, use_dataset_creator=use_dataset_creator)

  def testModelPredict(self, strategy, use_dataset_creator):
    if use_dataset_creator:
      self.skipTest("Unused option.")
    model, _ = self._model_compile(strategy)
    test_data = tf.data.Dataset.from_tensor_slices(
        [[1.], [2.], [3.], [1.], [5.], [1.]]).repeat().batch(2)
Reported by Pylint.
keras/feature_column/sequence_feature_column_integration_test.py
78 issues
Line: 21
Column: 1
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
Reported by Pylint.
Line: 24
Column: 1
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import test_util
from keras import backend
Reported by Pylint.
Line: 26
Column: 1
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import test_util
from keras import backend
from keras.feature_column import dense_features
from keras.feature_column import sequence_feature_column as ksfc
Reported by Pylint.
Line: 27
Column: 1
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import test_util
from keras import backend
from keras.feature_column import dense_features
from keras.feature_column import sequence_feature_column as ksfc
from keras.layers import merge
Reported by Pylint.
Line: 28
Column: 1
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import test_util
from keras import backend
from keras.feature_column import dense_features
from keras.feature_column import sequence_feature_column as ksfc
from keras.layers import merge
from keras.layers import recurrent
Reported by Pylint.
Line: 36
Column: 1
from keras.layers import recurrent


class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):

  def _make_sequence_example(self):
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
Reported by Pylint.
Line: 38
Column: 1
class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):

  def _make_sequence_example(self):
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
Reported by Pylint.
Line: 38
Column: 3
class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):

  def _make_sequence_example(self):
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
Reported by Pylint.
Line: 39
Column: 1
class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):

  def _make_sequence_example(self):
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
      feat.int64_list.value.extend([val] * val)
Reported by Pylint.
Line: 40
Column: 1
  def _make_sequence_example(self):
    example = example_pb2.SequenceExample()
    example.context.feature['int_ctx'].int64_list.value.extend([5])
    example.context.feature['float_ctx'].float_list.value.extend([123.6])
    for val in range(0, 10, 2):
      feat = feature_pb2.Feature()
      feat.int64_list.value.extend([val] * val)
      example.feature_lists.feature_list['int_list'].feature.extend([feat])
Reported by Pylint.
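Context for the `_make_sequence_example` entries: the helper builds a `SequenceExample` proto with two context features and one variable-length feature list. The same construction outside the test class:

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2

example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
  feat = feature_pb2.Feature()
  feat.int64_list.value.extend([val] * val)  # [], [2, 2], [4]*4, [6]*6, [8]*8
  example.feature_lists.feature_list['int_list'].feature.extend([feat])
serialized = example.SerializeToString()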
keras/distribute/keras_image_model_correctness_test.py
77 issues
Line: 17
Column: 1
# ==============================================================================
"""Correctness tests for tf.keras CNN models using DistributionStrategy."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import testing_utils
from keras.distribute import keras_correctness_test_base
Reported by Pylint.
Line: 30
Column: 1
    'Uses Dense layers, which call matmul. Even if Dense layers run in '
    'float64, the test sometimes fails with TensorFloat-32 enabled for unknown '
    'reasons')
class DistributionStrategyCnnCorrectnessTest(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):

  def get_model(self,
                initial_weights=None,
                distribution=None,
Reported by Pylint.
Line: 33
Column: 3
class DistributionStrategyCnnCorrectnessTest(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):

  def get_model(self,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
Reported by Pylint.
Line: 88
Column: 15
  def get_data(self):
    x_train, y_train = self._get_data(
        count=keras_correctness_test_base._GLOBAL_BATCH_SIZE *
        keras_correctness_test_base._EVAL_STEPS)
    x_predict = x_train
    return x_train, y_train, x_predict

  def get_data_with_partial_last_batch_eval(self):
Reported by Pylint.
Line: 89
Column: 9
  def get_data(self):
    x_train, y_train = self._get_data(
        count=keras_correctness_test_base._GLOBAL_BATCH_SIZE *
        keras_correctness_test_base._EVAL_STEPS)
    x_predict = x_train
    return x_train, y_train, x_predict

  def get_data_with_partial_last_batch_eval(self):
    x_train, y_train = self._get_data(count=1280)
Reported by Pylint.
Line: 33
Column: 1
class DistributionStrategyCnnCorrectnessTest(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):

  def get_model(self,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
Reported by Pylint.
Line: 37
Column: 1
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      image = keras.layers.Input(shape=(28, 28, 3), name='image')
      c1 = keras.layers.Conv2D(
          name='conv1',
          filters=16,
Reported by Pylint.
Line: 38
Column: 1
                distribution=None,
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      image = keras.layers.Input(shape=(28, 28, 3), name='image')
      c1 = keras.layers.Conv2D(
          name='conv1',
          filters=16,
          kernel_size=(3, 3),
Reported by Pylint.
Line: 39
Column: 1
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      image = keras.layers.Input(shape=(28, 28, 3), name='image')
      c1 = keras.layers.Conv2D(
          name='conv1',
          filters=16,
          kernel_size=(3, 3),
          strides=(4, 4),
Reported by Pylint.
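Context for the `get_model` entries: the report cuts off partway through a small functional-API CNN. A completed sketch in the same style; the layers after `conv1` are illustrative, not taken from the file:

import tensorflow as tf

image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(
    name='conv1', filters=16, kernel_size=(3, 3), strides=(4, 4))(image)
flat = tf.keras.layers.Flatten()(c1)       # assumed
logits = tf.keras.layers.Dense(10)(flat)   # assumed
model = tf.keras.Model(inputs=image, outputs=logits)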