The following issues were found:
keras/layers/normalization/batch_normalization.py
539 issues
Line: 17
Column: 1
# ==============================================================================
"""The V2 implementation of Normalization layers."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
Reported by Pylint.
Line: 25
Column: 1
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import control_flow_util
from tensorflow.python.ops.control_flow_ops import get_enclosing_xla_context
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
class BatchNormalizationBase(Layer):
Reported by Pylint.
Line: 26
Column: 1
from keras.engine.input_spec import InputSpec
from keras.utils import control_flow_util
from tensorflow.python.ops.control_flow_ops import get_enclosing_xla_context
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
class BatchNormalizationBase(Layer):
r"""Layer that normalizes its inputs.
Reported by Pylint.
Line: 27
Column: 1
from keras.utils import control_flow_util
from tensorflow.python.ops.control_flow_ops import get_enclosing_xla_context
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
class BatchNormalizationBase(Layer):
r"""Layer that normalizes its inputs.
Reported by Pylint.
Line: 617
Column: 1
train_op = _fused_batch_norm_training
if use_fused_avg_updates and input_batch_size is not None:
# pylint: disable=g-long-lambda
train_op = lambda: control_flow_util.smart_cond(
input_batch_size > 0, _fused_batch_norm_training,
_fused_batch_norm_training_empty)
# pylint: enable=g-long-lambda
Reported by Pylint.
Line: 621
Column: 1
train_op = lambda: control_flow_util.smart_cond(
input_batch_size > 0, _fused_batch_norm_training,
_fused_batch_norm_training_empty)
# pylint: enable=g-long-lambda
output, mean, variance = control_flow_util.smart_cond(
training, train_op, _fused_batch_norm_inference)
variance = _maybe_add_or_remove_bessels_correction(variance, remove=True)
Reported by Pylint.
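Note: the two entries above quote the selection between the fused training and inference batch-norm ops. A minimal sketch of the smart_cond pattern they rely on (a stand-in implementation, not the Keras helper):

import tensorflow as tf

def smart_cond(pred, true_fn, false_fn):
    # A Python-constant predicate is resolved while tracing; a tensor
    # predicate becomes a runtime tf.cond.
    if isinstance(pred, (bool, int)):
        return true_fn() if pred else false_fn()
    return tf.cond(pred, true_fn, false_fn)

train_branch = lambda: tf.constant(1.0)  # stands in for _fused_batch_norm_training
infer_branch = lambda: tf.constant(0.0)  # stands in for _fused_batch_norm_inference
print(smart_cond(True, train_branch, infer_branch))                # resolved statically: 1.0
print(smart_cond(tf.constant(False), train_branch, infer_branch))  # runtime tf.cond: 0.0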
Line: 811
Column: 1
# Determine a boolean value for `training`: could be True, False, or None.
training_value = control_flow_util.constant_value(training)
if training_value == False: # pylint: disable=singleton-comparison,g-explicit-bool-comparison
mean, variance = self.moving_mean, self.moving_variance
else:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(tf.shape(inputs))
# Adjust only during training.
Reported by Pylint.
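Note: when `training` resolves to False above, the layer normalizes with its stored moving statistics. A small numeric sketch of that inference formula (illustrative values, not from the report):

import numpy as np

x = np.array([0.5, 1.5], dtype=np.float32)
moving_mean, moving_variance = 1.0, 4.0   # stored moving statistics
gamma, beta, epsilon = 1.0, 0.0, 1e-3     # scale, offset, numerical-stability term
y = gamma * (x - moving_mean) / np.sqrt(moving_variance + epsilon) + beta
print(y)  # approximately [-0.25, 0.25]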
Line: 962
Column: 1
return dict(list(base_config.items()) + list(config.items()))
# pylint: disable=g-classes-have-attributes
@keras_export('keras.layers.experimental.SyncBatchNormalization', v1=[])
class SyncBatchNormalization(BatchNormalizationBase):
r"""Normalize and scale inputs or activations synchronously across replicas.
Applies batch normalization to activations of the previous layer at each batch
Reported by Pylint.
Line: 253
Column: 3
if self.adjustment is not None:
raise ValueError('Passing `fused=True` is not supported when '
'`adjustment` is specified.')
# TODO(reedwm): Support fp64 in FusedBatchNorm then remove this check.
if self._compute_dtype not in ('float16', 'bfloat16', 'float32', None):
raise ValueError(
'Passing `fused=True` is only supported when the compute '
'dtype is float16, bfloat16, or float32. Got dtype: %s' %
(self._compute_dtype,))
Reported by Pylint.
Line: 287
Column: 3
if not tf.distribute.has_strategy():
return False
strategy = tf.distribute.get_strategy()
# TODO(b/195085185): remove experimental_enable_get_next_as_optional after
# migrating all users.
return getattr(
strategy.extended, 'enable_partial_batch_handling',
getattr(strategy.extended, 'experimental_enable_get_next_as_optional',
False))
Reported by Pylint.
keras/mixed_precision/loss_scale_optimizer.py
533 issues
Line: 23
Column: 1
from keras.optimizer_v2 import optimizer_v2
from keras.optimizer_v2 import utils as optimizer_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
Reported by Pylint.
Line: 24
Column: 1
from keras.optimizer_v2 import utils as optimizer_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
Reported by Pylint.
Line: 25
Column: 1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer:
Reported by Pylint.
Line: 26
Column: 1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer:
"""Wrapper that DistributionStrategy will not unwrap.
Reported by Pylint.
Line: 27
Column: 1
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras.optimizer_v2 import optimizer_v2 as legacy_optimizer
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer:
"""Wrapper that DistributionStrategy will not unwrap.
Reported by Pylint.
Line: 373
Column: 1
_DEFAULT_GROWTH_STEPS = 2000
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.LossScaleOptimizer')
class LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling to prevent numeric underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
Reported by Pylint.
Line: 375
Column: 1
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.LossScaleOptimizer')
class LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling to prevent numeric underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
Reported by Pylint.
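Note: a minimal sketch of the manual loss-scaling workflow this docstring describes, using the public get_scaled_loss / get_unscaled_gradients API of TF 2.x (the variable and loss here are illustrative):

import tensorflow as tf

opt = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(learning_rate=0.1), dynamic=True)
w = tf.Variable(2.0)

with tf.GradientTape() as tape:
    loss = w * w
    # Scale the loss up so small float16 gradients do not flush to zero.
    scaled_loss = opt.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, [w])
# Unscale before applying, so the update uses the true gradient.
grads = opt.get_unscaled_gradients(scaled_grads)
opt.apply_gradients(zip(grads, [w]))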
Line: 500
Column: 3
_HAS_AGGREGATE_GRAD = True
def __init__(self, inner_optimizer, dynamic=True, initial_scale=None,
dynamic_growth_steps=None):
if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
msg = ('"inner_optimizer" must be an instance of '
'`tf.keras.optimizers.Optimizer`, but got: %s. ' % inner_optimizer)
if isinstance(inner_optimizer, legacy_optimizer.OptimizerV2):
Reported by Pylint.
Line: 522
Column: 3
'LossScaleOptimizer, but got: %s' % (inner_optimizer,))
self._raise_if_strategy_unsupported()
if getattr(inner_optimizer, '_is_wrapped_by_loss_scale_optimizer', False):
# TODO(reedwm): Maybe support this. The difficulty is that LSO has the
# same checkpoint format as the inner optimizer, so multiple LSOs wrapping
# the same optimizer causes the checkpointing logic to become confused.
raise ValueError('"inner_optimizer" is already wrapped by a '
'LossScaleOptimizer. An optimizer can only be wrapped '
'by a single LossScaleOptimizer')
Reported by Pylint.
keras/legacy_tf_layers/core_test.py
530 issues
Line: 21
Column: 1
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import collections
import platform
from absl.testing import parameterized
Reported by Pylint.
Line: 26
Column: 1
import collections
import platform
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras.legacy_tf_layers import core as core_layers
from tensorflow.python.ops import variable_scope
Reported by Pylint.
Line: 28
Column: 1
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras.legacy_tf_layers import core as core_layers
from tensorflow.python.ops import variable_scope
Reported by Pylint.
Line: 31
Column: 1
from tensorflow.python.framework import test_util
from keras import combinations
from keras.legacy_tf_layers import core as core_layers
from tensorflow.python.ops import variable_scope
class DenseTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
Reported by Pylint.
Line: 243
Column: 3
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = tf.compat.v1.trainable_variables()
Reported by Pylint.
Line: 253
Column: 3
vars2 = tf.compat.v1.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope('scope'):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
Reported by Pylint.
Line: 23
Column: 1
import tensorflow.compat.v2 as tf
import collections
import platform
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
Reported by Pylint.
Line: 24
Column: 1
import tensorflow.compat.v2 as tf
import collections
import platform
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
Reported by Pylint.
Line: 34
Column: 1
from tensorflow.python.ops import variable_scope
class DenseTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name='my_dense')
self.assertEqual(dense.units, 2)
Reported by Pylint.
keras/layers/tensorflow_op_layer_test.py
528 issues
Line: 17
Column: 1
# ==============================================================================
"""Test for allowing TF ops to work with Keras Functional API."""
import tensorflow.compat.v2 as tf
import time
from absl.testing import parameterized
import numpy as np
Reported by Pylint.
Line: 21
Column: 1
import time
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
Reported by Pylint.
Line: 150
Column: 22
# of the max tensor size Keras can try inferring values for.
inputs = keras.Input(batch_size=2, shape=(10,))
batch_size = tf.shape(inputs)[0]
num_features = int(keras_tensor._MAX_TENSOR_RANK / int(inputs.shape[0]))
x = tf.range(batch_size * num_features, dtype='int32')
assert x.shape.as_list() == [keras_tensor._MAX_TENSOR_RANK]
# Verify that a value was actually inferred for a tensor that *might*
# represent the shape, by checking that a value in
Reported by Pylint.
Line: 152
Column: 32
batch_size = tf.shape(inputs)[0]
num_features = int(keras_tensor._MAX_TENSOR_RANK / int(inputs.shape[0]))
x = tf.range(batch_size * num_features, dtype='int32')
assert x.shape.as_list() == [keras_tensor._MAX_TENSOR_RANK]
# Verify that a value was actually inferred for a tensor that *might*
# represent the shape, by checking that a value in
# the range appears in the printed inferred value
if tf.compat.v1.executing_eagerly_outside_functions():
Reported by Pylint.
Line: 158
Column: 16
# represent the shape, by checking that a value in
# the range appears in the printed inferred value
if tf.compat.v1.executing_eagerly_outside_functions():
assert str(keras_tensor._MAX_TENSOR_RANK - 1) in str(x)
x = tf.reshape(x, (batch_size, num_features))
x = tf.cast(x, dtype='float32')
outputs = keras.layers.Dense(10)(x)
if tf.executing_eagerly():
Reported by Pylint.
Line: 230
Column: 3
class MyAdd(keras.layers.Layer):
def call(self, x, y):
return x + y
def _layer_with_tensor_arg():
inputs = keras.Input(shape=(10,))
Reported by Pylint.
Line: 244
Column: 5
class LayerWithLayer(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
Reported by Pylint.
Line: 245
Column: 5
def build(self, input_shape):
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
return self.layer(inputs)
Reported by Pylint.
Line: 247
Column: 3
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
return self.layer(inputs)
Reported by Pylint.
Line: 450
Column: 3
if tf.compat.v1.executing_eagerly_outside_functions():
self.assertIn('tf.__operators__.getitem', (
x.name for x in model.layers))
# TODO(b/161925288): Fix the dispatch triggering then uncomment:
# self.assertNotIn('tf.strided_slice', (
# x.name for x in model.layers))
self.assertAllEqual(model(args), expected)
self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)
Reported by Pylint.
keras/optimizer_v2/gradient_descent_test.py
506 issues
Line: 17
Column: 1
# ==============================================================================
"""Functional test for GradientDescent."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import gradient_descent
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
Reported by Pylint.
Line: 21
Column: 1
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
class GradientDescentOptimizerTest(tf.test.TestCase, parameterized.TestCase):
Reported by Pylint.
Line: 22
Column: 1
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
class GradientDescentOptimizerTest(tf.test.TestCase, parameterized.TestCase):
Reported by Pylint.
Line: 23
Column: 1
import numpy as np
from keras import combinations
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
class GradientDescentOptimizerTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
Reported by Pylint.
Line: 139
Column: 3
self.assertAllCloseAccordingToType([3.0 - 1.0], self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in [tf.half, tf.float32, tf.float64]:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
var1 = tf.Variable([3.0], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
Reported by Pylint.
Line: 181
Column: 3
self.evaluate(var1))
def testGradWrtRef(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in [tf.half, tf.float32, tf.float64]:
opt = gradient_descent.SGD(3.0)
values = [1.0, 3.0]
vars_ = [tf.Variable([v], dtype=dtype) for v in values]
Reported by Pylint.
Line: 188
Column: 26
values = [1.0, 3.0]
vars_ = [tf.Variable([v], dtype=dtype) for v in values]
loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop
grads_and_vars = opt._compute_gradients(loss, vars_)
self.evaluate(tf.compat.v1.global_variables_initializer())
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
def testSparseBasic(self):
Reported by Pylint.
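Note: the expectation in this test (each gradient equals 1.0) follows from d(v0 + v1)/dv_i = 1. A minimal eager-mode check with tf.GradientTape standing in for the test's _compute_gradients:

import tensorflow as tf

v0, v1 = tf.Variable([1.0]), tf.Variable([3.0])
with tf.GradientTape() as tape:
    loss = v0 + v1
grads = tape.gradient(loss, [v0, v1])
print([g.numpy() for g in grads])  # [array([1.], dtype=float32), array([1.], dtype=float32)]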
Line: 194
Column: 3
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
def testSparseBasic(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in [tf.half, tf.float32, tf.float64]:
var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = tf.IndexedSlices(
Reported by Pylint.
Line: 217
Column: 3
self.evaluate(var1))
def testSparseBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in [tf.half, tf.float32, tf.float64]:
var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = tf.IndexedSlices(
Reported by Pylint.
keras/tests/tracking_util_with_v1_optimizers_test.py
496 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for object-based saving which use tf.train.* optimizers."""
import tensorflow.compat.v2 as tf
import functools
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
Reported by Pylint.
Line: 21
Column: 1
import functools
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
Reported by Pylint.
Line: 22
Column: 1
import functools
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
Reported by Pylint.
Line: 23
Column: 1
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
Reported by Pylint.
Line: 24
Column: 1
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
Reported by Pylint.
Line: 25
Column: 1
from tensorflow.python.framework import test_util
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
Reported by Pylint.
Line: 26
Column: 1
from keras import combinations
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tf.Module):
Reported by Pylint.
Line: 27
Column: 1
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tf.Module):
Reported by Pylint.
Line: 28
Column: 1
from keras import testing_utils
from keras.engine import training
from keras.layers import core
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tf.Module):
def __init__(self):
Reported by Pylint.
Line: 151
Column: 17
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
Reported by Pylint.
keras/distribute/distributed_training_utils_v1.py
496 issues
Line: 17
Column: 1
# ==============================================================================
"""Utilities related to distributed training."""
import tensorflow.compat.v2 as tf
# pylint:disable=protected-access
import functools
import numpy as np
Reported by Pylint.
Line: 33
Column: 1
from keras.optimizer_v2 import optimizer_v2
from keras.utils import tf_contextlib
from keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
def set_weights(distribution_strategy, dist_model, weights):
"""Sets the weights of the replicated models.
Reported by Pylint.
Line: 214
Column: 1
List of values of all the PerReplica objects.
"""
# pylint: disable=g-complex-comprehension
# This function takes a PerReplica object or a list of PerReplica objects and
# returns all the values associated with it.
return [e for flattened in tf.nest.flatten(per_replica_values)
for e in distribution_strategy.unwrap(flattened)]
Reported by Pylint.
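Note: a pure-Python sketch of the flattening comprehension quoted above, with tuples standing in for PerReplica containers and a plain function for distribution_strategy.unwrap:

per_replica_values = [('gpu0_a', 'gpu1_a'), ('gpu0_b', 'gpu1_b')]

def unwrap(per_replica):
    # Stand-in for distribution_strategy.unwrap: the per-device values.
    return list(per_replica)

flat = [v for container in per_replica_values for v in unwrap(container)]
print(flat)  # ['gpu0_a', 'gpu1_a', 'gpu0_b', 'gpu1_b']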
Line: 695
Column: 1
A new model with shared layers with the old model.
"""
# Need to do imports here since we run into a circular dependency error.
from keras import models # pylint: disable=g-import-not-at-top
from keras.engine import sequential # pylint: disable=g-import-not-at-top
# We rely on the internal methods to avoid having share_weights weights in the
# public API.
if isinstance(model, sequential.Sequential):
Reported by Pylint.
Line: 696
Column: 1
"""
# Need to do imports here since we run into a circular dependency error.
from keras import models # pylint: disable=g-import-not-at-top
from keras.engine import sequential # pylint: disable=g-import-not-at-top
# We rely on the internal methods to avoid having share_weights weights in the
# public API.
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(
Reported by Pylint.
Line: 753
Column: 1
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
Reported by Pylint.
Line: 969
Column: 5
# First we gather the relevant portions of the model across all replicas.
# `backend._scratch_graph(global_graph)` signals to Keras that it should not
# lift to a separate graph when creating the per-replica functions.
with backend._scratch_graph(global_graph):
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
grouped = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),))
grouped_inputs, grouped_outputs = grouped
Reported by Pylint.
Line: 1064
Column: 3
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
with strategy.scope(), backend.learning_phase_scope(learning_phase):
yield
def is_current_worker_chief():
return dc.get_current_worker_context().is_chief
Reported by Pylint.
Line: 117
Column: 3
all_session_args['fetches'] = flatten_per_replica_values(
distribution_strategy, grouped_fetches)
# TODO(priyag): Return only non empty/None values
return all_inputs, all_outputs, all_updates, all_session_args
def unwrap_output_dict(strategy, grouped_outputs, mode):
"""Unwrap the list of outputs contained in the PerReplica parameters."""
Reported by Pylint.
Line: 461
Column: 3
ValueError: If the number of batches or steps evaluates to 0.
"""
# TODO(b/118776054): Use global batch size for Keras/DS support.
# Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
use_per_replica_batch = not dist_utils.global_batch_size_supported(
distribution_strategy)
# TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for
Reported by Pylint.
keras/saving/save_weights_test.py
465 issues
Line: 17
Column: 1
# =============================================================================
"""Tests for model saving in the HDF5 format."""
import tensorflow.compat.v2 as tf
import os
import shutil
import uuid
Reported by Pylint.
Line: 23
Column: 1
import shutil
import uuid
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras import keras_parameterized
Reported by Pylint.
Line: 35
Column: 1
from keras.saving import hdf5_format
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
Reported by Pylint.
Line: 358
Column: 3
save_format = testing_utils.get_save_format()
if save_format == 'h5' and testing_utils.get_model_type() == 'subclass':
# TODO(b/173646281): HDF5 format currently does not allow saving
# subclassed models.
return
with self.cached_session():
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
Reported by Pylint.
Line: 377
Column: 1
self.assertAllClose(model.weights, new_model.weights)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
Reported by Pylint.
Line: 384
Column: 3
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(tf.test.TestCase, parameterized.TestCase):
Reported by Pylint.
Line: 580
Column: 5
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
Reported by Pylint.
Line: 588
Column: 7
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore)
Reported by Pylint.
Line: 601
Column: 5
m = DummySubclassModel()
with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
m.load_weights(save_path)
m.dense = keras.layers.Dense(2)
m.dense(tf.constant([[1.]]))
with self.assertRaisesRegex(AssertionError,
'Nothing except the root object matched'):
m.load_weights(save_path)
Reported by Pylint.
Line: 663
Column: 1
self.assertEqual(42., self.evaluate(v))
class DummySubclassModel(training.Model):
pass
if __name__ == '__main__':
tf.test.main()
Reported by Pylint.
keras/layers/dense_attention_test.py
458 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests dense attention layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras import testing_utils
Reported by Pylint.
Line: 40
Column: 29
v = np.array([[[1.6]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 1]
scores_mask = np.array([[[True]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
Reported by Pylint.
Line: 56
Column: 29
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
Reported by Pylint.
Line: 74
Column: 29
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 3]
scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax scores = softmax(scores) with zeros in positions where
# v_mask == False.
# => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
Reported by Pylint.
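Note: a NumPy check of the masked-softmax expectation worked out above, assuming the scores tensor is [[[1., 0., 1.]]] as in the unmasked variant of this test:

import numpy as np

scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
scores_mask = np.array([[[True, True, False]]])
masked = np.where(scores_mask, scores, -1e9)  # suppress masked logits
weights = np.exp(masked) / np.exp(masked).sum(axis=-1, keepdims=True)
print(weights[0, 0, 0])  # ~0.73105857863 = exp(1) / (exp(1) + exp(0))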
Line: 96
Column: 29
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected softmax_scores = softmax(scores).
# => softmax_scores000 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
Reported by Pylint.
Line: 123
Column: 29
v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
# Scores mask tensor of shape [2, 1, 1]
scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax_scores = [[[1]], [[1]]]
expected_scores = np.array([[[1.]], [[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
Reported by Pylint.
Line: 144
Column: 29
dim = 7
scores = np.ones((batch_size, tq, tv))
value = np.ones((batch_size, tv, dim))
actual, actual_scores = dense_attention.BaseDenseAttention(
dropout=0.1)._apply_scores(
scores=scores, value=value, training=False)
# Expected Tensor of shape `[batch_size, tq, tv]`.
expected_scores_shape = [batch_size, tq, tv]
Reported by Pylint.
Line: 178
Column: 14
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 1.1*1.6 = 1.76
expected = np.array([[[1.76]]], dtype=np.float32)
self.assertAllClose(expected, actual)
Reported by Pylint.
Line: 194
Column: 14
dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 2, 3].
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64
# expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24
# expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84
Reported by Pylint.
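Note: a NumPy check of the dot-product scores worked out in the comments above, using the first query row they imply (the full test query has shape [1, 2, 4]):

import numpy as np

q = np.array([[[1.0, 1.1, 1.2, 1.3]]], dtype=np.float32)
k = np.array([[[1.5, 1.6, 1.7, 1.8],
               [2.5, 2.6, 2.7, 2.8],
               [3.5, 3.6, 3.7, 3.8]]], dtype=np.float32)
scores = np.einsum('bqd,bkd->bqk', q, k)  # batched Q @ K^T
print(scores[0, 0])  # [ 7.64 12.24 16.84]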
keras/layers/wrappers.py
454 issues
Line: 16
Column: 1
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-classes-have-attributes
"""Wrapper layers: layers that augment the functionality of another layer."""
import tensorflow.compat.v2 as tf
import copy
Reported by Pylint.
Line: 19
Column: 1
# pylint: disable=g-classes-have-attributes
"""Wrapper layers: layers that augment the functionality of another layer."""
import tensorflow.compat.v2 as tf
import copy
from keras import backend
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
Reported by Pylint.
Line: 30
Column: 1
from keras.utils import layer_utils
from keras.utils import tf_inspect
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Wrapper')
class Wrapper(Layer):
"""Abstract wrapper base class.
Reported by Pylint.
Line: 70
Column: 1
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
# Avoid mutating the input dict
config = copy.deepcopy(config)
layer = deserialize_layer(
config.pop('layer'), custom_objects=custom_objects)
return cls(layer, **config)
Reported by Pylint.
Line: 228
Column: 1
input_length=row_lengths[0] if is_ragged_input else input_length,
mask=mask,
unroll=False)
# pylint: disable=g-long-lambda
y = tf.nest.map_structure(
lambda output: backend.maybe_convert_to_ragged(
is_ragged_input, output, row_lengths), outputs)
else:
# No batch size specified, therefore the layer will be able
Reported by Pylint.
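Note: for context, the wrapper whose reshaping logic is quoted above applies its inner layer to every timestep independently. A minimal usage sketch:

import tensorflow as tf

inputs = tf.keras.Input(shape=(5, 8))  # (batch, timesteps, features)
outputs = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(4))(inputs)
print(outputs.shape)  # (None, 5, 4): Dense(4) applied per timestep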
Line: 271
Column: 1
# Shape: (num_samples, timesteps, ...)
output_shape = self.compute_output_shape(input_shape)
# pylint: disable=g-long-lambda
output_shape = tf.nest.map_structure(
lambda tensor, int_shape: self._get_shape_tuple(
(-1, input_length), tensor, 1, int_shape[2:]), y, output_shape)
y = tf.__internal__.nest.map_structure_up_to(y, tf.reshape, y, output_shape)
if not tf.executing_eagerly():
Reported by Pylint.
Line: 367
Column: 35
output_mask_int_shape = backend.int_shape(mask)
else:
input_shape = generic_utils.to_list(tf.nest.flatten(input_shape))[0]
output_mask_int_shape = backend.compute_output_shape(input_shape)[:-1]
output_mask_shape = self._get_shape_tuple(
(-1, input_length), output_mask, 1, output_mask_int_shape[1:])
output_mask = backend.reshape(output_mask, output_mask_shape)
return output_mask
Reported by Pylint.
Line: 792
Column: 1
config = copy.deepcopy(config)
num_constants = config.pop('num_constants', 0)
# Handle forward layer instantiation (as would parent class).
from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
config['layer'] = deserialize_layer(
config['layer'], custom_objects=custom_objects)
# Handle (optional) backward layer instantiation.
backward_layer_config = config.pop('backward_layer', None)
if backward_layer_config is not None:
Reported by Pylint.
Line: 69
Column: 3
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
# Avoid mutating the input dict
config = copy.deepcopy(config)
layer = deserialize_layer(
config.pop('layer'), custom_objects=custom_objects)
Reported by Pylint.
Line: 169
Column: 3
dims = dims.as_list()
return tf.TensorShape([dims[0]] + dims[2:])
def build(self, input_shape):
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
input_dims = tf.nest.flatten(
tf.nest.map_structure(lambda x: x.ndims, input_shape))
if any(dim < 3 for dim in input_dims):
raise ValueError(
Reported by Pylint.