The following issues were found:
keras/engine/training_eager_v1.py
124 issues
Line: 17
Column: 1
# ==============================================================================
"""Keras training and evaluation routines for eager execution."""
import tensorflow.compat.v2 as tf
# pylint: disable=protected-access
import numpy as np
from tensorflow.python.eager.backprop import GradientTape
Reported by Pylint.
Line: 22
Column: 1
import numpy as np
from tensorflow.python.eager.backprop import GradientTape
from keras import backend
from keras.engine import training_utils
from keras.engine import training_utils_v1
from keras.mixed_precision import loss_scale_optimizer
from keras.utils import losses_utils
Reported by Pylint.
Line: 28
Column: 1
from keras.engine import training_utils_v1
from keras.mixed_precision import loss_scale_optimizer
from keras.utils import losses_utils
from tensorflow.python.platform import tf_logging as logging
def _eager_loss_fn(outputs, targets, loss_fn, output_name):
  with backend.name_scope(output_name + '_loss'):
    loss = loss_fn(targets, outputs)
Reported by Pylint.
Line: 247
Column: 3
  Raises:
      ValueError: If the model has no loss to optimize.
  """
  with backend.eager_learning_phase_scope(1 if training else 0), \
      training_utils.RespectCompiledTrainableState(model):
    with GradientTape() as tape:
      outs, total_loss, output_losses, masks = (
          _model_loss(
              model,
Reported by Pylint.
Line: 347
Column: 3
"""
inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)
with backend.eager_learning_phase_scope(0):
outs, total_loss, output_losses, masks = (
_model_loss(
model,
inputs,
targets,
Reported by Pylint.
Line: 104
Column: 3
      regularization losses and applies masking and sample weighting
      to the loss value.
  """
  # TODO(psv): Dedup code here with graph mode prepare_total_loss() fn.
  # Used to keep track of the total loss value (stateless).
  # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
  #                   loss_weight_2 * output_2_loss_fn(...) +
  #                   layer losses.
  total_loss = 0
Reported by Pylint.
Line: 128
Column: 3
  if targets:
    targets = training_utils_v1.cast_if_floating_dtype_and_mismatch(
        targets, outs)
  # TODO(sallymatson/psv): check if we should do same mismatch fix for weights
  if sample_weights:
    sample_weights = [
        training_utils_v1.cast_if_floating_dtype(
            tf.convert_to_tensor(val))
        if val is not None else None for val in sample_weights
Reported by Pylint.
Line: 168
Column: 13
          else:
            # Update dimensions of weights to match with mask if possible.
            weights = tf.cast(weights, outs[i].dtype)
            mask, _, weights = (
                losses_utils.squeeze_or_expand_dimensions(
                    mask, sample_weight=weights))
            weights *= mask

        if hasattr(loss_fn, 'reduction'):
Reported by Pylint.
Line: 265
Column: 3
    if training:
      trainable_weights = model.trainable_weights
      if trainable_weights:
        # TODO(tanzheny) b/132690565: Provide mechanism for user to override
        # model.train_on_batch.
        if hasattr(model, '_backwards'):
          model._backwards(tape, scaled_total_loss)
        else:
          grads = tape.gradient(scaled_total_loss, trainable_weights)
Reported by Pylint.
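Context for the entries above: training_eager_v1.py implements the eager train step these snippets come from. A minimal sketch of the same GradientTape pattern, using only public TF APIs; train_step, loss_fn, and the model here are illustrative stand-ins, not the Keras internals being flagged:

import tensorflow as tf

def train_step(model, optimizer, loss_fn, inputs, targets):
  # Forward pass under a tape so gradients can be taken afterwards.
  with tf.GradientTape() as tape:
    outputs = model(inputs, training=True)
    loss = loss_fn(targets, outputs)
    # Fold in layer regularization losses, as _model_loss does.
    if model.losses:
      loss += tf.add_n(model.losses)
  # Differentiate w.r.t. the trainable weights and apply the update.
  grads = tape.gradient(loss, model.trainable_weights)
  optimizer.apply_gradients(zip(grads, model.trainable_weights))
  return loss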
keras/combinations_test.py
123 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for Keras combinations."""
import tensorflow.compat.v2 as tf
import unittest
from absl.testing import parameterized
from keras import combinations
from keras import models as keras_models
Reported by Pylint.
Line: 20
Column: 1
import tensorflow.compat.v2 as tf
import unittest
from absl.testing import parameterized
from keras import combinations
from keras import models as keras_models
from keras import testing_utils
Reported by Pylint.
Line: 115
Column: 21
    ])
    # Validate that the models are what they should be
    self.assertTrue(models[0]._is_graph_network)
    self.assertFalse(models[1]._is_graph_network)
    self.assertNotIsInstance(models[0], keras_models.Sequential)
    self.assertNotIsInstance(models[1], keras_models.Sequential)
    self.assertIsInstance(models[2], keras_models.Sequential)
Reported by Pylint.
Line: 116
Column: 22
    # Validate that the models are what they should be
    self.assertTrue(models[0]._is_graph_network)
    self.assertFalse(models[1]._is_graph_network)
    self.assertNotIsInstance(models[0], keras_models.Sequential)
    self.assertNotIsInstance(models[1], keras_models.Sequential)
    self.assertIsInstance(models[2], keras_models.Sequential)
    ts = unittest.makeSuite(ExampleTest)
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
import unittest
from absl.testing import parameterized
from keras import combinations
from keras import models as keras_models
from keras import testing_utils
Reported by Pylint.
Line: 26
Column: 1
from keras import testing_utils
class CombinationsTest(tf.test.TestCase):
  def test_run_all_keras_modes(self):
    test_params = []
    class ExampleTest(parameterized.TestCase):
Reported by Pylint.
Line: 28
Column: 1
class CombinationsTest(tf.test.TestCase):
  def test_run_all_keras_modes(self):
    test_params = []
    class ExampleTest(parameterized.TestCase):
      def runTest(self):
Reported by Pylint.
Line: 28
Column: 3
class CombinationsTest(tf.test.TestCase):
  def test_run_all_keras_modes(self):
    test_params = []
    class ExampleTest(parameterized.TestCase):
      def runTest(self):
Reported by Pylint.
Line: 29
Column: 1
class CombinationsTest(tf.test.TestCase):
  def test_run_all_keras_modes(self):
    test_params = []
    class ExampleTest(parameterized.TestCase):
      def runTest(self):
        pass
Reported by Pylint.
Line: 31
Column: 5
  def test_run_all_keras_modes(self):
    test_params = []
    class ExampleTest(parameterized.TestCase):
      def runTest(self):
        pass
      @combinations.generate(combinations.keras_mode_combinations())
Reported by Pylint.
keras/engine/functional_utils_test.py
123 issues
Line: 27
Column: 1
from keras.engine import input_layer as input_layer_lib
import numpy as np
import tensorflow.compat.v2 as tf
class FunctionalModelSlideTest(keras_parameterized.TestCase):
  def test_find_nodes_by_inputs_and_outputs(self):
Reported by Pylint.
Line: 30
Column: 1
import tensorflow.compat.v2 as tf
class FunctionalModelSlideTest(keras_parameterized.TestCase):
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
Reported by Pylint.
Line: 32
Column: 3
class FunctionalModelSlideTest(keras_parameterized.TestCase):
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
Reported by Pylint.
Line: 32
Column: 1
class FunctionalModelSlideTest(keras_parameterized.TestCase):
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
Reported by Pylint.
Line: 33
Column: 1
class FunctionalModelSlideTest(keras_parameterized.TestCase):
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
Reported by Pylint.
Line: 34
Column: 1
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
    nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(
Reported by Pylint.
Line: 35
Column: 1
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
    nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(
        x, output)
Reported by Pylint.
Line: 35
Column: 5
  def test_find_nodes_by_inputs_and_outputs(self):
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
    nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(
        x, output)
Reported by Pylint.
Line: 36
Column: 1
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
    nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(
        x, output)
    self.assertLen(nodes_in_graph, 2)
Reported by Pylint.
Line: 36
Column: 5
    inputs = input_layer_lib.Input((10,))
    unconnected_inputs = input_layer_lib.Input((10,))
    x = layers.Dense(8)(inputs)
    y = layers.Dense(6)(x)
    output = layers.Dense(4)(y)
    nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(
        x, output)
    self.assertLen(nodes_in_graph, 2)
Reported by Pylint.
keras/layers/einsum_dense.py
122 issues
Line: 17
Column: 1
# ==============================================================================
"""Keras-based einsum dense layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import re
from keras import activations
from keras import constraints
Reported by Pylint.
Line: 18
Column: 1
"""Keras-based einsum dense layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import re
from keras import activations
from keras import constraints
from keras import initializers
Reported by Pylint.
Line: 26
Column: 1
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.experimental.EinsumDense")
class EinsumDense(Layer):
"""A layer that uses tf.einsum as the backing computation.
Reported by Pylint.
Line: 117
Column: 16
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(EinsumDense, self).__init__(**kwargs)
    self.equation = equation
Reported by Pylint.
Line: 142
Column: 31
        self.bias_axes,
        input_shape,
        self.partial_output_shape)
    kernel_shape, bias_shape, self.full_output_shape = shape_data
    self.kernel = self.add_weight(
        "kernel",
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
Reported by Pylint.
Line: 143
Column: 5
        input_shape,
        self.partial_output_shape)
    kernel_shape, bias_shape, self.full_output_shape = shape_data
    self.kernel = self.add_weight(
        "kernel",
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
Reported by Pylint.
Line: 153
Column: 7
        trainable=True)
    if bias_shape is not None:
      self.bias = self.add_weight(
          "bias",
          shape=bias_shape,
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
Reported by Pylint.
Line: 162
Column: 7
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    super(EinsumDense, self).build(input_shape)

  def compute_output_shape(self, _):
    return tf.TensorShape(self.full_output_shape)
Reported by Pylint.
Line: 186
Column: 3
    base_config = super(EinsumDense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    ret = tf.einsum(self.equation, inputs, self.kernel)
    if self.bias is not None:
      ret += self.bias
    if self.activation is not None:
      ret = self.activation(ret)
Reported by Pylint.
Line: 20
Column: 1
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import re
from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
Reported by Pylint.
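As the call() snippet above shows, EinsumDense computes tf.einsum(equation, inputs, kernel), then adds the optional bias and applies the activation. A hedged usage sketch; the shapes and equation are illustrative, not taken from the flagged file:

import tensorflow as tf

# "ab,bc->ac" contracts the last input axis against the kernel:
# (batch, 32) x (32, 64) -> (batch, 64); bias_axes="c" adds a bias
# on the output axis, making this equivalent to a Dense(64) layer.
layer = tf.keras.layers.experimental.EinsumDense(
    "ab,bc->ac", output_shape=64, bias_axes="c", activation="relu")
x = tf.random.normal([8, 32])
y = layer(x)  # shape (8, 64)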
keras/integration_test/tpu_strategy_test.py
122 issues
Line: 20
Column: 1
import random
import tempfile
from absl import flags
import tensorflow as tf
from tensorflow.python.framework import test_util
FLAGS = flags.FLAGS
Reported by Pylint.
Line: 22
Column: 1
from absl import flags
import tensorflow as tf
from tensorflow.python.framework import test_util
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
Reported by Pylint.
Line: 23
Column: 1
from absl import flags
import tensorflow as tf
from tensorflow.python.framework import test_util
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
Reported by Pylint.
Line: 139
Column: 1
"label": tf.TensorSpec([1], tf.dtypes.string)
}).shuffle(100).batch(32)
train_dataset = raw_dataset.map(lambda x: ( # pylint: disable=g-long-lambda
{
"features": feature_mapper(x["features"])
}, label_mapper(x["label"])))
return train_dataset
Reported by Pylint.
Line: 38
Column: 1
LABEL_VOCAB = ["yes", "no"]
def get_tpu_cluster_resolver():
  resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu,
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
Reported by Pylint.
Line: 39
Column: 1
def get_tpu_cluster_resolver():
  resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu,
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
  return resolver
Reported by Pylint.
Line: 44
Column: 1
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
  return resolver

def get_tpu_strategy():
  resolver = get_tpu_cluster_resolver()
  tf.config.experimental_connect_to_cluster(resolver)
Reported by Pylint.
Line: 47
Column: 1
  return resolver

def get_tpu_strategy():
  resolver = get_tpu_cluster_resolver()
  tf.config.experimental_connect_to_cluster(resolver)
  tf.tpu.experimental.initialize_tpu_system(resolver)
  return tf.distribute.experimental.TPUStrategy(resolver)
Reported by Pylint.
Line: 48
Column: 1
def get_tpu_strategy():
  resolver = get_tpu_cluster_resolver()
  tf.config.experimental_connect_to_cluster(resolver)
  tf.tpu.experimental.initialize_tpu_system(resolver)
  return tf.distribute.experimental.TPUStrategy(resolver)
Reported by Pylint.
Line: 49
Column: 1
def get_tpu_strategy():
  resolver = get_tpu_cluster_resolver()
  tf.config.experimental_connect_to_cluster(resolver)
  tf.tpu.experimental.initialize_tpu_system(resolver)
  return tf.distribute.experimental.TPUStrategy(resolver)

class TpuStrategyTest(tf.test.TestCase):
Reported by Pylint.
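The flagged helpers follow the standard TPU bring-up sequence: resolve the cluster, connect to it, initialize the TPU system, then construct the strategy. A hedged sketch of the same sequence outside the test harness (assumes a Cloud TPU environment; pass a TPU name or address where the empty string is used):

import tensorflow as tf

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

# Variables created under the scope are replicated across TPU cores.
with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])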
keras/layers/normalization/layer_normalization.py
122 issues
Line: 17
Column: 1
# ==============================================================================
"""Layer Normalization layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
from keras import constraints
from keras import initializers
from keras import regularizers
Reported by Pylint.
Line: 18
Column: 1
"""Layer Normalization layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
Reported by Pylint.
Line: 25
Column: 1
from keras import regularizers
from keras.engine.base_layer import Layer
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Reported by Pylint.
Line: 234
Column: 7
    param_shape = [input_shape[dim] for dim in self.axis]
    if self.scale:
      self.gamma = self.add_weight(
          name='gamma',
          shape=param_shape,
          initializer=self.gamma_initializer,
          regularizer=self.gamma_regularizer,
          constraint=self.gamma_constraint,
Reported by Pylint.
Line: 243
Column: 7
          trainable=True,
          experimental_autocast=False)
    else:
      self.gamma = None

    if self.center:
      self.beta = self.add_weight(
          name='beta',
          shape=param_shape,
Reported by Pylint.
Line: 246
Column: 7
      self.gamma = None

    if self.center:
      self.beta = self.add_weight(
          name='beta',
          shape=param_shape,
          initializer=self.beta_initializer,
          regularizer=self.beta_regularizer,
          constraint=self.beta_constraint,
Reported by Pylint.
Line: 255
Column: 7
          trainable=True,
          experimental_autocast=False)
    else:
      self.beta = None

    self._fused = self._fused_can_be_used(ndims)
    self.built = True
Reported by Pylint.
Line: 261
Column: 3
    self.built = True

  def call(self, inputs):
    # Compute the axes along which to reduce the mean / variance
    input_shape = inputs.shape
    ndims = len(input_shape)
    # Broadcasting only necessary for norm when the axis is not just
Reported by Pylint.
Line: 29
Column: 1
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
Reported by Pylint.
Line: 30
Column: 1
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
example close to 0 and the activation standard deviation close to 1.
Reported by Pylint.
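As the docstring quoted above explains, the layer normalizes each example independently rather than across the batch. A minimal sketch of the computation for axis=-1, assuming the layer's defaults (gamma=1, beta=0, epsilon=1e-3); the real layer additionally handles broadcasting and a fused path:

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0], [4.0, 10.0, 40.0]])
layer = tf.keras.layers.LayerNormalization(axis=-1)
y = layer(x)  # each row is normalized independently

# Equivalent by-hand computation at initialization:
mean = tf.reduce_mean(x, axis=-1, keepdims=True)
var = tf.math.reduce_variance(x, axis=-1, keepdims=True)
y_manual = (x - mean) / tf.sqrt(var + 1e-3)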
keras/preprocessing/timeseries_test.py
121 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for timeseries."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras.preprocessing import timeseries
Reported by Pylint.
Line: 23
Column: 1
from keras.preprocessing import timeseries
class TimeseriesDatasetTest(tf.test.TestCase):
  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
Reported by Pylint.
Line: 25
Column: 1
class TimeseriesDatasetTest(tf.test.TestCase):
  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
Reported by Pylint.
Line: 25
Column: 3
class TimeseriesDatasetTest(tf.test.TestCase):
  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
Reported by Pylint.
Line: 27
Column: 1
  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches
    for i, batch in enumerate(dataset):
Reported by Pylint.
Line: 28
Column: 1
  def test_basics(self):
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
Reported by Pylint.
Line: 29
Column: 1
    # Test ordering, targets, sequence length, batch size
    data = np.arange(100)
    targets = data * 2
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
Reported by Pylint.
Line: 32
Column: 1
    dataset = timeseries.timeseries_dataset_from_array(
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 18:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 18:
Reported by Pylint.
Line: 33
Column: 1
        data, targets, sequence_length=9, batch_size=5)
    # Expect 19 batches
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 18:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 18:
        # Last batch: size 2
Reported by Pylint.
Line: 34
Column: 1
    # Expect 19 batches
    for i, batch in enumerate(dataset):
      self.assertLen(batch, 2)
      inputs, targets = batch
      if i < 18:
        self.assertEqual(inputs.shape, (5, 9))
      if i == 18:
        # Last batch: size 2
        self.assertEqual(inputs.shape, (2, 9))
Reported by Pylint.
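The batch count the test expects follows from simple arithmetic: 100 data points with sequence_length=9 yield 100 - 9 + 1 = 92 windows, and batch_size=5 packs them into 18 full batches plus a final batch of 2, i.e. the 19 batches asserted above, with the last input of shape (2, 9). A minimal reproduction using the public API:

import numpy as np
import tensorflow as tf

data = np.arange(100)
targets = data * 2
# 92 windows of length 9, batched by 5 -> 18 full batches + 1 of size 2.
dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
    data, targets, sequence_length=9, batch_size=5)
print(sum(1 for _ in dataset))  # 19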
keras/layers/preprocessing/normalization.py
120 issues
Line: 17
Column: 1
# ==============================================================================
"""Normalization preprocessing layer."""
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
from keras import backend
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_utils as utils
Reported by Pylint.
Line: 18
Column: 1
"""Normalization preprocessing layer."""
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
from keras import backend
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_utils as utils
import numpy as np
Reported by Pylint.
Line: 24
Column: 1
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_utils as utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Normalization',
'keras.layers.experimental.preprocessing.Normalization')
Reported by Pylint.
Line: 25
Column: 1
from keras.layers.preprocessing import preprocessing_utils as utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Normalization',
'keras.layers.experimental.preprocessing.Normalization')
class Normalization(base_preprocessing_layer.PreprocessingLayer):
Reported by Pylint.
Line: 142
Column: 5
    # Axes to be kept, replacing negative values with positive equivalents.
    # Sorted to avoid transposing axes.
    self._keep_axis = sorted([d if d >= 0 else d + ndim for d in self.axis])
    # All axes to be kept should have known shape.
    for d in self._keep_axis:
      if input_shape[d] is None:
        raise ValueError(
            'All `axis` values to be kept must have known shape. Got axis: {}, '
Reported by Pylint.
Line: 151
Column: 5
            'input shape: {}, with unknown axis at index: {}'.format(
                self.axis, input_shape, d))
    # Axes to be reduced.
    self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis]
    # 1 if an axis should be reduced, 0 otherwise.
    self._reduce_axis_mask = [
        0 if d in self._keep_axis else 1 for d in range(ndim)
    ]
    # Broadcast any reduced axes.
Reported by Pylint.
Line: 153
Column: 5
    # Axes to be reduced.
    self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis]
    # 1 if an axis should be reduced, 0 otherwise.
    self._reduce_axis_mask = [
        0 if d in self._keep_axis else 1 for d in range(ndim)
    ]
    # Broadcast any reduced axes.
    self._broadcast_shape = [
        input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)
Reported by Pylint.
Line: 157
Column: 5
        0 if d in self._keep_axis else 1 for d in range(ndim)
    ]
    # Broadcast any reduced axes.
    self._broadcast_shape = [
        input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)
    ]
    mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)
    if self.input_mean is None:
Reported by Pylint.
Line: 163
Column: 7
    mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)
    if self.input_mean is None:
      self.adapt_mean = self.add_weight(
          name='mean',
          shape=mean_and_var_shape,
          dtype=self.dtype,
          initializer='zeros',
          trainable=False)
Reported by Pylint.
Line: 169
Column: 7
          dtype=self.dtype,
          initializer='zeros',
          trainable=False)
      self.adapt_variance = self.add_weight(
          name='variance',
          shape=mean_and_var_shape,
          dtype=self.dtype,
          initializer='ones',
          trainable=False)
Reported by Pylint.
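The build() code above decides which axes keep per-slot statistics (self._keep_axis) and which are reduced when mean and variance are accumulated. From the caller's side those statistics come from adapt(); a minimal hedged example with axis=-1, so one mean/variance pair is kept per feature column:

import numpy as np
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.Normalization(axis=-1)
data = np.array([[0.0, 10.0], [2.0, 20.0], [4.0, 30.0]], dtype="float32")
layer.adapt(data)  # accumulates per-column mean and variance
print(layer(data))  # each column now has mean ~0 and variance ~1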
keras/integration_test/gradient_checkpoint_test.py
115 issues
Line: 18
Column: 1
import gc
import tensorflow as tf
from tensorflow.python.platform import test as test_lib
layers = tf.keras.layers
optimizers = tf.keras.optimizers
Reported by Pylint.
Line: 20
Column: 1
import tensorflow as tf
from tensorflow.python.platform import test as test_lib
layers = tf.keras.layers
optimizers = tf.keras.optimizers
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 28
Column: 1
def _get_big_cnn_model(img_dim, n_channels, num_partitions,
                       blocks_per_partition):
  """Creates a test model whose activations are significantly larger than model size."""
  model = tf.keras.Sequential()
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
Reported by Pylint.
Line: 29
Column: 1
def _get_big_cnn_model(img_dim, n_channels, num_partitions,
                       blocks_per_partition):
  """Creates a test model whose activations are significantly larger than model size."""
  model = tf.keras.Sequential()
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
Reported by Pylint.
Line: 30
Column: 1
                       blocks_per_partition):
  """Creates a test model whose activations are significantly larger than model size."""
  model = tf.keras.Sequential()
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
Reported by Pylint.
Line: 31
Column: 1
"""Creates a test model whose activations are significantly larger than model size."""
model = tf.keras.Sequential()
model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
for _ in range(num_partitions):
for _ in range(blocks_per_partition):
model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
Reported by Pylint.
Line: 32
Column: 1
  model = tf.keras.Sequential()
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
Reported by Pylint.
Line: 33
Column: 1
  model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
Reported by Pylint.
Line: 34
Column: 1
  for _ in range(num_partitions):
    for _ in range(blocks_per_partition):
      model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
      model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
      model.add(layers.MaxPooling2D((1, 1), padding='same'))
  model.add(layers.Flatten())
Reported by Pylint.
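This test file builds models whose activations are much larger than their weights, which is the regime where gradient checkpointing pays off. A hedged sketch of the underlying technique via tf.recompute_grad, which drops a block's intermediate activations in the forward pass and recomputes them during backprop (illustrative only, not the test's own harness):

import tensorflow as tf

block = tf.keras.Sequential(
    [tf.keras.layers.Dense(256, activation="relu") for _ in range(4)])
# Activations inside the wrapped block are not kept for the backward
# pass; they are recomputed from the block's input when needed.
checkpointed = tf.recompute_grad(block)

x = tf.random.normal([32, 256])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.reduce_sum(checkpointed(x))
grads = tape.gradient(y, x)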
keras/optimizer_v2/adadelta_test.py
114 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for Adadelta Optimizer."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import adadelta
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import adadelta
_DATA_TYPES = [
Reported by Pylint.
Line: 21
Column: 1
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import adadelta
_DATA_TYPES = [
    tf.half, tf.float32, tf.float64, tf.complex64,
    tf.complex128
Reported by Pylint.
Line: 22
Column: 1
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras.optimizer_v2 import adadelta
_DATA_TYPES = [
    tf.half, tf.float32, tf.float64, tf.complex64,
    tf.complex128
]
Reported by Pylint.
Line: 106
Column: 3
    if not tf.executing_eagerly():
      # Check that the accumulators have been updated
      # TODO(lxuechen): This is hard to test in eager mode
      for slot_idx in range(2):
        self.assertAllCloseAccordingToType(
            np.array([accum, accum], dtype=dtype.as_numpy_dtype(0)),
            self.evaluate(slot[slot_idx]),
            rtol=1e-5)
Reported by Pylint.
Line: 144
Column: 3
    self.doTestBasic(use_resource=True, use_callable_params=True)

  def testMinimizeSparseResourceVariable(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with tf.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
Reported by Pylint.
Line: 30
Column: 1
]
class AdadeltaOptimizerTest(tf.test.TestCase, parameterized.TestCase):
  def doTestBasic(self, use_resource=False, use_callable_params=False):
    num_updates = 4  # number of ADADELTA steps to perform
    for dtype in _DATA_TYPES:
      for grad in [0.2, 0.1, 0.01]:
Reported by Pylint.
Line: 32
Column: 3
class AdadeltaOptimizerTest(tf.test.TestCase, parameterized.TestCase):
  def doTestBasic(self, use_resource=False, use_callable_params=False):
    num_updates = 4  # number of ADADELTA steps to perform
    for dtype in _DATA_TYPES:
      for grad in [0.2, 0.1, 0.01]:
        for lr in [1.0, 0.5, 0.1]:
          var0_init = [1.0, 2.0]
Reported by Pylint.
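For reference, Adadelta maintains decayed running averages of squared gradients and squared parameter updates (controlled by rho), which is what the accumulator checks above inspect. A minimal hedged usage example mirroring num_updates = 4 from doTestBasic:

import tensorflow as tf

var = tf.Variable([1.0, 2.0])
opt = tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-7)
for _ in range(4):  # four Adadelta steps, as in doTestBasic
  with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.square(var))
  grads = tape.gradient(loss, [var])
  opt.apply_gradients(zip(grads, [var]))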