The following issues were found:
keras/premade/wide_deep_test.py
181 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for Keras Premade WideNDeep models."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import input_layer
Reported by Pylint.
Line: 33
Column: 1
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(keras_parameterized.TestCase):
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
Reported by Pylint.
Line: 35
Column: 1
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(keras_parameterized.TestCase):
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
Reported by Pylint.
Line: 35
Column: 3
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(keras_parameterized.TestCase):
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
Reported by Pylint.
Line: 36
Column: 1
class WideDeepModelTest(keras_parameterized.TestCase):
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
Reported by Pylint.
Line: 37
Column: 1
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
Reported by Pylint.
Line: 38
Column: 1
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
wide_deep_model.compile(
Reported by Pylint.
Line: 39
Column: 1
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer=['sgd', 'adam'],
Reported by Pylint.
Line: 40
Column: 1
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer=['sgd', 'adam'],
loss='mse',
Reported by Pylint.
Line: 41
Column: 1
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5., high=5., size=(64, 2))
dnn_inp = np.random.uniform(low=-5., high=5., size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer=['sgd', 'adam'],
loss='mse',
metrics=[],
Reported by Pylint.
keras/applications/imagenet_utils.py
181 issues
Line: 25
Column: 1
from keras import activations
from keras import backend
from keras.utils import data_utils
from tensorflow.python.util.tf_export import keras_export
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
Reported by Pylint.
Line: 144
Column: 3
ValueError: In case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
Reported by Pylint.
Line: 101
Column: 1
@keras_export('keras.applications.imagenet_utils.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {'caffe', 'tf', 'torch'}:
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
Reported by Pylint.
Line: 102
Column: 1
@keras_export('keras.applications.imagenet_utils.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {'caffe', 'tf', 'torch'}:
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
if data_format is None:
Reported by Pylint.
Line: 103
Column: 1
@keras_export('keras.applications.imagenet_utils.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {'caffe', 'tf', 'torch'}:
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
if data_format is None:
data_format = backend.image_data_format()
Reported by Pylint.
Line: 104
Column: 1
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {'caffe', 'tf', 'torch'}:
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
Reported by Pylint.
Line: 107
Column: 1
raise ValueError('Expected mode to be one of `caffe`, `tf` or `torch`. '
f'Received: mode={mode}')
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Expected data_format to be one of `channels_first` or '
f'`channels_last`. Received: data_format={data_format}')
Reported by Pylint.
Line: 108
Column: 1
f'Received: mode={mode}')
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Expected data_format to be one of `channels_first` or '
f'`channels_last`. Received: data_format={data_format}')
if isinstance(x, np.ndarray):
Reported by Pylint.
Line: 109
Column: 1
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Expected data_format to be one of `channels_first` or '
f'`channels_last`. Received: data_format={data_format}')
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(
Reported by Pylint.
Line: 110
Column: 1
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Expected data_format to be one of `channels_first` or '
f'`channels_last`. Received: data_format={data_format}')
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(
x, data_format=data_format, mode=mode)
Reported by Pylint.
keras/api/tests/api_compatibility_test.py
181 issues
Line: 31
Column: 1
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import os
import re
import sys
Reported by Pylint.
Line: 40
Column: 1
import six
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
Reported by Pylint.
Line: 41
Column: 1
import six
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
Reported by Pylint.
Line: 43
Column: 1
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
Reported by Pylint.
Line: 44
Column: 1
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
Reported by Pylint.
Line: 45
Column: 1
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
Reported by Pylint.
Line: 46
Column: 1
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
Reported by Pylint.
Line: 47
Column: 1
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
Reported by Pylint.
Line: 48
Column: 1
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
Reported by Pylint.
Line: 71
Column: 3
def _InitPathConstants():
global _API_GOLDEN_FOLDER_V1
global _API_GOLDEN_FOLDER_V2
root_golden_path_v2 = os.path.join(
tf.compat.v1.resource_loader.get_data_files_path(),
'..', 'golden', 'v2', 'tensorflow.keras.pbtxt')
Reported by Pylint.
keras/applications/mobilenet_v2.py
180 issues
Line: 83
Column: 1
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v2/')
Reported by Pylint.
Line: 84
Column: 1
from keras.utils import data_utils
from keras.utils import layer_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v2/')
layers = None
Reported by Pylint.
Line: 85
Column: 1
from keras.utils import layer_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v2/')
layers = None
Reported by Pylint.
Line: 184
Column: 3
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
Reported by Pylint.
Line: 213
Column: 9
is_input_t_tensor = backend.is_keras_tensor(
layer_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError(
f'input_tensor: {input_tensor}'
'is not type input_tensor. '
f'Received `type(input_tensor)={type(input_tensor)}`'
)
if is_input_t_tensor:
Reported by Pylint.
Line: 243
Column: 7
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor must be a valid Keras tensor type; '
f'Received {input_tensor} of type {type(input_tensor)}')
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
Reported by Pylint.
Line: 94
Column: 1
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
Reported by Pylint.
Line: 94
Column: 1
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
Reported by Pylint.
Line: 94
Column: 1
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
Reported by Pylint.
Line: 94
Column: 1
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
Reported by Pylint.
keras/distribute/multi_worker_callback_tf2_test.py
179 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for Keras callbacks in multi-worker training with TF2."""
import tensorflow.compat.v2 as tf
import json
import os
from absl.testing import parameterized
Reported by Pylint.
Line: 22
Column: 1
import json
import os
from absl.testing import parameterized
from keras import callbacks
from keras.distribute import distributed_file_utils
from keras.distribute import multi_worker_testing_utils
Reported by Pylint.
Line: 55
Column: 3
batch_size = 64
steps = 2
with tf.distribute.MultiWorkerMirroredStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Pass saving_filepath from the parent thread to ensure every worker has the
Reported by Pylint.
Line: 96
Column: 26
file_format=['h5', 'tf'],
save_weights_only=[True, False]))
def test_model_checkpoint_saves_on_chief_but_not_otherwise(
self, file_format, mode, save_weights_only):
def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
test_obj, file_format):
model, saving_filepath, train_ds, steps = _model_setup(
Reported by Pylint.
Line: 139
Column: 36
test_obj.assertEqual(
checkpoint_exists(
distributed_file_utils.write_filepath(
saving_filepath, model._distribution_strategy)), is_chief())
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
args=(self, file_format))
Reported by Pylint.
Line: 147
Column: 61
args=(self, file_format))
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
def test_model_checkpoint_works_with_same_file_path(self, mode):
def proc_model_checkpoint_works_with_same_file_path(
test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
Reported by Pylint.
Line: 173
Column: 70
args=(self, saving_filepath))
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
def test_backupandrestore_checkpoint_works_with_interruption(self, mode):
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
Reported by Pylint.
Line: 239
Column: 61
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=['eager']))
def test_profiler_saves_on_both_chief_and_non_chief(self, mode):
def proc_profiler_saves_on_both_chief_and_non_chief(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
Reported by Pylint.
Line: 274
Column: 63
args=(self,))
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
Reported by Pylint.
Line: 314
Column: 71
args=(self,))
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
Reported by Pylint.
keras/distribute/dataset_creator_model_fit_test.py
179 issues
Line: 18
Column: 1
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
import numpy as np
from tensorflow.python.framework import test_util
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
Reported by Pylint.
Line: 21
Column: 1
import tensorflow.compat.v2 as tf
import numpy as np
from tensorflow.python.framework import test_util
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
from keras.utils import dataset_creator
Reported by Pylint.
Line: 27
Column: 3
from keras.utils import dataset_creator
# TODO(rchao): Investigate why there cannot be single worker and multi worker
# PS strategies running in the same shard.
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=strategy_combinations.all_strategies +
strategy_combinations.multi_worker_mirrored_strategies +
Reported by Pylint.
Line: 55
Column: 8
return tf.data.Dataset.from_tensor_slices(
(x, y)).shuffle(10).batch(2)
if strategy._should_use_with_coordinator:
with self.assertRaises((tf.errors.OutOfRangeError,
tf.errors.CancelledError)):
self._model_fit(
strategy,
steps_per_epoch=-1,
Reported by Pylint.
Line: 35
Column: 1
strategy_combinations.multi_worker_mirrored_strategies +
strategy_combinations.parameter_server_strategies_multi_worker,
mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
Reported by Pylint.
Line: 35
Column: 1
strategy_combinations.multi_worker_mirrored_strategies +
strategy_combinations.parameter_server_strategies_multi_worker,
mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
Reported by Pylint.
Line: 37
Column: 1
mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
"with XLA: b/184384487")
Reported by Pylint.
Line: 37
Column: 3
mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
"with XLA: b/184384487")
Reported by Pylint.
Line: 37
Column: 3
mode="eager"))
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
"with XLA: b/184384487")
Reported by Pylint.
Line: 38
Column: 1
class DatasetCreatorModelFitTest(test_base.DatasetCreatorModelFitTestBase):
def setUp(self):
super().setUp()
if test_util.is_xla_enabled():
self.skipTest("model.optimizer.iterations values is not as expected "
"with XLA: b/184384487")
def testModelFit(self, strategy):
Reported by Pylint.
keras/engine/feature_columns_integration_test.py
179 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests specific to Feature Columns integration."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import keras_parameterized
Reported by Pylint.
Line: 29
Column: 1
from keras.utils import np_utils
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
Reported by Pylint.
Line: 36
Column: 3
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
net = self._input_layer(features)
net = self._dense_layer(net)
return net
Reported by Pylint.
Line: 178
Column: 3
dnn_model.evaluate(ds, steps=1)
dnn_model.predict(ds, steps=1)
# TODO(kaftan) seems to throw an error when enabled.
@keras_parameterized.run_all_keras_modes
def DISABLED_test_function_model_feature_layer_input(self):
col_a = tf.feature_column.numeric_column('a')
col_b = tf.feature_column.numeric_column('b')
Reported by Pylint.
Line: 205
Column: 3
data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
model.fit(*data, epochs=1)
# TODO(kaftan) seems to throw an error when enabled.
@keras_parameterized.run_all_keras_modes
def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
col_a = tf.feature_column.numeric_column('a')
col_b = tf.feature_column.numeric_column('b')
col_c = tf.feature_column.numeric_column('c')
Reported by Pylint.
Line: 29
Column: 1
from keras.utils import np_utils
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
Reported by Pylint.
Line: 31
Column: 1
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
Reported by Pylint.
Line: 32
Column: 1
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
net = self._input_layer(features)
Reported by Pylint.
Line: 32
Column: 5
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
net = self._input_layer(features)
Reported by Pylint.
Line: 33
Column: 1
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
net = self._input_layer(features)
net = self._dense_layer(net)
Reported by Pylint.
keras/layers/convolutional_recurrent_test.py
178 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for convolutional recurrent layers."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
Reported by Pylint.
Line: 28
Column: 1
@keras_parameterized.run_all_keras_modes
class ConvLSTM1DTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
Reported by Pylint.
Line: 30
Column: 1
@keras_parameterized.run_all_keras_modes
class ConvLSTM1DTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
Reported by Pylint.
Line: 34
Column: 3
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
Reported by Pylint.
Line: 34
Column: 1
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
Reported by Pylint.
Line: 34
Column: 3
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
Reported by Pylint.
Line: 35
Column: 1
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
sequence_len = 2
Reported by Pylint.
Line: 36
Column: 1
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
sequence_len = 2
if data_format == 'channels_first':
Reported by Pylint.
Line: 37
Column: 1
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
sequence_len = 2
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, sequence_len, input_channel,
Reported by Pylint.
keras/layers/multi_head_attention.py
177 issues
Line: 18
Column: 1
# ==============================================================================
"""Keras-based attention layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import collections
import math
import string
Reported by Pylint.
Line: 19
Column: 1
"""Keras-based attention layer."""
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
import collections
import math
import string
Reported by Pylint.
Line: 34
Column: 1
from keras.layers import core
from keras.layers import einsum_dense
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
_CHR_IDX = string.ascii_lowercase
Reported by Pylint.
Line: 35
Column: 1
from keras.layers import einsum_dense
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
_CHR_IDX = string.ascii_lowercase
Reported by Pylint.
Line: 336
Column: 5
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
free_dims = self._query_shape.rank - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = einsum_dense.EinsumDense(
einsum_equation,
Reported by Pylint.
Line: 226
Column: 16
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self._num_heads = num_heads
Reported by Pylint.
Line: 340
Column: 7
free_dims = self._query_shape.rank - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
Reported by Pylint.
Line: 349
Column: 7
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
Reported by Pylint.
Line: 358
Column: 7
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value",
Reported by Pylint.
Line: 370
Column: 7
# These computations could be wrapped into the keras attention layer once
# it support mult-head einsum computations.
self._build_attention(output_rank)
self._output_dense = self._make_output_dense(
free_dims, common_kwargs, "attention_output")
def _make_output_dense(self, free_dims, common_kwargs, name=None):
"""Builds the output projection matrix.
Reported by Pylint.
keras/layers/rnn_cell_wrapper_v2_test.py
174 issues
Line: 17
Column: 1
# ==============================================================================
"""Tests for RNN cell wrapper v2 implementation."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras import layers
Reported by Pylint.
Line: 19
Column: 1
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras import layers
from keras.layers import rnn_cell_wrapper_v2
from keras.layers.legacy_rnn import rnn_cell_impl
Reported by Pylint.
Line: 134
Column: 5
def testWrapperV2Caller(self, wrapper):
"""Tests that wrapper V2 is using the LayerRNNCell's caller."""
with legacy_base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
rnn_cell = wrapper(base_cell)
inputs = tf.convert_to_tensor([[1]], dtype=tf.float32)
state = tf.convert_to_tensor([[1]], dtype=tf.float32)
Reported by Pylint.
Line: 43
Column: 20
bias_initializer=tf.compat.v1.constant_initializer(0.5))
g, m_new = base_cell(x, m)
wrapper_object = wrapper_type(base_cell)
(name, dep), = wrapper_object._checkpoint_dependencies
wrapper_object.get_config() # Should not throw an error
self.assertIs(dep, base_cell)
self.assertEqual("cell", name)
g_res, m_new_res = wrapper_object(x, m)
Reported by Pylint.
Line: 87
Column: 20
m = tf.zeros([1, 3])
cell = rnn_cell_impl.GRUCell(3)
wrapped_cell = wrapper_type(cell, "/cpu:0")
(name, dep), = wrapped_cell._checkpoint_dependencies
wrapped_cell.get_config() # Should not throw an error
self.assertIs(dep, cell)
self.assertEqual("cell", name)
outputs, _ = wrapped_cell(x, m)
Reported by Pylint.
Line: 104
Column: 22
self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None))
wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
self.assertFalse(wrapped_cell._keras_style)
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
def testWrapperWeights(self, wrapper):
"""Tests that wrapper weights contain wrapped cells weights."""
Reported by Pylint.
Line: 141
Column: 15
inputs = tf.convert_to_tensor([[1]], dtype=tf.float32)
state = tf.convert_to_tensor([[1]], dtype=tf.float32)
_ = rnn_cell(inputs, [state, state])
weights = base_cell._cells[0].weights
self.assertLen(weights, expected_len=2)
self.assertTrue(all("_wrapper" in v.name for v in weights))
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
Reported by Pylint.
Line: 178
Column: 22
reconstructed_wrapper = wrapper_cls.from_config(config)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4)
def residual_fn(inputs, outputs):
return inputs * 3 + outputs
wrapper = wrapper_cls(cell, residual_fn=residual_fn)
Reported by Pylint.
Line: 188
Column: 22
reconstructed_wrapper = wrapper_cls.from_config(config)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5)
def testDropoutWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
cell = layers.GRUCell(10)
wrapper = wrapper_cls(cell)
Reported by Pylint.
Line: 204
Column: 21
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertTrue(reconstructed_wrapper._dropout_state_filter(None))
def dropout_state_filter_visitor(unused_state):
return False
wrapper = wrapper_cls(
Reported by Pylint.