I am training the Mask R-CNN Inception ResNet V2 1024x1024 model from the TensorFlow Detection Model Zoo on my computer's GPU, and I labeled my images (1100x1100 pixels) with labelImg. Here is what I am working with:
- GPU: NVIDIA GEFORCE RTX 2060
- System: 16GB RAM, 6 processor cores
- TensorFlow: 2.3.1
- Python: 3.8.6
- CUDA: 10.1
- cuDNN: 7.6
- Anaconda 3 command prompt
All tfrecord files have been generated, and when I start training my model with ```python model_main_tf2.py --model_dir=models/my_faster_rcnn --pipeline_config_path=models/my_faster_rcnn/pipeline.config```, I get the following error:
```
Traceback (most recent call last):
File "model_main_tf2.py", line 113, in <module>
tf.compat.v1.app.run()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\platform\app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\absl\app.py", line 303, in run
_run_main(main, args)
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\absl\app.py", line 251, in _run_main
sys.exit(main(argv))
File "model_main_tf2.py", line 104, in main
model_lib_v2.train_loop(
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\object_detection\model_lib_v2.py", line 564, in train_loop
load_fine_tune_checkpoint(detection_model,
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\object_detection\model_lib_v2.py", line 350, in load_fine_tune_checkpoint
features, labels = iter(input_dataset).next()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 645, in next
return self.__next__()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 649, in __next__
return self.get_next()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 694, in get_next
self._iterators[i].get_next_as_list_static_shapes(new_name))
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 1474, in get_next_as_list_static_shapes
return self._iterator.get_next()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\data\ops\multi_device_iterator_ops.py", line 581, in get_next
result.append(self._device_iterators[i].get_next())
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py", line 825, in get_next
return self._next_internal()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py", line 764, in _next_internal
return structure.from_compatible_tensor_list(self._element_spec, ret)
File "C:\user\anaconda3\envs\object_detection_api\lib\contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\eager\context.py", line 2105, in execution_mode
executor_new.wait()
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\eager\executor.py", line 67, in wait
pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle)
tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[16] = 16 is not in [0, 0)
[[{{node GatherV2_7}}]]
[[MultiDeviceIteratorGetNextFromShard]]
[[RemoteCall]]
```
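The failure is raised while the very first batch is being read from the input pipeline (the `GatherV2` node under `MultiDeviceIteratorGetNextFromShard`), and `not in [0, 0)` appears to mean that index 16 is being gathered from a tensor with zero entries along the gathered axis, i.e. an empty per-object feature for an example that contains 17 or more objects. A minimal sketch for checking this against the generated records (assuming they use the standard Object Detection API feature keys `image/object/class/label` and `image/object/mask`; adjust the keys if your record-generation script used different names):
```
import tensorflow as tf

# Count per-object labels and masks for every example in the generated
# train.record (feature keys are the ones the Object Detection API expects;
# adjust them if the record-generation script used different names).
for i, raw in enumerate(tf.data.TFRecordDataset("annotations/train.record")):
    example = tf.train.Example.FromString(raw.numpy())
    feats = example.features.feature
    num_labels = len(feats["image/object/class/label"].int64_list.value)
    num_masks = len(feats["image/object/mask"].bytes_list.value)
    if num_labels != num_masks:
        print(f"example {i}: {num_labels} labels but only {num_masks} masks")
```
If any example reports more labels than masks (or objects but zero masks), that would line up with an out-of-range gather of the kind shown in the traceback once `load_instance_masks: true` is set.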
The pipeline config file used for training is:
```
# Mask R-CNN with Inception Resnet v2 (no atrous)
# Sync-trained on COCO (with 8 GPUs) with batch size 16 (1024x1024 resolution)
# Initialized from Imagenet classification checkpoint
#
# Train on GPU-8
#
# Achieves 40.4 box mAP and 35.5 mask mAP on COCO17 val
model {
faster_rcnn {
number_of_stages: 3
num_classes: 1
image_resizer {
fixed_shape_resizer {
height: 1024
width: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2_keras'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
mask_height: 33
mask_width: 33
mask_prediction_conv_depth: 0
mask_prediction_num_conv_layers: 4
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
predict_instance_masks: true
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
second_stage_mask_prediction_loss_weight: 4.0
resize_masks: false
}
}
train_config: {
batch_size: 1
num_steps: 200000
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.008
total_steps: 200000
warmup_learning_rate: 0.0
warmup_steps: 5000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint_version: V2
fine_tune_checkpoint: "pre-trained-models/mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8/checkpoint/ckpt-0"
fine_tune_checkpoint_type: "detection"
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
label_map_path: "annotations/label_map.pbtxt"
tf_record_input_reader {
input_path: "annotations/train.record"
}
load_instance_masks: true
mask_type: PNG_MASKS
}
eval_config: {
metrics_set: "coco_detection_metrics"
metrics_set: "coco_mask_metrics"
eval_instance_masks: true
use_moving_averages: false
batch_size: 1
include_metrics_per_category: true
}
eval_input_reader: {
label_map_path: "annotations/label_map.pbtxt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "annotations/test.record"
}
load_instance_masks: true
mask_type: PNG_MASKS
}
```
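For reference, a minimal sketch (using the Object Detection API's `config_util` helper and the config path from the training command above) to confirm that this file parses and that the input-pipeline fields are picked up as intended:
```
# Load the pipeline config with the Object Detection API's config_util helper
# and print the fields the training input pipeline depends on
# (path taken from the training command above).
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    "models/my_faster_rcnn/pipeline.config")
print(configs["model"].faster_rcnn.num_classes)            # expect 1
print(configs["train_input_config"].load_instance_masks)   # expect True
print(configs["train_input_config"].mask_type)             # expect the PNG_MASKS enum value
print(configs["train_input_config"].tf_record_input_reader.input_path)
```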
**What can be done to fix this?**
##############################################
Below are the scripts that are referenced in the error:
File "model_main_tf2.py", line 113:
```
#Lines 74-113:
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tf.config.set_soft_device_placement(True)
if FLAGS.checkpoint_dir:
model_lib_v2.eval_continuously(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
checkpoint_dir=FLAGS.checkpoint_dir,
wait_interval=300, timeout=FLAGS.eval_timeout)
else:
if FLAGS.use_tpu:
# TPU is automatically inferred if tpu_name is None and
# we are running under cloud ai-platform.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif FLAGS.num_workers > 1:
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
else:
strategy = tf.compat.v2.distribute.MirroredStrategy()
with strategy.scope():
model_lib_v2.train_loop(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
use_tpu=FLAGS.use_tpu,
checkpoint_every_n=FLAGS.checkpoint_every_n,
record_summaries=FLAGS.record_summaries)
if __name__ == '__main__':
tf.compat.v1.app.run()
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\platform\app.py", line 40:
```
#Lines 17-40:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from absl.app import run as _run
from tensorflow.python.platform import flags
from tensorflow.python.util.tf_export import tf_export
def _parse_flags_tolerate_undef(argv):
"""Parse args, returning any unknown flags (ABSL defaults to crashing)."""
return flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)
@tf_export(v1=['app.run'])
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""
main = main or _sys.modules['__main__'].main
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\absl\app.py", line 303:
```
#Lines 294-328:
try:
args = _run_init(
sys.argv if argv is None else argv,
flags_parser,
)
while _init_callbacks:
callback = _init_callbacks.popleft()
callback()
try:
_run_main(main, args)
except UsageError as error:
usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)
except:
exc = sys.exc_info()[1]
# Don't try to post-mortem debug successful SystemExits, since those
# mean there wasn't actually an error. In particular, the test framework
# raises SystemExit(False) even if all tests passed.
if isinstance(exc, SystemExit) and not exc.code:
raise
# Check the tty so that we don't hang waiting for input in an
# non-interactive scenario.
if FLAGS.pdb_post_mortem and sys.stdout.isatty():
traceback.print_exc()
print()
print(' *** Entering post-mortem debugging ***')
print()
pdb.post_mortem()
raise
except Exception as e:
_call_exception_handlers(e)
raise
# Callbacks which have been deferred until after _run_init has been called.
_init_callbacks = collections.deque()
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\absl\app.py", line 251:
```
#Lines 231-251:
def _run_main(main, argv):
"""Calls main, optionally with pdb or profiler."""
if FLAGS.run_with_pdb:
sys.exit(pdb.runcall(main, argv))
elif FLAGS.run_with_profiling or FLAGS.profile_file:
# Avoid import overhead since most apps (including performance-sensitive
# ones) won't be run with profiling.
import atexit
if FLAGS.use_cprofile_for_profiling:
import cProfile as profile
else:
import profile
profiler = profile.Profile()
if FLAGS.profile_file:
atexit.register(profiler.dump_stats, FLAGS.profile_file)
else:
atexit.register(profiler.print_stats)
retval = profiler.runcall(main, argv)
sys.exit(retval)
else:
sys.exit(main(argv))
```
File "model_main_tf2.py", line 104:
(Same snippet as shown above for line 113 — lines 74-113 of model_main_tf2.py; line 104 is the `model_lib_v2.train_loop(` call.)
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\object_detection\model_lib_v2.py", line 564:
```
#Line 545-569:
if record_summaries:
summary_writer = tf.compat.v2.summary.create_file_writer(
summary_writer_filepath)
else:
summary_writer = tf2.summary.create_noop_writer()
if use_tpu:
num_steps_per_iteration = 100
else:
# TODO(b/135933080) Explore setting to 100 when GPU performance issues
# are fixed.
num_steps_per_iteration = 1
with summary_writer.as_default():
with strategy.scope():
with tf.compat.v2.summary.record_if(
lambda: global_step % num_steps_per_iteration == 0):
# Load a fine-tuning checkpoint.
if train_config.fine_tune_checkpoint:
load_fine_tune_checkpoint(detection_model,
train_config.fine_tune_checkpoint,
fine_tune_checkpoint_type,
fine_tune_checkpoint_version,
train_input,
unpad_groundtruth_tensors)
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\object_detection\model_lib_v2.py", line 350:
```
#Lines 312-350:
def load_fine_tune_checkpoint(
model, checkpoint_path, checkpoint_type, checkpoint_version, input_dataset,
unpad_groundtruth_tensors):
"""Load a fine tuning classification or detection checkpoint.
To make sure the model variables are all built, this method first executes
the model by computing a dummy loss. (Models might not have built their
variables before their first execution)
It then loads an object-based classification or detection checkpoint.
This method updates the model in-place and does not return a value.
Args:
model: A DetectionModel (based on Keras) to load a fine-tuning
checkpoint for.
checkpoint_path: Directory with checkpoints file or path to checkpoint.
checkpoint_type: Whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`.
checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating
whether to load checkpoints in V1 style or V2 style. In this binary
we only support V2 style (object-based) checkpoints.
input_dataset: The tf.data Dataset the model is being trained on. Needed
to get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
Raises:
IOError: if `checkpoint_path` does not point at a valid object-based
checkpoint
ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2
"""
if not is_object_based_checkpoint(checkpoint_path):
raise IOError('Checkpoint is expected to be an object-based checkpoint.')
if checkpoint_version == train_pb2.CheckpointVersion.V1:
raise ValueError('Checkpoint version should be V2')
features, labels = iter(input_dataset).next()
```
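Line 350 is the `features, labels = iter(input_dataset).next()` call, i.e. the first batch is pulled just to build the model variables before the fine-tune checkpoint is restored. A minimal sketch (assuming the API's `inputs.train_input` helper, which `train_loop` uses internally to build `input_dataset`) to reproduce that read outside the training loop:
```
# Build the same training input that model_lib_v2.train_loop builds and pull
# one batch outside the training loop, to see whether the GatherV2 error
# reproduces without checkpoint loading. inputs.train_input is the helper
# train_loop uses internally; the config path is taken from the training command.
from object_detection import inputs
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    "models/my_faster_rcnn/pipeline.config")
dataset = inputs.train_input(
    train_config=configs["train_config"],
    train_input_config=configs["train_input_config"],
    model_config=configs["model"])
features, labels = next(iter(dataset))  # the error should surface here if it is input-related
print({key: value.shape for key, value in features.items()})
```
If this standalone read raises the same `InvalidArgumentError`, the problem is in the records or config rather than in the checkpoint-loading step.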
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", issues with line 645, 645, 694:
```
#Lines 615-728:
class DistributedIteratorBase(DistributedIteratorInterface):
"""Common implementation for all input iterators."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers, iterators, strategy):
static_shape = _get_static_shape(iterators)
# TODO(b/133073708): we currently need a flag to control the usage because
# there is a performance difference between get_next() and
# get_next_as_optional(). And we only enable get_next_as_optional when the
# output shapes are not static.
#
# TODO(rxsang): We want to always enable the get_next_as_optional behavior
# when user passed input_fn instead of dataset.
if getattr(
strategy.extended, "experimental_enable_get_next_as_optional", False):
self._enable_get_next_as_optional = (
not static_shape) or strategy.extended._in_multi_worker_mode()
else:
self._enable_get_next_as_optional = False
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
self._strategy = strategy
def next(self):
return self.__next__()
def __next__(self):
try:
return self.get_next()
except errors.OutOfRangeError:
raise StopIteration
def __iter__(self):
return self
def get_next_as_optional(self):
global_has_value, replicas = _get_next_as_optional(self, self._strategy)
def return_none():
return optional_ops.Optional.empty(self._element_spec)
def return_value(replicas):
"""Wraps the inputs for replicas in an `tf.experimental.Optional`."""
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
result = replicas[i][j]
results.append(result)
replicas = results
return optional_ops.Optional.from_value(
distribute_utils.regroup(replicas))
return control_flow_ops.cond(global_has_value,
lambda: return_value(replicas),
lambda: return_none()) # pylint: disable=unnecessary-lambda
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
if not self._enable_get_next_as_optional:
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(
self._iterators[i].get_next_as_list_static_shapes(new_name))
return distribute_utils.regroup(replicas)
out_of_range_replicas = []
def out_of_range_fn(worker_index, device):
"""This function will throw an OutOfRange error."""
# As this will be only called when there is no data left, so calling
# get_next() will trigger an OutOfRange error.
data = self._iterators[worker_index].get_next(device)
out_of_range_replicas.append(data)
return data
global_has_value, replicas = _get_next_as_optional(self, self._strategy)
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
# pylint: disable=undefined-loop-variable
# pylint: disable=cell-var-from-loop
# It is fine for the lambda to capture variables from the loop as
# the lambda is executed in the loop as well.
result = control_flow_ops.cond(
global_has_value,
lambda: replicas[i][j],
lambda: out_of_range_fn(i, device),
strict=True,
)
# pylint: enable=cell-var-from-loop
# pylint: enable=undefined-loop-variable
results.append(result)
replicas = results
return distribute_utils.regroup(replicas)
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 1474
```
#Lines 1459-1474:
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the underlying iterator.
Runs the iterator get_next() within a device scope. Since this doesn't use
get_next_as_optional(), is is considerably faster than get_next_as_list()
(but can only be used when the shapes are static).
Args:
name: not used.
Returns:
A list consisting of the next data from each device.
"""
del name
with ops.device(self._worker):
return self._iterator.get_next()
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\data\ops\multi_device_iterator_ops.py", line 581:
```
#Lines 572-588:
def get_next(self, device=None):
"""Returns the next element given a `device`, else returns all in a list."""
if device is not None:
index = self._devices.index(device)
return self._device_iterators[index].get_next()
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(self._device_iterators[i].get_next())
return result
def __iter__(self):
return self
def __next__(self):
return self.next()
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py", line 764 and 825:
```
#Lines 750-834:
with context.execution_mode(context.SYNC):
with ops.device(self._device):
# TODO(ashankar): Consider removing this ops.device() context manager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
ret = gen_dataset_ops.iterator_get_next(
self._iterator_resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
try:
# Fast path for the case `self._structure` is not a nested structure.
return self._element_spec._from_compatible_tensor_list(ret) # pylint: disable=protected-access
except AttributeError:
return structure.from_compatible_tensor_list(self._element_spec, ret)
@property
def _type_spec(self):
return IteratorSpec(self.element_spec)
def next(self):
try:
return self._next_internal()
except errors.OutOfRangeError:
raise StopIteration
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.sparse.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._element_spec)
@property
def element_spec(self):
return self._element_spec
def get_next(self):
return self._next_internal()
def get_next_as_optional(self):
# pylint: disable=protected-access
return optional_ops._OptionalImpl(
gen_dataset_ops.iterator_get_next_as_optional(
self._iterator_resource,
output_types=structure.get_flat_tensor_types(self.element_spec),
output_shapes=structure.get_flat_tensor_shapes(
self.element_spec)), self.element_spec)
```
File "C:\user\anaconda3\envs\object_detection_api\lib\contextlib.py", line 131:
```
#Lines 97-162:
class _GeneratorContextManager(_GeneratorContextManagerBase,
AbstractContextManager,
ContextDecorator):
"""Helper for @contextmanager decorator."""
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, self.args, self.kwds)
def __enter__(self):
# do not keep args and kwds alive unnecessarily
# they are only needed for recreation, which is not possible anymore
del self.args, self.kwds, self.func
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return False
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Don't re-raise the passed in exception. (issue27122)
if exc is value:
return False
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
if type is StopIteration and exc.__cause__ is value:
return False
raise
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
# This cannot use 'except BaseException as exc' (as in the
# async implementation) to maintain compatibility with
# Python 2, where old-style class exceptions are not caught
# by 'except BaseException'.
if sys.exc_info()[1] is value:
return False
raise
raise RuntimeError("generator didn't stop after throw()")
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\eager\context.py", line 2105:
```
#Lines 2001-2013:
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
```
File "C:\user\anaconda3\envs\object_detection_api\lib\site-packages\tensorflow\python\eager\executor.py", line 67:
```
#Lines 24-76:
class Executor(object):
"""A class for handling eager execution.
The default behavior for asynchronous execution is to serialize all ops on
a single thread. Having different `Executor` objects in different threads
enables executing ops asynchronously in parallel:
```python
def thread_function():
executor = executor.Executor(enable_async=True):
context.set_executor(executor)
a = threading.Thread(target=thread_function)
a.start()
b = threading.Thread(target=thread_function)
b.start()
"""
def __init__(self, handle):
self._handle = handle
def __del__(self):
try:
# pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle)
pywrap_tfe.TFE_DeleteExecutor(self._handle)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the pywrap module
# already being unloaded, self._handle. no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
def is_async(self):
return pywrap_tfe.TFE_ExecutorIsAsync(self._handle)
def handle(self):
return self._handle
def wait(self):
"""Waits for ops dispatched in this executor to finish."""
pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle)
def clear_error(self):
"""Clears errors raised in this executor during execution."""
pywrap_tfe.TFE_ExecutorClearError(self._handle)
def new_executor(enable_async):
handle = pywrap_tfe.TFE_NewExecutor(enable_async)
return Executor(handle)
```