LostTech.TensorFlow : API Documentation

Type Context

Namespace tensorflow.python.eager.context

Parent PythonObjectContainer

Interfaces IContext

Public instance methods

void add_function(object fn)

void add_function_def(object fdef)

object add_function_def_dyn(object fdef)

object add_function_dyn(object fn)

void add_post_execution_callback(PythonFunctionContainer callback)

object add_post_execution_callback_dyn(object callback)

void clear_post_execution_callbacks()

object clear_post_execution_callbacks_dyn()

void configure_collective_ops(string collective_leader, ImplicitContainer<T> scoped_allocator_enabled_ops, bool use_nccl_communication, IEnumerable<string> device_filters)

object configure_collective_ops_dyn(ImplicitContainer<T> collective_leader, ImplicitContainer<T> scoped_allocator_enabled_ops, ImplicitContainer<T> use_nccl_communication, object device_filters)

IList<object> devices()

object devices_dyn()

void enable_collective_ops(object server_def)

object enable_collective_ops_dyn(object server_def)

object end_step_dyn()

object get_memory_growth(PhysicalDevice dev)

Get whether memory growth is enabled for a PhysicalDevice.

A PhysicalDevice with memory growth set will not allocate all memory on the device upfront.
Returns
object
Current memory growth setting.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
assert tf.config.experimental.get_memory_growth(physical_devices[0]) == True

object get_memory_growth_dyn(object dev)

Get whether memory growth is enabled for a PhysicalDevice.

A PhysicalDevice with memory growth set will not allocate all memory on the device upfront.
Returns
object
Current memory growth setting.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
assert tf.config.experimental.get_memory_growth(physical_devices[0]) == True

object get_optimizer_experimental_options_dyn()

IList<VirtualDeviceConfiguration> get_virtual_device_configuration(PhysicalDevice dev)

Get the virtual device configuration for a PhysicalDevice.

Returns the list of VirtualDeviceConfiguration objects previously configured by a call to `tf.config.experimental.set_virtual_device_configuration()`.
Returns
IList<VirtualDeviceConfiguration>
List of tf.config.experimental.VirtualDeviceConfiguration objects or `None` if no virtual device configuration has been set for this physical device.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
configs = tf.config.experimental.get_virtual_device_configuration(
    physical_devices[0])
assert configs is None
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(),
     tf.config.experimental.VirtualDeviceConfiguration()])
configs = tf.config.experimental.get_virtual_device_configuration(
    physical_devices[0])
assert len(configs) == 2

object get_virtual_device_configuration_dyn(object dev)

Get the virtual device configuration for a PhysicalDevice.

Returns the list of VirtualDeviceConfiguration objects previously configured by a call to `tf.config.experimental.set_virtual_device_configuration()`.
Returns
object
List of tf.config.experimental.VirtualDeviceConfiguration objects or `None` if no virtual device configuration has been set for this physical device.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
configs = tf.config.experimental.get_virtual_device_configuration(
    physical_devices[0])
assert configs is None
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(),
     tf.config.experimental.VirtualDeviceConfiguration()])
configs = tf.config.experimental.get_virtual_device_configuration(
    physical_devices[0])
assert len(configs) == 2

IList<object> get_visible_devices(string device_type)

Get the list of visible physical devices.

Returns a list of PhysicalDevice objects that are currently marked as visible to the runtime. Any visible devices will have LogicalDevices assigned to them once the runtime is initialized.

The following example verifies all visible GPUs have been disabled:
Parameters
string device_type
(optional) Device types to limit query to.
Returns
IList<object>
List of PhysicalDevice objects
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable all GPUs
tf.config.experimental.set_visible_devices([], 'GPU')
visible_devices = tf.config.experimental.get_visible_devices()
for device in visible_devices:
  assert device.device_type != 'GPU'

object get_visible_devices_dyn(object device_type)

Get the list of visible physical devices.

Returns a list of PhysicalDevice objects that are currently marked as visible to the runtime. Any visible devices will have LogicalDevices assigned to them once the runtime is initialized.

The following example verifies all visible GPUs have been disabled:
Parameters
object device_type
(optional) Device types to limit query to.
Returns
object
List of PhysicalDevice objects
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable all GPUs
tf.config.experimental.set_visible_devices([], 'GPU')
visible_devices = tf.config.experimental.get_visible_devices()
for device in visible_devices:
  assert device.device_type != 'GPU'

bool has_function(string name)

object has_function_dyn(object name)

IList<LogicalDevice> list_logical_devices(string device_type)

Return a list of logical devices created by runtime.

Logical devices may correspond to physical devices or remote devices in the cluster. Operations and tensors may be placed on these devices by using the `name` of the LogicalDevice.
Parameters
string device_type
(optional) Device type to filter by such as "CPU" or "GPU"
Returns
IList<LogicalDevice>
List of LogicalDevice objects
Show Example
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Allocate on GPU:0
with tf.device(logical_devices[0].name):
  one = tf.constant(1)
# Allocate on GPU:1
with tf.device(logical_devices[1].name):
  two = tf.constant(2)

object list_logical_devices_dyn(object device_type)

Return a list of logical devices created by runtime.

Logical devices may correspond to physical devices or remote devices in the cluster. Operations and tensors may be placed on these devices by using the `name` of the LogicalDevice.
Parameters
object device_type
(optional) Device type to filter by such as "CPU" or "GPU"
Returns
object
List of LogicalDevice objects
Show Example
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Allocate on GPU:0
with tf.device(logical_devices[0].name):
  one = tf.constant(1)
# Allocate on GPU:1
with tf.device(logical_devices[1].name):
  two = tf.constant(2)

IList<PhysicalDevice> list_physical_devices(string device_type)

Return a list of physical devices visible to the runtime.

Physical devices are hardware devices locally present on the current machine. By default all discovered CPU and GPU devices are considered visible. `list_physical_devices` allows querying the hardware prior to runtime initialization.

The following example ensures the machine can see at least 1 GPU.
Parameters
string device_type
(optional) Device type to filter by such as "CPU" or "GPU"
Returns
IList<PhysicalDevice>
List of PhysicalDevice objects
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "No GPUs found."

object list_physical_devices_dyn(object device_type)

Return a list of physical devices visible to the runtime.

Physical devices are hardware devices locally present on the current machine. By default all discovered CPU and GPU devices are considered visible. `list_physical_devices` allows querying the hardware prior to runtime initialization.

The following example ensures the machine can see at least 1 GPU.
Parameters
object device_type
(optional) Device type to filter by such as "CPU" or "GPU"
Returns
object
List of PhysicalDevice objects
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "No GPUs found."

int num_gpus()

object num_gpus_dyn()

_EagerTensorCache ones_rank_cache()

object ones_rank_cache_dyn()

void remove_function(string name)

object remove_function_dyn(object name)

void set_memory_growth(PhysicalDevice dev, bool enable)

Set whether memory growth should be enabled for a PhysicalDevice.

A PhysicalDevice with memory growth set will not allocate all memory on the device upfront. Memory growth cannot be configured on a PhysicalDevice with virtual devices configured.
Parameters
PhysicalDevice dev
bool enable
Whether to enable or disable memory growth
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
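
The constraint noted above (memory growth cannot be combined with virtual devices) can be checked directly. The following sketch is illustrative rather than taken from this page; it assumes at least one GPU is present and that the runtime rejects the call with a ValueError, which is not documented here.
# Illustrative sketch: once virtual devices are configured on a PhysicalDevice,
# enabling memory growth on it is expected to fail.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration()])
try:
  tf.config.experimental.set_memory_growth(physical_devices[0], True)
except ValueError:
  print('Memory growth cannot be configured with virtual devices present.')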

object set_memory_growth_dyn(object dev, object enable)

Set whether memory growth should be enabled for a PhysicalDevice.

A PhysicalDevice with memory growth set will not allocate all memory on the device upfront. Memory growth cannot be configured on a PhysicalDevice with virtual devices configured.
Parameters
object dev
object enable
Whether to enable or disable memory growth
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)

void set_optimizer_experimental_options(IDictionary<string, object> options)

object set_optimizer_experimental_options_dyn(object options)

void set_server_def(object server_def, int keep_alive_secs)

object set_server_def_dyn(object server_def, ImplicitContainer<T> keep_alive_secs)

void set_virtual_device_configuration(PhysicalDevice dev, IEnumerable<VirtualDeviceConfiguration> virtual_devices)

Set the virtual device configuration for a PhysicalDevice.

A PhysicalDevice marked as visible will by default have a single LogicalDevice allocated to it once the runtime is configured. Specifying a list of tf.config.experimental.VirtualDeviceConfiguration objects allows multiple devices to be configured that utilize the same PhysicalDevice.

The following example splits the CPU into 2 virtual devices. A second sketch, shown after the example below, splits a GPU into 2 virtual devices with 100 MB each:
Parameters
PhysicalDevice dev
IEnumerable<VirtualDeviceConfiguration> virtual_devices
(optional) List of VirtualDeviceConfiguration objects to allocate for the specified PhysicalDevice.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
# Specify 2 virtual CPUs. Note currently memory limit is not supported.
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(),
     tf.config.experimental.VirtualDeviceConfiguration()])
logical_devices = tf.config.experimental.list_logical_devices('CPU')
assert len(logical_devices) == 2

try:
  tf.config.experimental.set_virtual_device_configuration(
      physical_devices[0],
      [tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration()])
except:
  print('Cannot modify the virtual devices once they have been initialized.')
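
The GPU variant mentioned in the description is not shown on this page; the sketch below reconstructs it. It assumes at least one GPU is available and that VirtualDeviceConfiguration accepts a memory_limit value in megabytes.
# Reconstructed sketch: split a single GPU into 2 virtual devices with 100 MB each.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=100),
     tf.config.experimental.VirtualDeviceConfiguration(memory_limit=100)])
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# One extra logical device now exists for the split GPU.
assert len(logical_devices) == len(physical_devices) + 1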

object set_virtual_device_configuration_dyn(object dev, object virtual_devices)

Set the virtual device configuration for a PhysicalDevice.

A PhysicalDevice marked as visible will by default have a single LogicalDevice allocated to it once the runtime is configured. Specifying a list of tf.config.experimental.VirtualDeviceConfiguration objects allows multiple devices to be configured that utilize the same PhysicalDevice.

The following example splits the CPU into 2 virtual devices (a GPU variant with 100 MB per virtual device is sketched under set_virtual_device_configuration above):
Parameters
object dev
object virtual_devices
(optional) List of VirtualDeviceConfiguration objects to allocate for the specified PhysicalDevice.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('CPU')
assert len(physical_devices) == 1, "No CPUs found"
# Specify 2 virtual CPUs. Note currently memory limit is not supported.
tf.config.experimental.set_virtual_device_configuration(
    physical_devices[0],
    [tf.config.experimental.VirtualDeviceConfiguration(),
     tf.config.experimental.VirtualDeviceConfiguration()])
logical_devices = tf.config.experimental.list_logical_devices('CPU')
assert len(logical_devices) == 2

try:
  tf.config.experimental.set_virtual_device_configuration(
      physical_devices[0],
      [tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration(),
       tf.config.experimental.VirtualDeviceConfiguration()])
except:
  print('Cannot modify the virtual devices once they have been initialized.')

void set_visible_devices(PhysicalDevice devices, string device_type)

Set the list of visible devices.

Sets the list of PhysicalDevices to be marked as visible to the runtime. TensorFlow will not allocate memory on devices that are not marked as visible, and no operations can be placed on them, since no LogicalDevice will be created for them. By default all discovered devices are marked as visible.

The following example demonstrates disabling the first GPU on the machine.
Parameters
PhysicalDevice devices
(optional) List of PhysicalDevice objects to make visible
string device_type
(optional) Device types to limit visibility configuration to. Other device types will be left unaltered.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable first GPU
tf.config.experimental.set_visible_devices(physical_devices[1:], 'GPU')
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Logical device was not created for first GPU
assert len(logical_devices) == len(physical_devices) - 1

void set_visible_devices(IEnumerable<object> devices, string device_type)

Set the list of visible devices.

Sets the list of PhysicalDevices to be marked as visible to the runtime. TensorFlow will not allocate memory on devices that are not marked as visible, and no operations can be placed on them, since no LogicalDevice will be created for them. By default all discovered devices are marked as visible.

The following example demonstrates disabling the first GPU on the machine.
Parameters
IEnumerable<object> devices
(optional) List of PhysicalDevice objects to make visible
string device_type
(optional) Device types to limit visibility configuration to. Other device types will be left unaltered.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable first GPU
tf.config.experimental.set_visible_devices(physical_devices[1:], 'GPU')
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Logical device was not created for first GPU
assert len(logical_devices) == len(physical_devices) - 1

object set_visible_devices_dyn(object devices, object device_type)

Set the list of visible devices.

Sets the list of PhysicalDevices to be marked as visible to the runtime. TensorFlow will not allocate memory on devices that are not marked as visible, and no operations can be placed on them, since no LogicalDevice will be created for them. By default all discovered devices are marked as visible.

The following example demonstrates disabling the first GPU on the machine.
Parameters
object devices
(optional) List of PhysicalDevice objects to make visible
object device_type
(optional) Device types to limit visibility configuration to. Other device types will be left unaltered.
Show Example
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# Disable first GPU
tf.config.experimental.set_visible_devices(physical_devices[1:], 'GPU')
logical_devices = tf.config.experimental.list_logical_devices('GPU')
# Logical device was not created for first GPU
assert len(logical_devices) == len(physical_devices) - 1

object start_step_dyn()

_EagerTensorCache zeros_cache()

object zeros_cache_dyn()

Public properties

object config get;

object config_dyn get;

_ContextSwitchStack context_switches get;

object context_switches_dyn get;

string device_name get;

object device_name_dyn get;

object device_policy get; set;

object device_policy_dyn get; set;

object device_spec get;

object device_spec_dyn get;

int execution_mode get; set;

object execution_mode_dyn get; set;

Executor executor get; set;

object executor_dyn get; set;

FunctionCallOptions function_call_options get; set;

object function_call_options_dyn get; set;

object inter_op_parallelism_threads get; set;

object inter_op_parallelism_threads_dyn get; set;

object intra_op_parallelism_threads get; set;

object intra_op_parallelism_threads_dyn get; set;

object log_device_placement get; set;

object log_device_placement_dyn get; set;

object mirroring_policy get; set;

object mirroring_policy_dyn get; set;

object optimizer_jit get; set;

object optimizer_jit_dyn get; set;

object post_execution_callbacks get;

object post_execution_callbacks_dyn get;

object PythonObject get;

string scope_name get; set;

object scope_name_dyn get; set;

object soft_device_placement get; set;

object soft_device_placement_dyn get; set;

object summary_recording get; set;

bool summary_recording_distribution_strategy get; set;

object summary_recording_distribution_strategy_dyn get; set;

object summary_recording_dyn get; set;

object summary_step get; set;

object summary_step_dyn get; set;

object summary_writer get; set;

object summary_writer_dyn get; set;