Package inference
Class ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators
java.lang.Object
com.google.protobuf.AbstractMessageLite
com.google.protobuf.AbstractMessage
com.google.protobuf.GeneratedMessageV3
inference.ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators
- All Implemented Interfaces:
com.google.protobuf.Message, com.google.protobuf.MessageLite, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder, Serializable
- Enclosing class:
ModelConfigOuterClass.ModelOptimizationPolicy
public static final class ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators
extends com.google.protobuf.GeneratedMessageV3
implements ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
@@ @@ .. cpp:var:: message ExecutionAccelerators @@ @@ Specify the preferred execution accelerators to be used to execute @@ the model. Currently only recognized by ONNX Runtime backend and @@ TensorFlow backend. @@ @@ For ONNX Runtime backend, it will deploy the model with the execution @@ accelerators by priority; the priority is determined based on the @@ order that they are set, i.e., the provider at the front has highest @@ priority. Overall, the priority will be in the following order: @@ <gpu_execution_accelerator> (if instance is on GPU) @@ CUDA Execution Provider (if instance is on GPU) @@ <cpu_execution_accelerator> @@ Default CPU Execution Provider @@ Protobuf type
inference.ModelOptimizationPolicy.ExecutionAccelerators - See Also:
-
Nested Class Summary
Nested ClassesModifier and TypeClassDescriptionstatic final class@@ @@ ..static interfacestatic final class@@ @@ ..Nested classes/interfaces inherited from class com.google.protobuf.GeneratedMessageV3
com.google.protobuf.GeneratedMessageV3.BuilderParent, com.google.protobuf.GeneratedMessageV3.ExtendableBuilder<MessageT extends com.google.protobuf.GeneratedMessageV3.ExtendableMessage<MessageT>,BuilderT extends com.google.protobuf.GeneratedMessageV3.ExtendableBuilder<MessageT, BuilderT>>, com.google.protobuf.GeneratedMessageV3.ExtendableMessage<MessageT extends com.google.protobuf.GeneratedMessageV3.ExtendableMessage<MessageT>>, com.google.protobuf.GeneratedMessageV3.ExtendableMessageOrBuilder<MessageT extends com.google.protobuf.GeneratedMessageV3.ExtendableMessage<MessageT>>, com.google.protobuf.GeneratedMessageV3.FieldAccessorTable, com.google.protobuf.GeneratedMessageV3.UnusedPrivateParameter Nested classes/interfaces inherited from class com.google.protobuf.AbstractMessageLite
com.google.protobuf.AbstractMessageLite.InternalOneOfEnum -
Field Summary
FieldsModifier and TypeFieldDescriptionstatic final intstatic final intFields inherited from class com.google.protobuf.GeneratedMessageV3
alwaysUseFieldBuilders, unknownFieldsFields inherited from class com.google.protobuf.AbstractMessage
memoizedSizeFields inherited from class com.google.protobuf.AbstractMessageLite
memoizedHashCode -
Method Summary
Modifier and TypeMethodDescriptionbooleangetCpuExecutionAccelerator(int index) @@ ..int@@ ..@@ ..getCpuExecutionAcceleratorOrBuilder(int index) @@ ..List<? extends ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder> @@ ..static final com.google.protobuf.Descriptors.DescriptorgetGpuExecutionAccelerator(int index) @@ ..int@@ ..@@ ..getGpuExecutionAcceleratorOrBuilder(int index) @@ ..List<? extends ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder> @@ ..com.google.protobuf.Parser<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators> intinthashCode()protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTablefinal booleannewBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) protected ObjectnewInstance(com.google.protobuf.GeneratedMessageV3.UnusedPrivateParameter unused) parseDelimitedFrom(InputStream input) parseDelimitedFrom(InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) parseFrom(byte[] data) parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) parseFrom(com.google.protobuf.ByteString data) parseFrom(com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) parseFrom(com.google.protobuf.CodedInputStream input) parseFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) parseFrom(InputStream input) parseFrom(InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) parseFrom(ByteBuffer data) parseFrom(ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) static com.google.protobuf.Parser<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators> parser()voidwriteTo(com.google.protobuf.CodedOutputStream output) Methods inherited from class com.google.protobuf.GeneratedMessageV3
canUseUnsafe, computeStringSize, computeStringSizeNoTag, emptyBooleanList, emptyDoubleList, emptyFloatList, emptyIntList, emptyList, emptyLongList, getAllFields, getDescriptorForType, getField, getOneofFieldDescriptor, getRepeatedField, getRepeatedFieldCount, getUnknownFields, hasField, hasOneof, internalGetMapField, internalGetMapFieldReflection, isStringEmpty, makeExtensionsImmutable, makeMutableCopy, makeMutableCopy, mergeFromAndMakeImmutableInternal, mutableCopy, mutableCopy, mutableCopy, mutableCopy, mutableCopy, newBooleanList, newBuilderForType, newDoubleList, newFloatList, newIntList, newLongList, parseDelimitedWithIOException, parseDelimitedWithIOException, parseUnknownField, parseUnknownFieldProto3, parseWithIOException, parseWithIOException, parseWithIOException, parseWithIOException, serializeBooleanMapTo, serializeIntegerMapTo, serializeLongMapTo, serializeStringMapTo, writeReplace, writeString, writeStringNoTagMethods inherited from class com.google.protobuf.AbstractMessage
findInitializationErrors, getInitializationErrorString, hashBoolean, hashEnum, hashEnumList, hashFields, hashLong, toStringMethods inherited from class com.google.protobuf.AbstractMessageLite
addAll, addAll, checkByteStringIsUtf8, toByteArray, toByteString, writeDelimitedTo, writeToMethods inherited from class java.lang.Object
clone, finalize, getClass, notify, notifyAll, wait, wait, waitMethods inherited from interface com.google.protobuf.MessageLite
toByteArray, toByteString, writeDelimitedTo, writeToMethods inherited from interface com.google.protobuf.MessageOrBuilder
findInitializationErrors, getAllFields, getDescriptorForType, getField, getInitializationErrorString, getOneofFieldDescriptor, getRepeatedField, getRepeatedFieldCount, getUnknownFields, hasField, hasOneof
-
Field Details
-
GPU_EXECUTION_ACCELERATOR_FIELD_NUMBER
public static final int GPU_EXECUTION_ACCELERATOR_FIELD_NUMBER- See Also:
-
CPU_EXECUTION_ACCELERATOR_FIELD_NUMBER
public static final int CPU_EXECUTION_ACCELERATOR_FIELD_NUMBER- See Also:
-
-
Method Details
-
newInstance
- Overrides:
newInstancein classcom.google.protobuf.GeneratedMessageV3
-
getDescriptor
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() -
internalGetFieldAccessorTable
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()- Specified by:
internalGetFieldAccessorTablein classcom.google.protobuf.GeneratedMessageV3
-
getGpuExecutionAcceleratorList
public List<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator> getGpuExecutionAcceleratorList()@@ .. cpp:var:: Accelerator gpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on GPU. @@ @@ For ONNX Runtime backend, possible value is "tensorrt" as name, @@ and no parameters are required. @@ @@ For TensorFlow backend, possible values are "tensorrt", @@ "auto_mixed_precision", "gpu_io". @@ @@ For "tensorrt", the following parameters can be specified: @@ "precision_mode": The precision used for optimization. @@ Allowed values are "FP32" and "FP16". Default value is "FP32". @@ @@ "max_cached_engines": The maximum number of cached TensorRT @@ engines in dynamic TensorRT ops. Default value is 100. @@ @@ "minimum_segment_size": The smallest model subgraph that will @@ be considered for optimization by TensorRT. Default value is 3. @@ @@ "max_workspace_size_bytes": The maximum GPU memory the model @@ can use temporarily during execution. Default value is 1GB. @@ @@ For "auto_mixed_precision", no parameters are required. If set, @@ the model will try to use FP16 for better performance. @@ This optimization cannot be set with "tensorrt". @@ @@ For "gpu_io", no parameters are required. If set, the model will @@ be executed using TensorFlow Callable API to set input and output @@ tensors in GPU memory if possible, which can reduce data transfer @@ overhead if the model is used in ensemble. However, the Callable @@ object will be created on model creation and it will request all @@ outputs for every model execution, which may impact the @@ performance if a request does not require all outputs. This @@ optimization will only take effect if the model instance is @@ created with KIND_GPU. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator gpu_execution_accelerator = 1;- Specified by:
getGpuExecutionAcceleratorListin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getGpuExecutionAcceleratorOrBuilderList
public List<? extends ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder> getGpuExecutionAcceleratorOrBuilderList()@@ .. cpp:var:: Accelerator gpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on GPU. @@ @@ For ONNX Runtime backend, possible value is "tensorrt" as name, @@ and no parameters are required. @@ @@ For TensorFlow backend, possible values are "tensorrt", @@ "auto_mixed_precision", "gpu_io". @@ @@ For "tensorrt", the following parameters can be specified: @@ "precision_mode": The precision used for optimization. @@ Allowed values are "FP32" and "FP16". Default value is "FP32". @@ @@ "max_cached_engines": The maximum number of cached TensorRT @@ engines in dynamic TensorRT ops. Default value is 100. @@ @@ "minimum_segment_size": The smallest model subgraph that will @@ be considered for optimization by TensorRT. Default value is 3. @@ @@ "max_workspace_size_bytes": The maximum GPU memory the model @@ can use temporarily during execution. Default value is 1GB. @@ @@ For "auto_mixed_precision", no parameters are required. If set, @@ the model will try to use FP16 for better performance. @@ This optimization cannot be set with "tensorrt". @@ @@ For "gpu_io", no parameters are required. If set, the model will @@ be executed using TensorFlow Callable API to set input and output @@ tensors in GPU memory if possible, which can reduce data transfer @@ overhead if the model is used in ensemble. However, the Callable @@ object will be created on model creation and it will request all @@ outputs for every model execution, which may impact the @@ performance if a request does not require all outputs. This @@ optimization will only take effect if the model instance is @@ created with KIND_GPU. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator gpu_execution_accelerator = 1; -
getGpuExecutionAcceleratorCount
public int getGpuExecutionAcceleratorCount()@@ .. cpp:var:: Accelerator gpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on GPU. @@ @@ For ONNX Runtime backend, possible value is "tensorrt" as name, @@ and no parameters are required. @@ @@ For TensorFlow backend, possible values are "tensorrt", @@ "auto_mixed_precision", "gpu_io". @@ @@ For "tensorrt", the following parameters can be specified: @@ "precision_mode": The precision used for optimization. @@ Allowed values are "FP32" and "FP16". Default value is "FP32". @@ @@ "max_cached_engines": The maximum number of cached TensorRT @@ engines in dynamic TensorRT ops. Default value is 100. @@ @@ "minimum_segment_size": The smallest model subgraph that will @@ be considered for optimization by TensorRT. Default value is 3. @@ @@ "max_workspace_size_bytes": The maximum GPU memory the model @@ can use temporarily during execution. Default value is 1GB. @@ @@ For "auto_mixed_precision", no parameters are required. If set, @@ the model will try to use FP16 for better performance. @@ This optimization cannot be set with "tensorrt". @@ @@ For "gpu_io", no parameters are required. If set, the model will @@ be executed using TensorFlow Callable API to set input and output @@ tensors in GPU memory if possible, which can reduce data transfer @@ overhead if the model is used in ensemble. However, the Callable @@ object will be created on model creation and it will request all @@ outputs for every model execution, which may impact the @@ performance if a request does not require all outputs. This @@ optimization will only take effect if the model instance is @@ created with KIND_GPU. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator gpu_execution_accelerator = 1;- Specified by:
getGpuExecutionAcceleratorCountin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getGpuExecutionAccelerator
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator getGpuExecutionAccelerator(int index) @@ .. cpp:var:: Accelerator gpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on GPU. @@ @@ For ONNX Runtime backend, possible value is "tensorrt" as name, @@ and no parameters are required. @@ @@ For TensorFlow backend, possible values are "tensorrt", @@ "auto_mixed_precision", "gpu_io". @@ @@ For "tensorrt", the following parameters can be specified: @@ "precision_mode": The precision used for optimization. @@ Allowed values are "FP32" and "FP16". Default value is "FP32". @@ @@ "max_cached_engines": The maximum number of cached TensorRT @@ engines in dynamic TensorRT ops. Default value is 100. @@ @@ "minimum_segment_size": The smallest model subgraph that will @@ be considered for optimization by TensorRT. Default value is 3. @@ @@ "max_workspace_size_bytes": The maximum GPU memory the model @@ can use temporarily during execution. Default value is 1GB. @@ @@ For "auto_mixed_precision", no parameters are required. If set, @@ the model will try to use FP16 for better performance. @@ This optimization cannot be set with "tensorrt". @@ @@ For "gpu_io", no parameters are required. If set, the model will @@ be executed using TensorFlow Callable API to set input and output @@ tensors in GPU memory if possible, which can reduce data transfer @@ overhead if the model is used in ensemble. However, the Callable @@ object will be created on model creation and it will request all @@ outputs for every model execution, which may impact the @@ performance if a request does not require all outputs. This @@ optimization will only take effect if the model instance is @@ created with KIND_GPU. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator gpu_execution_accelerator = 1;- Specified by:
getGpuExecutionAcceleratorin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getGpuExecutionAcceleratorOrBuilder
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder getGpuExecutionAcceleratorOrBuilder(int index) @@ .. cpp:var:: Accelerator gpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on GPU. @@ @@ For ONNX Runtime backend, possible value is "tensorrt" as name, @@ and no parameters are required. @@ @@ For TensorFlow backend, possible values are "tensorrt", @@ "auto_mixed_precision", "gpu_io". @@ @@ For "tensorrt", the following parameters can be specified: @@ "precision_mode": The precision used for optimization. @@ Allowed values are "FP32" and "FP16". Default value is "FP32". @@ @@ "max_cached_engines": The maximum number of cached TensorRT @@ engines in dynamic TensorRT ops. Default value is 100. @@ @@ "minimum_segment_size": The smallest model subgraph that will @@ be considered for optimization by TensorRT. Default value is 3. @@ @@ "max_workspace_size_bytes": The maximum GPU memory the model @@ can use temporarily during execution. Default value is 1GB. @@ @@ For "auto_mixed_precision", no parameters are required. If set, @@ the model will try to use FP16 for better performance. @@ This optimization cannot be set with "tensorrt". @@ @@ For "gpu_io", no parameters are required. If set, the model will @@ be executed using TensorFlow Callable API to set input and output @@ tensors in GPU memory if possible, which can reduce data transfer @@ overhead if the model is used in ensemble. However, the Callable @@ object will be created on model creation and it will request all @@ outputs for every model execution, which may impact the @@ performance if a request does not require all outputs. This @@ optimization will only take effect if the model instance is @@ created with KIND_GPU. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator gpu_execution_accelerator = 1;- Specified by:
getGpuExecutionAcceleratorOrBuilderin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getCpuExecutionAcceleratorList
public List<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator> getCpuExecutionAcceleratorList()@@ .. cpp:var:: Accelerator cpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on CPU. @@ @@ For ONNX Runtime backend, possible value is "openvino" as name, @@ and no parameters are required. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator cpu_execution_accelerator = 2;- Specified by:
getCpuExecutionAcceleratorListin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getCpuExecutionAcceleratorOrBuilderList
public List<? extends ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder> getCpuExecutionAcceleratorOrBuilderList()@@ .. cpp:var:: Accelerator cpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on CPU. @@ @@ For ONNX Runtime backend, possible value is "openvino" as name, @@ and no parameters are required. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator cpu_execution_accelerator = 2; -
getCpuExecutionAcceleratorCount
public int getCpuExecutionAcceleratorCount()@@ .. cpp:var:: Accelerator cpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on CPU. @@ @@ For ONNX Runtime backend, possible value is "openvino" as name, @@ and no parameters are required. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator cpu_execution_accelerator = 2;- Specified by:
getCpuExecutionAcceleratorCountin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getCpuExecutionAccelerator
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator getCpuExecutionAccelerator(int index) @@ .. cpp:var:: Accelerator cpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on CPU. @@ @@ For ONNX Runtime backend, possible value is "openvino" as name, @@ and no parameters are required. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator cpu_execution_accelerator = 2;- Specified by:
getCpuExecutionAcceleratorin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
getCpuExecutionAcceleratorOrBuilder
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.AcceleratorOrBuilder getCpuExecutionAcceleratorOrBuilder(int index) @@ .. cpp:var:: Accelerator cpu_execution_accelerator (repeated) @@ @@ The preferred execution provider to be used if the model instance @@ is deployed on CPU. @@ @@ For ONNX Runtime backend, possible value is "openvino" as name, @@ and no parameters are required. @@
repeated .inference.ModelOptimizationPolicy.ExecutionAccelerators.Accelerator cpu_execution_accelerator = 2;- Specified by:
getCpuExecutionAcceleratorOrBuilderin interfaceModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAcceleratorsOrBuilder
-
isInitialized
public final boolean isInitialized()- Specified by:
isInitializedin interfacecom.google.protobuf.MessageLiteOrBuilder- Overrides:
isInitializedin classcom.google.protobuf.GeneratedMessageV3
-
writeTo
- Specified by:
writeToin interfacecom.google.protobuf.MessageLite- Overrides:
writeToin classcom.google.protobuf.GeneratedMessageV3- Throws:
IOException
-
getSerializedSize
public int getSerializedSize()- Specified by:
getSerializedSizein interfacecom.google.protobuf.MessageLite- Overrides:
getSerializedSizein classcom.google.protobuf.GeneratedMessageV3
-
equals
- Specified by:
equalsin interfacecom.google.protobuf.Message- Overrides:
equalsin classcom.google.protobuf.AbstractMessage
-
hashCode
public int hashCode()- Specified by:
hashCodein interfacecom.google.protobuf.Message- Overrides:
hashCodein classcom.google.protobuf.AbstractMessage
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException - Throws:
com.google.protobuf.InvalidProtocolBufferException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(InputStream input) throws IOException - Throws:
IOException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException - Throws:
IOException
-
parseDelimitedFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseDelimitedFrom(InputStream input) throws IOException - Throws:
IOException
-
parseDelimitedFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseDelimitedFrom(InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException - Throws:
IOException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(com.google.protobuf.CodedInputStream input) throws IOException - Throws:
IOException
-
parseFrom
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators parseFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException - Throws:
IOException
-
newBuilderForType
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Builder newBuilderForType()- Specified by:
newBuilderForTypein interfacecom.google.protobuf.Message- Specified by:
newBuilderForTypein interfacecom.google.protobuf.MessageLite
-
newBuilder
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Builder newBuilder() -
newBuilder
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Builder newBuilder(ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators prototype) -
toBuilder
- Specified by:
toBuilderin interfacecom.google.protobuf.Message- Specified by:
toBuilderin interfacecom.google.protobuf.MessageLite
-
newBuilderForType
protected ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators.Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) - Specified by:
newBuilderForTypein classcom.google.protobuf.GeneratedMessageV3
-
getDefaultInstance
public static ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators getDefaultInstance() -
parser
public static com.google.protobuf.Parser<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators> parser() -
getParserForType
public com.google.protobuf.Parser<ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators> getParserForType()- Specified by:
getParserForTypein interfacecom.google.protobuf.Message- Specified by:
getParserForTypein interfacecom.google.protobuf.MessageLite- Overrides:
getParserForTypein classcom.google.protobuf.GeneratedMessageV3
-
getDefaultInstanceForType
public ModelConfigOuterClass.ModelOptimizationPolicy.ExecutionAccelerators getDefaultInstanceForType()- Specified by:
getDefaultInstanceForTypein interfacecom.google.protobuf.MessageLiteOrBuilder- Specified by:
getDefaultInstanceForTypein interfacecom.google.protobuf.MessageOrBuilder
-