Package inference

Interface ModelConfigOuterClass.ModelConfigOrBuilder

All Superinterfaces:
com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder
All Known Implementing Classes:
ModelConfigOuterClass.ModelConfig, ModelConfigOuterClass.ModelConfig.Builder
Enclosing class:
ModelConfigOuterClass

public static interface ModelConfigOuterClass.ModelConfigOrBuilder extends com.google.protobuf.MessageOrBuilder
  • Method Details

    • getName

      String getName()
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model.
      @@
       
      string name = 1;
      Returns:
      The name.
    • getNameBytes

      com.google.protobuf.ByteString getNameBytes()
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model.
      @@
       
      string name = 1;
      Returns:
      The bytes for name.
    • getPlatform

      String getPlatform()
      @@  .. cpp:var:: string platform
      @@
      @@     Additional backend-specific configuration for the model.
      @@     Please refer to the backend documentation on whether this field
      @@     should be specified.
      @@
       
      string platform = 2;
      Returns:
      The platform.
    • getPlatformBytes

      com.google.protobuf.ByteString getPlatformBytes()
      @@  .. cpp:var:: string platform
      @@
      @@     Additional backend-specific configuration for the model.
      @@     Please refer to the backend documentation on whether this field
      @@     should be specified.
      @@
       
      string platform = 2;
      Returns:
      The bytes for platform.
    • getBackend

      String getBackend()
      @@  .. cpp:var:: string backend
      @@
      @@     The backend used by the model.
      @@
       
      string backend = 17;
      Returns:
      The backend.
    • getBackendBytes

      com.google.protobuf.ByteString getBackendBytes()
      @@  .. cpp:var:: string backend
      @@
      @@     The backend used by the model.
      @@
       
      string backend = 17;
      Returns:
      The bytes for backend.
    • getRuntime

      String getRuntime()
      @@  .. cpp:var:: string runtime
      @@
      @@     The name of the backend library file used by the model.
      @@
       
      string runtime = 25;
      Returns:
      The runtime.
    • getRuntimeBytes

      com.google.protobuf.ByteString getRuntimeBytes()
      @@  .. cpp:var:: string runtime
      @@
      @@     The name of the backend library file used by the model.
      @@
       
      string runtime = 25;
      Returns:
      The bytes for runtime.
    • hasVersionPolicy

      boolean hasVersionPolicy()
      @@  .. cpp:var:: ModelVersionPolicy version_policy
      @@
      @@     Policy indicating which version(s) of the model will be served.
      @@
       
      .inference.ModelVersionPolicy version_policy = 3;
      Returns:
      Whether the versionPolicy field is set.
    • getVersionPolicy

      @@  .. cpp:var:: ModelVersionPolicy version_policy
      @@
      @@     Policy indicating which version(s) of the model will be served.
      @@
       
      .inference.ModelVersionPolicy version_policy = 3;
      Returns:
      The versionPolicy.
    • getVersionPolicyOrBuilder

      @@  .. cpp:var:: ModelVersionPolicy version_policy
      @@
      @@     Policy indicating which version(s) of the model will be served.
      @@
       
      .inference.ModelVersionPolicy version_policy = 3;
    • getMaxBatchSize

      int getMaxBatchSize()
      @@  .. cpp:var:: int32 max_batch_size
      @@
      @@     Maximum batch size allowed for inference. This can only decrease
      @@     what is allowed by the model itself. A max_batch_size value of 0
      @@     indicates that batching is not allowed for the model and the
      @@     dimension/shape of the input and output tensors must exactly
      @@     match what is specified in the input and output configuration. A
      @@     max_batch_size value > 0 indicates that batching is allowed and
      @@     so the model expects the input tensors to have an additional
      @@     initial dimension for the batching that is not specified in the
      @@     input (for example, if the model supports batched inputs of
      @@     2-dimensional tensors then the model configuration will specify
      @@     the input shape as [ X, Y ] but the model will expect the actual
      @@     input tensors to have shape [ N, X, Y ]). For max_batch_size > 0
      @@     returned outputs will also have an additional initial dimension
      @@     for the batch.
      @@
       
      int32 max_batch_size = 4;
      Returns:
      The maxBatchSize.
    • getInputList

      @@  .. cpp:var:: ModelInput input (repeated)
      @@
      @@     The inputs requested by the model.
      @@
       
      repeated .inference.ModelInput input = 5;
    • getInput

      ModelConfigOuterClass.ModelInput getInput(int index)
      @@  .. cpp:var:: ModelInput input (repeated)
      @@
      @@     The inputs requested by the model.
      @@
       
      repeated .inference.ModelInput input = 5;
    • getInputCount

      int getInputCount()
      @@  .. cpp:var:: ModelInput input (repeated)
      @@
      @@     The inputs requested by the model.
      @@
       
      repeated .inference.ModelInput input = 5;
    • getInputOrBuilderList

      List<? extends ModelConfigOuterClass.ModelInputOrBuilder> getInputOrBuilderList()
      @@  .. cpp:var:: ModelInput input (repeated)
      @@
      @@     The inputs requested by the model.
      @@
       
      repeated .inference.ModelInput input = 5;
    • getInputOrBuilder

      ModelConfigOuterClass.ModelInputOrBuilder getInputOrBuilder(int index)
      @@  .. cpp:var:: ModelInput input (repeated)
      @@
      @@     The inputs requested by the model.
      @@
       
      repeated .inference.ModelInput input = 5;
    • getOutputList

      @@  .. cpp:var:: ModelOutput output (repeated)
      @@
      @@     The outputs produced by the model.
      @@
       
      repeated .inference.ModelOutput output = 6;
    • getOutput

      ModelConfigOuterClass.ModelOutput getOutput(int index)
      @@  .. cpp:var:: ModelOutput output (repeated)
      @@
      @@     The outputs produced by the model.
      @@
       
      repeated .inference.ModelOutput output = 6;
    • getOutputCount

      int getOutputCount()
      @@  .. cpp:var:: ModelOutput output (repeated)
      @@
      @@     The outputs produced by the model.
      @@
       
      repeated .inference.ModelOutput output = 6;
    • getOutputOrBuilderList

      List<? extends ModelConfigOuterClass.ModelOutputOrBuilder> getOutputOrBuilderList()
      @@  .. cpp:var:: ModelOutput output (repeated)
      @@
      @@     The outputs produced by the model.
      @@
       
      repeated .inference.ModelOutput output = 6;
    • getOutputOrBuilder

      ModelConfigOuterClass.ModelOutputOrBuilder getOutputOrBuilder(int index)
      @@  .. cpp:var:: ModelOutput output (repeated)
      @@
      @@     The outputs produced by the model.
      @@
       
      repeated .inference.ModelOutput output = 6;
    • getBatchInputList

      @@  .. cpp:var:: BatchInput batch_input (repeated)
      @@
      @@     The model input(s) that the server should use to communicate
      @@     batch related values to the model.
      @@
       
      repeated .inference.BatchInput batch_input = 20;
    • getBatchInput

      ModelConfigOuterClass.BatchInput getBatchInput(int index)
      @@  .. cpp:var:: BatchInput batch_input (repeated)
      @@
      @@     The model input(s) that the server should use to communicate
      @@     batch related values to the model.
      @@
       
      repeated .inference.BatchInput batch_input = 20;
    • getBatchInputCount

      int getBatchInputCount()
      @@  .. cpp:var:: BatchInput batch_input (repeated)
      @@
      @@     The model input(s) that the server should use to communicate
      @@     batch related values to the model.
      @@
       
      repeated .inference.BatchInput batch_input = 20;
    • getBatchInputOrBuilderList

      List<? extends ModelConfigOuterClass.BatchInputOrBuilder> getBatchInputOrBuilderList()
      @@  .. cpp:var:: BatchInput batch_input (repeated)
      @@
      @@     The model input(s) that the server should use to communicate
      @@     batch related values to the model.
      @@
       
      repeated .inference.BatchInput batch_input = 20;
    • getBatchInputOrBuilder

      ModelConfigOuterClass.BatchInputOrBuilder getBatchInputOrBuilder(int index)
      @@  .. cpp:var:: BatchInput batch_input (repeated)
      @@
      @@     The model input(s) that the server should use to communicate
      @@     batch related values to the model.
      @@
       
      repeated .inference.BatchInput batch_input = 20;
    • getBatchOutputList

      @@  .. cpp:var:: BatchOutput batch_output (repeated)
      @@
      @@     The outputs produced by the model that require special handling
      @@     by the model backend.
      @@
       
      repeated .inference.BatchOutput batch_output = 21;
    • getBatchOutput

      ModelConfigOuterClass.BatchOutput getBatchOutput(int index)
      @@  .. cpp:var:: BatchOutput batch_output (repeated)
      @@
      @@     The outputs produced by the model that require special handling
      @@     by the model backend.
      @@
       
      repeated .inference.BatchOutput batch_output = 21;
    • getBatchOutputCount

      int getBatchOutputCount()
      @@  .. cpp:var:: BatchOutput batch_output (repeated)
      @@
      @@     The outputs produced by the model that require special handling
      @@     by the model backend.
      @@
       
      repeated .inference.BatchOutput batch_output = 21;
    • getBatchOutputOrBuilderList

      List<? extends ModelConfigOuterClass.BatchOutputOrBuilder> getBatchOutputOrBuilderList()
      @@  .. cpp:var:: BatchOutput batch_output (repeated)
      @@
      @@     The outputs produced by the model that require special handling
      @@     by the model backend.
      @@
       
      repeated .inference.BatchOutput batch_output = 21;
    • getBatchOutputOrBuilder

      ModelConfigOuterClass.BatchOutputOrBuilder getBatchOutputOrBuilder(int index)
      @@  .. cpp:var:: BatchOutput batch_output (repeated)
      @@
      @@     The outputs produced by the model that require special handling
      @@     by the model backend.
      @@
       
      repeated .inference.BatchOutput batch_output = 21;
    • hasOptimization

      boolean hasOptimization()
      @@  .. cpp:var:: ModelOptimizationPolicy optimization
      @@
      @@     Optimization configuration for the model. If not specified
      @@     then default optimization policy is used.
      @@
       
      .inference.ModelOptimizationPolicy optimization = 12;
      Returns:
      Whether the optimization field is set.
    • getOptimization

      @@  .. cpp:var:: ModelOptimizationPolicy optimization
      @@
      @@     Optimization configuration for the model. If not specified
      @@     then default optimization policy is used.
      @@
       
      .inference.ModelOptimizationPolicy optimization = 12;
      Returns:
      The optimization.
    • getOptimizationOrBuilder

      @@  .. cpp:var:: ModelOptimizationPolicy optimization
      @@
      @@     Optimization configuration for the model. If not specified
      @@     then default optimization policy is used.
      @@
       
      .inference.ModelOptimizationPolicy optimization = 12;
    • hasDynamicBatching

      boolean hasDynamicBatching()
      @@    .. cpp:var:: ModelDynamicBatching dynamic_batching
      @@
      @@       If specified, enables the dynamic-batching scheduling
      @@       policy. With dynamic-batching the scheduler may group
      @@       together independent requests into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelDynamicBatching dynamic_batching = 11;
      Returns:
      Whether the dynamicBatching field is set.
    • getDynamicBatching

      @@    .. cpp:var:: ModelDynamicBatching dynamic_batching
      @@
      @@       If specified, enables the dynamic-batching scheduling
      @@       policy. With dynamic-batching the scheduler may group
      @@       together independent requests into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelDynamicBatching dynamic_batching = 11;
      Returns:
      The dynamicBatching.
    • getDynamicBatchingOrBuilder

      @@    .. cpp:var:: ModelDynamicBatching dynamic_batching
      @@
      @@       If specified, enables the dynamic-batching scheduling
      @@       policy. With dynamic-batching the scheduler may group
      @@       together independent requests into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelDynamicBatching dynamic_batching = 11;
    • hasSequenceBatching

      boolean hasSequenceBatching()
      @@    .. cpp:var:: ModelSequenceBatching sequence_batching
      @@
      @@       If specified, enables the sequence-batching scheduling
      @@       policy. With sequence-batching, inference requests
      @@       with the same correlation ID are routed to the same
      @@       model instance. Multiple sequences of inference requests
      @@       may be batched together into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelSequenceBatching sequence_batching = 13;
      Returns:
      Whether the sequenceBatching field is set.
    • getSequenceBatching

      @@    .. cpp:var:: ModelSequenceBatching sequence_batching
      @@
      @@       If specified, enables the sequence-batching scheduling
      @@       policy. With sequence-batching, inference requests
      @@       with the same correlation ID are routed to the same
      @@       model instance. Multiple sequences of inference requests
      @@       may be batched together into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelSequenceBatching sequence_batching = 13;
      Returns:
      The sequenceBatching.
    • getSequenceBatchingOrBuilder

      @@    .. cpp:var:: ModelSequenceBatching sequence_batching
      @@
      @@       If specified, enables the sequence-batching scheduling
      @@       policy. With sequence-batching, inference requests
      @@       with the same correlation ID are routed to the same
      @@       model instance. Multiple sequences of inference requests
      @@       may be batched together into a single batch to
      @@       improve inference throughput.
      @@
       
      .inference.ModelSequenceBatching sequence_batching = 13;
    • hasEnsembleScheduling

      boolean hasEnsembleScheduling()
      @@    .. cpp:var:: ModelEnsembling ensemble_scheduling
      @@
      @@       If specified, enables the model-ensembling scheduling
      @@       policy. With model-ensembling, inference requests
      @@       will be processed according to the specification, such as an
      @@       execution sequence of models. The input specified in this model
      @@       config will be the input for the ensemble, and the output
      @@       specified will be the output of the ensemble.
      @@
       
      .inference.ModelEnsembling ensemble_scheduling = 15;
      Returns:
      Whether the ensembleScheduling field is set.
    • getEnsembleScheduling

      ModelConfigOuterClass.ModelEnsembling getEnsembleScheduling()
      @@    .. cpp:var:: ModelEnsembling ensemble_scheduling
      @@
      @@       If specified, enables the model-ensembling scheduling
      @@       policy. With model-ensembling, inference requests
      @@       will be processed according to the specification, such as an
      @@       execution sequence of models. The input specified in this model
      @@       config will be the input for the ensemble, and the output
      @@       specified will be the output of the ensemble.
      @@
       
      .inference.ModelEnsembling ensemble_scheduling = 15;
      Returns:
      The ensembleScheduling.
    • getEnsembleSchedulingOrBuilder

      ModelConfigOuterClass.ModelEnsemblingOrBuilder getEnsembleSchedulingOrBuilder()
      @@    .. cpp:var:: ModelEnsembling ensemble_scheduling
      @@
      @@       If specified, enables the model-ensembling scheduling
      @@       policy. With model-ensembling, inference requests
      @@       will be processed according to the specification, such as an
      @@       execution sequence of models. The input specified in this model
      @@       config will be the input for the ensemble, and the output
      @@       specified will be the output of the ensemble.
      @@
       
      .inference.ModelEnsembling ensemble_scheduling = 15;
    • getInstanceGroupList

      @@  .. cpp:var:: ModelInstanceGroup instance_group (repeated)
      @@
      @@     Instances of this model. If not specified, one instance
      @@     of the model will be instantiated on each available GPU.
      @@
       
      repeated .inference.ModelInstanceGroup instance_group = 7;
    • getInstanceGroup

      ModelConfigOuterClass.ModelInstanceGroup getInstanceGroup(int index)
      @@  .. cpp:var:: ModelInstanceGroup instance_group (repeated)
      @@
      @@     Instances of this model. If not specified, one instance
      @@     of the model will be instantiated on each available GPU.
      @@
       
      repeated .inference.ModelInstanceGroup instance_group = 7;
    • getInstanceGroupCount

      int getInstanceGroupCount()
      @@  .. cpp:var:: ModelInstanceGroup instance_group (repeated)
      @@
      @@     Instances of this model. If not specified, one instance
      @@     of the model will be instantiated on each available GPU.
      @@
       
      repeated .inference.ModelInstanceGroup instance_group = 7;
    • getInstanceGroupOrBuilderList

      List<? extends ModelConfigOuterClass.ModelInstanceGroupOrBuilder> getInstanceGroupOrBuilderList()
      @@  .. cpp:var:: ModelInstanceGroup instance_group (repeated)
      @@
      @@     Instances of this model. If not specified, one instance
      @@     of the model will be instantiated on each available GPU.
      @@
       
      repeated .inference.ModelInstanceGroup instance_group = 7;
    • getInstanceGroupOrBuilder

      ModelConfigOuterClass.ModelInstanceGroupOrBuilder getInstanceGroupOrBuilder(int index)
      @@  .. cpp:var:: ModelInstanceGroup instance_group (repeated)
      @@
      @@     Instances of this model. If not specified, one instance
      @@     of the model will be instantiated on each available GPU.
      @@
       
      repeated .inference.ModelInstanceGroup instance_group = 7;
    • getDefaultModelFilename

      String getDefaultModelFilename()
      @@  .. cpp:var:: string default_model_filename
      @@
      @@     Optional filename of the model file to use if a
      @@     compute-capability specific model is not specified in
      @@     :cpp:var:`cc_model_filenames`. If not specified the default name
      @@     is 'model.graphdef', 'model.savedmodel', 'model.plan' or
      @@     'model.pt' depending on the model type.
      @@
       
      string default_model_filename = 8;
      Returns:
      The defaultModelFilename.
    • getDefaultModelFilenameBytes

      com.google.protobuf.ByteString getDefaultModelFilenameBytes()
      @@  .. cpp:var:: string default_model_filename
      @@
      @@     Optional filename of the model file to use if a
      @@     compute-capability specific model is not specified in
      @@     :cpp:var:`cc_model_filenames`. If not specified the default name
      @@     is 'model.graphdef', 'model.savedmodel', 'model.plan' or
      @@     'model.pt' depending on the model type.
      @@
       
      string default_model_filename = 8;
      Returns:
      The bytes for defaultModelFilename.
    • getCcModelFilenamesCount

      int getCcModelFilenamesCount()
      @@  .. cpp:var:: map<string,string> cc_model_filenames
      @@
      @@     Optional map from CUDA compute capability to the filename of
      @@     the model that supports that compute capability. The filename
      @@     refers to a file within the model version directory.
      @@
       
      map<string, string> cc_model_filenames = 9;
    • containsCcModelFilenames

      boolean containsCcModelFilenames(String key)
      @@  .. cpp:var:: map<string,string> cc_model_filenames
      @@
      @@     Optional map from CUDA compute capability to the filename of
      @@     the model that supports that compute capability. The filename
      @@     refers to a file within the model version directory.
      @@
       
      map<string, string> cc_model_filenames = 9;
    • getCcModelFilenames

      @Deprecated Map<String,String> getCcModelFilenames()
      Deprecated.
      Use getCcModelFilenamesMap() instead.
    • getCcModelFilenamesMap

      Map<String,String> getCcModelFilenamesMap()
      @@  .. cpp:var:: map<string,string> cc_model_filenames
      @@
      @@     Optional map from CUDA compute capability to the filename of
      @@     the model that supports that compute capability. The filename
      @@     refers to a file within the model version directory.
      @@
       
      map<string, string> cc_model_filenames = 9;
    • getCcModelFilenamesOrDefault

      String getCcModelFilenamesOrDefault(String key, String defaultValue)
      @@  .. cpp:var:: map<string,string> cc_model_filenames
      @@
      @@     Optional map from CUDA compute capability to the filename of
      @@     the model that supports that compute capability. The filename
      @@     refers to a file within the model version directory.
      @@
       
      map<string, string> cc_model_filenames = 9;
    • getCcModelFilenamesOrThrow

      String getCcModelFilenamesOrThrow(String key)
      @@  .. cpp:var:: map<string,string> cc_model_filenames
      @@
      @@     Optional map from CUDA compute capability to the filename of
      @@     the model that supports that compute capability. The filename
      @@     refers to a file within the model version directory.
      @@
       
      map<string, string> cc_model_filenames = 9;
    • getMetricTagsCount

      int getMetricTagsCount()
      @@  .. cpp:var:: map<string,string> metric_tags
      @@
      @@     Optional metric tags. User-specific key-value pairs for metrics
      @@     reported for this model. These tags are applied to the metrics
      @@     reported on the HTTP metrics port.
      @@
       
      map<string, string> metric_tags = 10;
    • containsMetricTags

      boolean containsMetricTags(String key)
      @@  .. cpp:var:: map<string,string> metric_tags
      @@
      @@     Optional metric tags. User-specific key-value pairs for metrics
      @@     reported for this model. These tags are applied to the metrics
      @@     reported on the HTTP metrics port.
      @@
       
      map<string, string> metric_tags = 10;
    • getMetricTags

      @Deprecated Map<String,String> getMetricTags()
      Deprecated.
      Use getMetricTagsMap() instead.
    • getMetricTagsMap

      Map<String,String> getMetricTagsMap()
      @@  .. cpp:var:: map<string,string> metric_tags
      @@
      @@     Optional metric tags. User-specific key-value pairs for metrics
      @@     reported for this model. These tags are applied to the metrics
      @@     reported on the HTTP metrics port.
      @@
       
      map<string, string> metric_tags = 10;
    • getMetricTagsOrDefault

      String getMetricTagsOrDefault(String key, String defaultValue)
      @@  .. cpp:var:: map<string,string> metric_tags
      @@
      @@     Optional metric tags. User-specific key-value pairs for metrics
      @@     reported for this model. These tags are applied to the metrics
      @@     reported on the HTTP metrics port.
      @@
       
      map<string, string> metric_tags = 10;
    • getMetricTagsOrThrow

      String getMetricTagsOrThrow(String key)
      @@  .. cpp:var:: map<string,string> metric_tags
      @@
      @@     Optional metric tags. User-specific key-value pairs for metrics
      @@     reported for this model. These tags are applied to the metrics
      @@     reported on the HTTP metrics port.
      @@
       
      map<string, string> metric_tags = 10;
    • getParametersCount

      int getParametersCount()
      @@  .. cpp:var:: map<string,ModelParameter> parameters
      @@
      @@     Optional model parameters. User-specified parameter values.
      @@
       
      map<string, .inference.ModelParameter> parameters = 14;
    • containsParameters

      boolean containsParameters(String key)
      @@  .. cpp:var:: map<string,ModelParameter> parameters
      @@
      @@     Optional model parameters. User-specified parameter values.
      @@
       
      map<string, .inference.ModelParameter> parameters = 14;
    • getParameters

      Deprecated.
      Use getParametersMap() instead.
    • getParametersMap

      @@  .. cpp:var:: map<string,ModelParameter> parameters
      @@
      @@     Optional model parameters. User-specified parameter values.
      @@
       
      map<string, .inference.ModelParameter> parameters = 14;
    • getParametersOrDefault

      @@  .. cpp:var:: map<string,ModelParameter> parameters
      @@
      @@     Optional model parameters. User-specified parameter values.
      @@
       
      map<string, .inference.ModelParameter> parameters = 14;
    • getParametersOrThrow

      ModelConfigOuterClass.ModelParameter getParametersOrThrow(String key)
      @@  .. cpp:var:: map<string,ModelParameter> parameters
      @@
      @@     Optional model parameters. User-specified parameter values.
      @@
       
      map<string, .inference.ModelParameter> parameters = 14;
    • getModelWarmupList

      @@  .. cpp:var:: ModelWarmup model_warmup (repeated)
      @@
      @@     Warmup setting of this model. If specified, all instances
      @@     will be run with the request samples in sequence before
      @@     serving the model.
      @@     This field can only be specified if the model is not an ensemble
      @@     model.
      @@
       
      repeated .inference.ModelWarmup model_warmup = 16;
    • getModelWarmup

      ModelConfigOuterClass.ModelWarmup getModelWarmup(int index)
      @@  .. cpp:var:: ModelWarmup model_warmup (repeated)
      @@
      @@     Warmup setting of this model. If specified, all instances
      @@     will be run with the request samples in sequence before
      @@     serving the model.
      @@     This field can only be specified if the model is not an ensemble
      @@     model.
      @@
       
      repeated .inference.ModelWarmup model_warmup = 16;
    • getModelWarmupCount

      int getModelWarmupCount()
      @@  .. cpp:var:: ModelWarmup model_warmup (repeated)
      @@
      @@     Warmup setting of this model. If specified, all instances
      @@     will be run with the request samples in sequence before
      @@     serving the model.
      @@     This field can only be specified if the model is not an ensemble
      @@     model.
      @@
       
      repeated .inference.ModelWarmup model_warmup = 16;
    • getModelWarmupOrBuilderList

      List<? extends ModelConfigOuterClass.ModelWarmupOrBuilder> getModelWarmupOrBuilderList()
      @@  .. cpp:var:: ModelWarmup model_warmup (repeated)
      @@
      @@     Warmup setting of this model. If specified, all instances
      @@     will be run with the request samples in sequence before
      @@     serving the model.
      @@     This field can only be specified if the model is not an ensemble
      @@     model.
      @@
       
      repeated .inference.ModelWarmup model_warmup = 16;
    • getModelWarmupOrBuilder

      ModelConfigOuterClass.ModelWarmupOrBuilder getModelWarmupOrBuilder(int index)
      @@  .. cpp:var:: ModelWarmup model_warmup (repeated)
      @@
      @@     Warmup setting of this model. If specified, all instances
      @@     will be run with the request samples in sequence before
      @@     serving the model.
      @@     This field can only be specified if the model is not an ensemble
      @@     model.
      @@
       
      repeated .inference.ModelWarmup model_warmup = 16;
    • hasModelOperations

      boolean hasModelOperations()
      @@  .. cpp:var:: ModelOperations model_operations
      @@
      @@     Optional metadata of the libraries providing custom operations for
      @@     this model.
      @@
       
      .inference.ModelOperations model_operations = 18;
      Returns:
      Whether the modelOperations field is set.
    • getModelOperations

      @@  .. cpp:var:: ModelOperations model_operations
      @@
      @@     Optional metadata of the libraries providing custom operations for
      @@     this model.
      @@
       
      .inference.ModelOperations model_operations = 18;
      Returns:
      The modelOperations.
    • getModelOperationsOrBuilder

      ModelConfigOuterClass.ModelOperationsOrBuilder getModelOperationsOrBuilder()
      @@  .. cpp:var:: ModelOperations model_operations
      @@
      @@     Optional metadata of the libraries providing custom operations for
      @@     this model.
      @@
       
      .inference.ModelOperations model_operations = 18;
    • hasModelTransactionPolicy

      boolean hasModelTransactionPolicy()
      @@  .. cpp:var:: ModelTransactionPolicy model_transaction_policy
      @@
      @@     Optional specification that describes the nature of transactions
      @@     to be expected from the model.
      @@
       
      .inference.ModelTransactionPolicy model_transaction_policy = 19;
      Returns:
      Whether the modelTransactionPolicy field is set.
    • getModelTransactionPolicy

      @@  .. cpp:var:: ModelTransactionPolicy model_transaction_policy
      @@
      @@     Optional specification that describes the nature of transactions
      @@     to be expected from the model.
      @@
       
      .inference.ModelTransactionPolicy model_transaction_policy = 19;
      Returns:
      The modelTransactionPolicy.
    • getModelTransactionPolicyOrBuilder

      ModelConfigOuterClass.ModelTransactionPolicyOrBuilder getModelTransactionPolicyOrBuilder()
      @@  .. cpp:var:: ModelTransactionPolicy model_transaction_policy
      @@
      @@     Optional specification that describes the nature of transactions
      @@     to be expected from the model.
      @@
       
      .inference.ModelTransactionPolicy model_transaction_policy = 19;
    • hasModelRepositoryAgents

      boolean hasModelRepositoryAgents()
      @@  .. cpp:var:: ModelRepositoryAgents model_repository_agents
      @@
      @@     Optional specification of the agent(s) that should be invoked
      @@     when repository actions are performed for this model.
      @@
       
      .inference.ModelRepositoryAgents model_repository_agents = 23;
      Returns:
      Whether the modelRepositoryAgents field is set.
    • getModelRepositoryAgents

      @@  .. cpp:var:: ModelRepositoryAgents model_repository_agents
      @@
      @@     Optional specification of the agent(s) that should be invoked
      @@     when repository actions are performed for this model.
      @@
       
      .inference.ModelRepositoryAgents model_repository_agents = 23;
      Returns:
      The modelRepositoryAgents.
    • getModelRepositoryAgentsOrBuilder

      ModelConfigOuterClass.ModelRepositoryAgentsOrBuilder getModelRepositoryAgentsOrBuilder()
      @@  .. cpp:var:: ModelRepositoryAgents model_repository_agents
      @@
      @@     Optional specification of the agent(s) that should be invoked
      @@     when repository actions are performed for this model.
      @@
       
      .inference.ModelRepositoryAgents model_repository_agents = 23;
    • hasResponseCache

      boolean hasResponseCache()
      @@  .. cpp:var:: ModelResponseCache response_cache
      @@
      @@     Optional setting for utilizing the response cache for this
      @@     model.
      @@
       
      .inference.ModelResponseCache response_cache = 24;
      Returns:
      Whether the responseCache field is set.
    • getResponseCache

      @@  .. cpp:var:: ModelResponseCache response_cache
      @@
      @@     Optional setting for utilizing the response cache for this
      @@     model.
      @@
       
      .inference.ModelResponseCache response_cache = 24;
      Returns:
      The responseCache.
    • getResponseCacheOrBuilder

      @@  .. cpp:var:: ModelResponseCache response_cache
      @@
      @@     Optional setting for utilizing the response cache for this
      @@     model.
      @@
       
      .inference.ModelResponseCache response_cache = 24;
    • hasModelMetrics

      boolean hasModelMetrics()
      @@  .. cpp:var:: ModelMetrics model_metrics
      @@
      @@     Optional setting for custom metrics configuration for this model.
      @@     Application default is applied to metrics that are not specified.
      @@
       
      .inference.ModelMetrics model_metrics = 26;
      Returns:
      Whether the modelMetrics field is set.
    • getModelMetrics

      @@  .. cpp:var:: ModelMetrics model_metrics
      @@
      @@     Optional setting for custom metrics configuration for this model.
      @@     Application default is applied to metrics that are not specified.
      @@
       
      .inference.ModelMetrics model_metrics = 26;
      Returns:
      The modelMetrics.
    • getModelMetricsOrBuilder

      @@  .. cpp:var:: ModelMetrics model_metrics
      @@
      @@     Optional setting for custom metrics configuration for this model.
      @@     Application default is applied to metrics that are not specified.
      @@
       
      .inference.ModelMetrics model_metrics = 26;
    • getSchedulingChoiceCase