Package inference

Class GrpcService.ModelStatistics.Builder

java.lang.Object
com.google.protobuf.AbstractMessageLite.Builder
com.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>
com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
inference.GrpcService.ModelStatistics.Builder
All Implemented Interfaces:
com.google.protobuf.Message.Builder, com.google.protobuf.MessageLite.Builder, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, GrpcService.ModelStatisticsOrBuilder, Cloneable
Enclosing class:
GrpcService.ModelStatistics

public static final class GrpcService.ModelStatistics.Builder extends com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder> implements GrpcService.ModelStatisticsOrBuilder
@@
@@.. cpp:var:: message ModelStatistics
@@
@@   Statistics for a specific model and version.
@@
 
Protobuf type inference.ModelStatistics
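For orientation, the methods below follow the standard protoc-generated builder pattern. A minimal, hypothetical usage sketch (it assumes the generated inference.GrpcService classes are on the classpath; all field values are illustrative only):

    import inference.GrpcService;

    public class ModelStatisticsBuilderExample {
        public static void main(String[] args) {
            // Assemble an immutable ModelStatistics message via its generated builder.
            GrpcService.ModelStatistics stats = GrpcService.ModelStatistics.newBuilder()
                    .setName("densenet_onnx")           // hypothetical model name
                    .setVersion("1")
                    .setLastInference(1700000000000L)   // milliseconds since the epoch
                    .setInferenceCount(128)
                    .setExecutionCount(2)
                    .build();
            System.out.println(stats.getName() + " v" + stats.getVersion());
        }
    }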
  • Method Details

    • getDescriptor

      public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
    • internalGetMapFieldReflection

      protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(int number)
      Overrides:
      internalGetMapFieldReflection in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • internalGetMutableMapFieldReflection

      protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection(int number)
      Overrides:
      internalGetMutableMapFieldReflection in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • internalGetFieldAccessorTable

      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
      Specified by:
      internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • clear

      public GrpcService.ModelStatistics.Builder clear()
      Specified by:
      clear in interface com.google.protobuf.Message.Builder
      Specified by:
      clear in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clear in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • getDescriptorForType

      public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
      Specified by:
      getDescriptorForType in interface com.google.protobuf.Message.Builder
      Specified by:
      getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
      Overrides:
      getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • getDefaultInstanceForType

      public GrpcService.ModelStatistics getDefaultInstanceForType()
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder
    • build

      public GrpcService.ModelStatistics build()
      Specified by:
      build in interface com.google.protobuf.Message.Builder
      Specified by:
      build in interface com.google.protobuf.MessageLite.Builder
    • buildPartial

      public GrpcService.ModelStatistics buildPartial()
      Specified by:
      buildPartial in interface com.google.protobuf.Message.Builder
      Specified by:
      buildPartial in interface com.google.protobuf.MessageLite.Builder
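      Note on build() versus buildPartial(): the generated build() verifies isInitialized() before returning, while buildPartial() skips that check; because inference.ModelStatistics is a proto3 message with no required fields, the two behave the same in practice. A minimal sketch with hypothetical values:

        inference.GrpcService.ModelStatistics.Builder b =
                inference.GrpcService.ModelStatistics.newBuilder().setName("resnet50"); // hypothetical name
        inference.GrpcService.ModelStatistics partial = b.buildPartial(); // skips the isInitialized() check
        inference.GrpcService.ModelStatistics full = b.build();           // performs the isInitialized() check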
    • clone

      public GrpcService.ModelStatistics.Builder clone()
      Specified by:
      clone in interface com.google.protobuf.Message.Builder
      Specified by:
      clone in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clone in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • setField

      public GrpcService.ModelStatistics.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      setField in interface com.google.protobuf.Message.Builder
      Overrides:
      setField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • clearField

      public GrpcService.ModelStatistics.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
      Specified by:
      clearField in interface com.google.protobuf.Message.Builder
      Overrides:
      clearField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • clearOneof

      public GrpcService.ModelStatistics.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
      Specified by:
      clearOneof in interface com.google.protobuf.Message.Builder
      Overrides:
      clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • setRepeatedField

      public GrpcService.ModelStatistics.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value)
      Specified by:
      setRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • addRepeatedField

      public GrpcService.ModelStatistics.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      addRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • mergeFrom

      public GrpcService.ModelStatistics.Builder mergeFrom(com.google.protobuf.Message other)
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>
    • mergeFrom

      public GrpcService.ModelStatistics.Builder mergeFrom(GrpcService.ModelStatistics other)
    • isInitialized

      public final boolean isInitialized()
      Specified by:
      isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
      Overrides:
      isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • mergeFrom

      public GrpcService.ModelStatistics.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Specified by:
      mergeFrom in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>
      Throws:
      IOException
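      To illustrate the CodedInputStream overload above, a small sketch of reading a serialized ModelStatistics from an InputStream (the helper method name is hypothetical; the protobuf runtime calls are standard):

        static inference.GrpcService.ModelStatistics readStats(java.io.InputStream in) throws java.io.IOException {
            com.google.protobuf.CodedInputStream coded = com.google.protobuf.CodedInputStream.newInstance(in);
            return inference.GrpcService.ModelStatistics.newBuilder()
                    .mergeFrom(coded, com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry())
                    .build();
        }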
    • getName

      public String getName()
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model. If not given, statistics are returned for all models.
      @@
       
      string name = 1;
      Specified by:
      getName in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The name.
    • getNameBytes

      public com.google.protobuf.ByteString getNameBytes()
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model. If not given, statistics are returned for all models.
      @@
       
      string name = 1;
      Specified by:
      getNameBytes in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The bytes for name.
    • setName

      public GrpcService.ModelStatistics.Builder setName(String value)
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model. If not given, statistics are returned for all models.
      @@
       
      string name = 1;
      Parameters:
      value - The name to set.
      Returns:
      This builder for chaining.
    • clearName

      public GrpcService.ModelStatistics.Builder clearName()
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model. If not given, statistics are returned for all models.
      @@
       
      string name = 1;
      Returns:
      This builder for chaining.
    • setNameBytes

      public GrpcService.ModelStatistics.Builder setNameBytes(com.google.protobuf.ByteString value)
      @@  .. cpp:var:: string name
      @@
      @@     The name of the model. If not given, statistics are returned for all models.
      @@
       
      string name = 1;
      Parameters:
      value - The bytes for name to set.
      Returns:
      This builder for chaining.
    • getVersion

      public String getVersion()
      @@  .. cpp:var:: string version
      @@
      @@     The version of the model.
      @@
       
      string version = 2;
      Specified by:
      getVersion in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The version.
    • getVersionBytes

      public com.google.protobuf.ByteString getVersionBytes()
      @@  .. cpp:var:: string version
      @@
      @@     The version of the model.
      @@
       
      string version = 2;
      Specified by:
      getVersionBytes in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The bytes for version.
    • setVersion

      public GrpcService.ModelStatistics.Builder setVersion(String value)
      @@  .. cpp:var:: string version
      @@
      @@     The version of the model.
      @@
       
      string version = 2;
      Parameters:
      value - The version to set.
      Returns:
      This builder for chaining.
    • clearVersion

      public GrpcService.ModelStatistics.Builder clearVersion()
      @@  .. cpp:var:: string version
      @@
      @@     The version of the model.
      @@
       
      string version = 2;
      Returns:
      This builder for chaining.
    • setVersionBytes

      public GrpcService.ModelStatistics.Builder setVersionBytes(com.google.protobuf.ByteString value)
      @@  .. cpp:var:: string version
      @@
      @@     The version of the model.
      @@
       
      string version = 2;
      Parameters:
      value - The bytes for version to set.
      Returns:
      This builder for chaining.
    • getLastInference

      public long getLastInference()
      @@  .. cpp:var:: uint64 last_inference
      @@
      @@     The timestamp of the last inference request made for this model,
      @@     as milliseconds since the epoch.
      @@
       
      uint64 last_inference = 3;
      Specified by:
      getLastInference in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The lastInference.
    • setLastInference

      public GrpcService.ModelStatistics.Builder setLastInference(long value)
      @@  .. cpp:var:: uint64 last_inference
      @@
      @@     The timestamp of the last inference request made for this model,
      @@     as milliseconds since the epoch.
      @@
       
      uint64 last_inference = 3;
      Parameters:
      value - The lastInference to set.
      Returns:
      This builder for chaining.
    • clearLastInference

      public GrpcService.ModelStatistics.Builder clearLastInference()
      @@  .. cpp:var:: uint64 last_inference
      @@
      @@     The timestamp of the last inference request made for this model,
      @@     as milliseconds since the epoch.
      @@
       
      uint64 last_inference = 3;
      Returns:
      This builder for chaining.
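      Since last_inference carries a uint64 millisecond timestamp in a Java long, a short illustrative conversion (hypothetical value):

        inference.GrpcService.ModelStatistics stats = inference.GrpcService.ModelStatistics.newBuilder()
                .setLastInference(System.currentTimeMillis())   // milliseconds since the epoch
                .build();
        java.time.Instant lastInference = java.time.Instant.ofEpochMilli(stats.getLastInference());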
    • getInferenceCount

      public long getInferenceCount()
      @@  .. cpp:var:: uint64 inference_count
      @@
      @@     The cumulative count of successful inference requests made for this
      @@     model. Each inference in a batched request is counted as an
      @@     individual inference. For example, if a client sends a single
      @@     inference request with batch size 64, "inference_count" will be
      @@     incremented by 64. Similarly, if a client sends 64 individual
      @@     requests each with batch size 1, "inference_count" will be
      @@     incremented by 64. The "inference_count" value DOES NOT include
      @@     cache hits.
      @@
       
      uint64 inference_count = 4;
      Specified by:
      getInferenceCount in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The inferenceCount.
    • setInferenceCount

      public GrpcService.ModelStatistics.Builder setInferenceCount(long value)
      @@  .. cpp:var:: uint64 inference_count
      @@
      @@     The cumulative count of successful inference requests made for this
      @@     model. Each inference in a batched request is counted as an
      @@     individual inference. For example, if a client sends a single
      @@     inference request with batch size 64, "inference_count" will be
      @@     incremented by 64. Similarly, if a client sends 64 individual
      @@     requests each with batch size 1, "inference_count" will be
      @@     incremented by 64. The "inference_count" value DOES NOT include
      @@     cache hits.
      @@
       
      uint64 inference_count = 4;
      Parameters:
      value - The inferenceCount to set.
      Returns:
      This builder for chaining.
    • clearInferenceCount

      public GrpcService.ModelStatistics.Builder clearInferenceCount()
      @@  .. cpp:var:: uint64 inference_count
      @@
      @@     The cumulative count of successful inference requests made for this
      @@     model. Each inference in a batched request is counted as an
      @@     individual inference. For example, if a client sends a single
      @@     inference request with batch size 64, "inference_count" will be
      @@     incremented by 64. Similarly, if a client sends 64 individual
      @@     requests each with batch size 1, "inference_count" will be
      @@     incremented by 64. The "inference_count" value DOES NOT include
      @@     cache hits.
      @@
       
      uint64 inference_count = 4;
      Returns:
      This builder for chaining.
    • getExecutionCount

      public long getExecutionCount()
      @@  .. cpp:var:: uint64 execution_count
      @@
      @@     The cumulative count of successful inference executions performed
      @@     for the model. When dynamic batching is enabled, a single model
      @@     execution can perform inferencing for more than one inference
      @@     request. For example, if a client sends 64 individual requests each
      @@     with batch size 1 and the dynamic batcher batches them into a single
      @@     large batch for model execution, then "execution_count" will be
      @@     incremented by 1. If, on the other hand, the dynamic batcher is not
      @@     enabled and each of the 64 individual requests is executed
      @@     independently, then "execution_count" will be incremented by 64.
      @@     The "execution_count" value DOES NOT include cache hits.
      @@
       
      uint64 execution_count = 5;
      Specified by:
      getExecutionCount in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The executionCount.
    • setExecutionCount

      public GrpcService.ModelStatistics.Builder setExecutionCount(long value)
      @@  .. cpp:var:: uint64 execution_count
      @@
      @@     The cumulative count of successful inference executions performed
      @@     for the model. When dynamic batching is enabled, a single model
      @@     execution can perform inferencing for more than one inference
      @@     request. For example, if a client sends 64 individual requests each
      @@     with batch size 1 and the dynamic batcher batches them into a single
      @@     large batch for model execution, then "execution_count" will be
      @@     incremented by 1. If, on the other hand, the dynamic batcher is not
      @@     enabled and each of the 64 individual requests is executed
      @@     independently, then "execution_count" will be incremented by 64.
      @@     The "execution_count" value DOES NOT include cache hits.
      @@
       
      uint64 execution_count = 5;
      Parameters:
      value - The executionCount to set.
      Returns:
      This builder for chaining.
    • clearExecutionCount

      public GrpcService.ModelStatistics.Builder clearExecutionCount()
      @@  .. cpp:var:: uint64 execution_count
      @@
      @@     The cumulative count of successful inference executions performed
      @@     for the model. When dynamic batching is enabled, a single model
      @@     execution can perform inferencing for more than one inference
      @@     request. For example, if a client sends 64 individual requests each
      @@     with batch size 1 and the dynamic batcher batches them into a single
      @@     large batch for model execution, then "execution_count" will be
      @@     incremented by 1. If, on the other hand, the dynamic batcher is not
      @@     enabled and each of the 64 individual requests is executed
      @@     independently, then "execution_count" will be incremented by 64.
      @@     The "execution_count" value DOES NOT include cache hits.
      @@
       
      uint64 execution_count = 5;
      Returns:
      This builder for chaining.
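      The two counters above can be combined into an average batch size per model execution; a sketch with hypothetical values:

        // With dynamic batching, 64 single-request inferences may be served by a single execution.
        long inferenceCount = 64;   // hypothetical value of getInferenceCount()
        long executionCount = 1;    // hypothetical value of getExecutionCount()
        double avgBatchSize = executionCount == 0
                ? 0.0
                : (double) inferenceCount / executionCount;   // 64.0 in this example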
    • hasInferenceStats

      public boolean hasInferenceStats()
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
      Specified by:
      hasInferenceStats in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      Whether the inferenceStats field is set.
    • getInferenceStats

      public GrpcService.InferStatistics getInferenceStats()
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
      Specified by:
      getInferenceStats in interface GrpcService.ModelStatisticsOrBuilder
      Returns:
      The inferenceStats.
    • setInferenceStats

      public GrpcService.ModelStatistics.Builder setInferenceStats(GrpcService.InferStatistics value)
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
    • setInferenceStats

      public GrpcService.ModelStatistics.Builder setInferenceStats(GrpcService.InferStatistics.Builder builderForValue)
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
    • mergeInferenceStats

      public GrpcService.ModelStatistics.Builder mergeInferenceStats(GrpcService.InferStatistics value)
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
    • clearInferenceStats

      public GrpcService.ModelStatistics.Builder clearInferenceStats()
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
    • getInferenceStatsBuilder

      public GrpcService.InferStatistics.Builder getInferenceStatsBuilder()
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
    • getInferenceStatsOrBuilder

      public GrpcService.InferStatisticsOrBuilder getInferenceStatsOrBuilder()
      @@  .. cpp:var:: InferStatistics inference_stats
      @@
      @@     The aggregate statistics for the model/version.
      @@
       
      .inference.InferStatistics inference_stats = 6;
      Specified by:
      getInferenceStatsOrBuilder in interface GrpcService.ModelStatisticsOrBuilder
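      A common way to populate the singular inference_stats field is through its nested builder, which also marks the field as set; a minimal sketch:

        inference.GrpcService.ModelStatistics.Builder b = inference.GrpcService.ModelStatistics.newBuilder();
        inference.GrpcService.InferStatistics.Builder nested = b.getInferenceStatsBuilder();
        // ... populate `nested` with the aggregate statistics for the model/version ...
        inference.GrpcService.ModelStatistics msg = b.build();
        boolean present = msg.hasInferenceStats();   // true once the nested builder has been obtained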
    • getBatchStatsList

      public List<GrpcService.InferBatchStatistics> getBatchStatsList()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
      Specified by:
      getBatchStatsList in interface GrpcService.ModelStatisticsOrBuilder
    • getBatchStatsCount

      public int getBatchStatsCount()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
      Specified by:
      getBatchStatsCount in interface GrpcService.ModelStatisticsOrBuilder
    • getBatchStats

      public GrpcService.InferBatchStatistics getBatchStats(int index)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
      Specified by:
      getBatchStats in interface GrpcService.ModelStatisticsOrBuilder
    • setBatchStats

      public GrpcService.ModelStatistics.Builder setBatchStats(int index, GrpcService.InferBatchStatistics value)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • setBatchStats

      public GrpcService.ModelStatistics.Builder setBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addBatchStats

      public GrpcService.ModelStatistics.Builder addBatchStats(GrpcService.InferBatchStatistics value)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addBatchStats

      public GrpcService.ModelStatistics.Builder addBatchStats(int index, GrpcService.InferBatchStatistics value)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addBatchStats

      public GrpcService.ModelStatistics.Builder addBatchStats(GrpcService.InferBatchStatistics.Builder builderForValue)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addBatchStats

      public GrpcService.ModelStatistics.Builder addBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addAllBatchStats

      public GrpcService.ModelStatistics.Builder addAllBatchStats(Iterable<? extends GrpcService.InferBatchStatistics> values)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • clearBatchStats

      public GrpcService.ModelStatistics.Builder clearBatchStats()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • removeBatchStats

      public GrpcService.ModelStatistics.Builder removeBatchStats(int index)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • getBatchStatsBuilder

      public GrpcService.InferBatchStatistics.Builder getBatchStatsBuilder(int index)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • getBatchStatsOrBuilder

      public GrpcService.InferBatchStatisticsOrBuilder getBatchStatsOrBuilder(int index)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
      Specified by:
      getBatchStatsOrBuilder in interface GrpcService.ModelStatisticsOrBuilder
    • getBatchStatsOrBuilderList

      public List<? extends GrpcService.InferBatchStatisticsOrBuilder> getBatchStatsOrBuilderList()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
      Specified by:
      getBatchStatsOrBuilderList in interface GrpcService.ModelStatisticsOrBuilder
    • addBatchStatsBuilder

      public GrpcService.InferBatchStatistics.Builder addBatchStatsBuilder()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • addBatchStatsBuilder

      public GrpcService.InferBatchStatistics.Builder addBatchStatsBuilder(int index)
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
    • getBatchStatsBuilderList

      public List<GrpcService.InferBatchStatistics.Builder> getBatchStatsBuilderList()
      @@  .. cpp:var:: InferBatchStatistics batch_stats (repeated)
      @@
      @@     The aggregate statistics for each different batch size that is
      @@     executed in the model. The batch statistics indicate how many actual
      @@     model executions were performed and show differences due to different
      @@     batch size (for example, larger batches typically take longer to
      @@     compute).
      @@
       
      repeated .inference.InferBatchStatistics batch_stats = 7;
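      Putting the repeated batch_stats accessors above together, a short sketch (default instances stand in for real entries):

        inference.GrpcService.ModelStatistics.Builder b = inference.GrpcService.ModelStatistics.newBuilder();
        b.addBatchStats(inference.GrpcService.InferBatchStatistics.getDefaultInstance()); // append an entry
        b.addBatchStatsBuilder();                 // append another entry as a mutable nested builder
        int entries = b.getBatchStatsCount();     // 2
        b.removeBatchStats(0);                    // drop the first entry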
    • getMemoryUsageList

      public List<GrpcService.MemoryUsage> getMemoryUsageList()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
      Specified by:
      getMemoryUsageList in interface GrpcService.ModelStatisticsOrBuilder
    • getMemoryUsageCount

      public int getMemoryUsageCount()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
      Specified by:
      getMemoryUsageCount in interface GrpcService.ModelStatisticsOrBuilder
    • getMemoryUsage

      public GrpcService.MemoryUsage getMemoryUsage(int index)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
      Specified by:
      getMemoryUsage in interface GrpcService.ModelStatisticsOrBuilder
    • setMemoryUsage

      public GrpcService.ModelStatistics.Builder setMemoryUsage(int index, GrpcService.MemoryUsage value)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • setMemoryUsage

      public GrpcService.ModelStatistics.Builder setMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addMemoryUsage

      public GrpcService.ModelStatistics.Builder addMemoryUsage(GrpcService.MemoryUsage value)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addMemoryUsage

      public GrpcService.ModelStatistics.Builder addMemoryUsage(int index, GrpcService.MemoryUsage value)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addMemoryUsage

      public GrpcService.ModelStatistics.Builder addMemoryUsage(GrpcService.MemoryUsage.Builder builderForValue)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addMemoryUsage

      public GrpcService.ModelStatistics.Builder addMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addAllMemoryUsage

      public GrpcService.ModelStatistics.Builder addAllMemoryUsage(Iterable<? extends GrpcService.MemoryUsage> values)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • clearMemoryUsage

      public GrpcService.ModelStatistics.Builder clearMemoryUsage()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • removeMemoryUsage

      public GrpcService.ModelStatistics.Builder removeMemoryUsage(int index)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • getMemoryUsageBuilder

      public GrpcService.MemoryUsage.Builder getMemoryUsageBuilder(int index)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • getMemoryUsageOrBuilder

      public GrpcService.MemoryUsageOrBuilder getMemoryUsageOrBuilder(int index)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
      Specified by:
      getMemoryUsageOrBuilder in interface GrpcService.ModelStatisticsOrBuilder
    • getMemoryUsageOrBuilderList

      public List<? extends GrpcService.MemoryUsageOrBuilder> getMemoryUsageOrBuilderList()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
      Specified by:
      getMemoryUsageOrBuilderList in interface GrpcService.ModelStatisticsOrBuilder
    • addMemoryUsageBuilder

      public GrpcService.MemoryUsage.Builder addMemoryUsageBuilder()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • addMemoryUsageBuilder

      public GrpcService.MemoryUsage.Builder addMemoryUsageBuilder(int index)
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
    • getMemoryUsageBuilderList

      public List<GrpcService.MemoryUsage.Builder> getMemoryUsageBuilderList()
      @@  .. cpp:var:: MemoryUsage memory_usage (repeated)
      @@
      @@     The memory usage detected during model loading, which may be used to
      @@     estimate the memory to be released once the model is unloaded. Note
      @@     that the estimate is inferred from the profiling tools and the
      @@     framework's memory schema, so it is advisable to run experiments
      @@     to understand the scenarios in which the reported memory usage can
      @@     be relied on. As a starting point, the GPU memory usage reported
      @@     for models in the ONNX Runtime and TensorRT backends is usually
      @@     aligned.
      @@
       
      repeated .inference.MemoryUsage memory_usage = 8;
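      Similarly for the repeated memory_usage field, a minimal sketch of populating and reading it back (placeholder entries only):

        java.util.List<inference.GrpcService.MemoryUsage> reported = java.util.Collections.singletonList(
                inference.GrpcService.MemoryUsage.getDefaultInstance());
        inference.GrpcService.ModelStatistics stats = inference.GrpcService.ModelStatistics.newBuilder()
                .addAllMemoryUsage(reported)
                .build();
        for (inference.GrpcService.MemoryUsage usage : stats.getMemoryUsageList()) {
            // Inspect the per-device usage reported at model load time.
        }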
    • getResponseStatsCount

      public int getResponseStatsCount()
      Description copied from interface: GrpcService.ModelStatisticsOrBuilder
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
      Specified by:
      getResponseStatsCount in interface GrpcService.ModelStatisticsOrBuilder
    • containsResponseStats

      public boolean containsResponseStats(String key)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
      Specified by:
      containsResponseStats in interface GrpcService.ModelStatisticsOrBuilder
    • getResponseStats

      @Deprecated public Map<String,GrpcService.InferResponseStatistics> getResponseStats()
      Deprecated.
      Use getResponseStatsMap() instead.
      Specified by:
      getResponseStats in interface GrpcService.ModelStatisticsOrBuilder
    • getResponseStatsMap

      public Map<String,GrpcService.InferResponseStatistics> getResponseStatsMap()
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
      Specified by:
      getResponseStatsMap in interface GrpcService.ModelStatisticsOrBuilder
    • getResponseStatsOrDefault

      public GrpcService.InferResponseStatistics getResponseStatsOrDefault(String key, GrpcService.InferResponseStatistics defaultValue)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
      Specified by:
      getResponseStatsOrDefault in interface GrpcService.ModelStatisticsOrBuilder
    • getResponseStatsOrThrow

      public GrpcService.InferResponseStatistics getResponseStatsOrThrow(String key)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
      Specified by:
      getResponseStatsOrThrow in interface GrpcService.ModelStatisticsOrBuilder
    • clearResponseStats

      public GrpcService.ModelStatistics.Builder clearResponseStats()
    • removeResponseStats

      public GrpcService.ModelStatistics.Builder removeResponseStats(String key)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
    • getMutableResponseStats

      @Deprecated public Map<String,GrpcService.InferResponseStatistics> getMutableResponseStats()
      Deprecated.
      Use alternate mutation accessors instead.
    • putResponseStats

      public GrpcService.ModelStatistics.Builder putResponseStats(String key, GrpcService.InferResponseStatistics value)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
    • putAllResponseStats

      public GrpcService.ModelStatistics.Builder putAllResponseStats(Map<String,GrpcService.InferResponseStatistics> values)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
    • putResponseStatsBuilderIfAbsent

      public GrpcService.InferResponseStatistics.Builder putResponseStatsBuilderIfAbsent(String key)
      @@  .. cpp:var:: map<string, InferResponseStatistics> response_stats
      @@
      @@     The key and value pairs for all response statistics. The key is a
      @@     string identifying a set of response statistics aggregated together
      @@     (i.e. index of the response sent). The value is the aggregated
      @@     response statistics.
      @@
       
      map<string, .inference.InferResponseStatistics> response_stats = 9;
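      The response_stats map accessors above can be exercised as follows (the key "0" is an illustrative response index; default instances stand in for real statistics):

        inference.GrpcService.ModelStatistics.Builder b = inference.GrpcService.ModelStatistics.newBuilder();
        b.putResponseStats("0", inference.GrpcService.InferResponseStatistics.getDefaultInstance());
        boolean hasZero = b.containsResponseStats("0");                 // true
        inference.GrpcService.InferResponseStatistics atOne = b.getResponseStatsOrDefault(
                "1", inference.GrpcService.InferResponseStatistics.getDefaultInstance());
        int mapSize = b.getResponseStatsCount();                        // 1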
    • setUnknownFields

      public final GrpcService.ModelStatistics.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      setUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
    • mergeUnknownFields

      public final GrpcService.ModelStatistics.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      mergeUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>