Package inference

Class GrpcService.InferStatistics.Builder

java.lang.Object
com.google.protobuf.AbstractMessageLite.Builder
com.google.protobuf.AbstractMessage.Builder<GrpcService.InferStatistics.Builder>
com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
inference.GrpcService.InferStatistics.Builder
All Implemented Interfaces:
com.google.protobuf.Message.Builder, com.google.protobuf.MessageLite.Builder, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, GrpcService.InferStatisticsOrBuilder, Cloneable
Enclosing class:
GrpcService.InferStatistics

public static final class GrpcService.InferStatistics.Builder extends com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder> implements GrpcService.InferStatisticsOrBuilder
@@
@@.. cpp:var:: message InferStatistics
@@
@@   Inference statistics.
@@
 
Protobuf type inference.InferStatistics
  • Method Details

    • getDescriptor

      public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
    • internalGetFieldAccessorTable

      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
      Specified by:
      internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • clear

      public GrpcService.InferStatistics.Builder clear()
      Specified by:
      clear in interface com.google.protobuf.Message.Builder
      Specified by:
      clear in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clear in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • getDescriptorForType

      public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
      Specified by:
      getDescriptorForType in interface com.google.protobuf.Message.Builder
      Specified by:
      getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
      Overrides:
      getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • getDefaultInstanceForType

      public GrpcService.InferStatistics getDefaultInstanceForType()
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder
    • build

      public GrpcService.InferStatistics build()
      Specified by:
      build in interface com.google.protobuf.Message.Builder
      Specified by:
      build in interface com.google.protobuf.MessageLite.Builder
    • buildPartial

      public GrpcService.InferStatistics buildPartial()
      Specified by:
      buildPartial in interface com.google.protobuf.Message.Builder
      Specified by:
      buildPartial in interface com.google.protobuf.MessageLite.Builder
    • clone

      public GrpcService.InferStatistics.Builder clone()
      Specified by:
      clone in interface com.google.protobuf.Message.Builder
      Specified by:
      clone in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clone in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • setField

      public GrpcService.InferStatistics.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      setField in interface com.google.protobuf.Message.Builder
      Overrides:
      setField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • clearField

      public GrpcService.InferStatistics.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
      Specified by:
      clearField in interface com.google.protobuf.Message.Builder
      Overrides:
      clearField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • clearOneof

      public GrpcService.InferStatistics.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
      Specified by:
      clearOneof in interface com.google.protobuf.Message.Builder
      Overrides:
      clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • setRepeatedField

      public GrpcService.InferStatistics.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value)
      Specified by:
      setRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • addRepeatedField

      public GrpcService.InferStatistics.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      addRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • mergeFrom

      public GrpcService.InferStatistics.Builder mergeFrom(com.google.protobuf.Message other)
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GrpcService.InferStatistics.Builder>
    • mergeFrom

      public GrpcService.InferStatistics.Builder mergeFrom(GrpcService.InferStatistics other)
    • isInitialized

      public final boolean isInitialized()
      Specified by:
      isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
      Overrides:
      isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • mergeFrom

      public GrpcService.InferStatistics.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Specified by:
      mergeFrom in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GrpcService.InferStatistics.Builder>
      Throws:
      IOException
    • hasSuccess

      public boolean hasSuccess()
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
      Specified by:
      hasSuccess in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the success field is set.
    • getSuccess

      public GrpcService.StatisticDuration getSuccess()
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
      Specified by:
      getSuccess in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The success.
    • setSuccess

      public GrpcService.InferStatistics.Builder setSuccess(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
    • setSuccess

      public GrpcService.InferStatistics.Builder setSuccess(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
    • mergeSuccess

      public GrpcService.InferStatistics.Builder mergeSuccess(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
    • clearSuccess

      public GrpcService.InferStatistics.Builder clearSuccess()
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
    • getSuccessBuilder

      public GrpcService.StatisticDuration.Builder getSuccessBuilder()
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
    • getSuccessOrBuilder

      public GrpcService.StatisticDurationOrBuilder getSuccessOrBuilder()
      @@  .. cpp:var:: StatisticDuration success
      @@
      @@     Cumulative count and duration for successful inference
      @@     request. The "success" count and cumulative duration includes
      @@     cache hits.
      @@
       
      .inference.StatisticDuration success = 1;
      Specified by:
      getSuccessOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasFail

      public boolean hasFail()
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
      Specified by:
      hasFail in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the fail field is set.
    • getFail

      public GrpcService.StatisticDuration getFail()
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
      Specified by:
      getFail in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The fail.
    • setFail

      public GrpcService.InferStatistics.Builder setFail(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
    • setFail

      public GrpcService.InferStatistics.Builder setFail(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
    • mergeFail

      public GrpcService.InferStatistics.Builder mergeFail(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
    • clearFail

      public GrpcService.InferStatistics.Builder clearFail()
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
    • getFailBuilder

      public GrpcService.StatisticDuration.Builder getFailBuilder()
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
    • getFailOrBuilder

      public GrpcService.StatisticDurationOrBuilder getFailOrBuilder()
      @@  .. cpp:var:: StatisticDuration fail
      @@
      @@     Cumulative count and duration for failed inference
      @@     request.
      @@
       
      .inference.StatisticDuration fail = 2;
      Specified by:
      getFailOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasQueue

      public boolean hasQueue()
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
      Specified by:
      hasQueue in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the queue field is set.
    • getQueue

      public GrpcService.StatisticDuration getQueue()
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
      Specified by:
      getQueue in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The queue.
    • setQueue

      public GrpcService.InferStatistics.Builder setQueue(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
    • setQueue

      public GrpcService.InferStatistics.Builder setQueue(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
    • mergeQueue

      public GrpcService.InferStatistics.Builder mergeQueue(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
    • clearQueue

      public GrpcService.InferStatistics.Builder clearQueue()
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
    • getQueueBuilder

      public GrpcService.StatisticDuration.Builder getQueueBuilder()
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
    • getQueueOrBuilder

      public GrpcService.StatisticDurationOrBuilder getQueueOrBuilder()
      @@  .. cpp:var:: StatisticDuration queue
      @@
      @@     The count and cumulative duration that inference requests wait in
      @@     scheduling or other queues. The "queue" count and cumulative
      @@     duration includes cache hits.
      @@
       
      .inference.StatisticDuration queue = 3;
      Specified by:
      getQueueOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasComputeInput

      public boolean hasComputeInput()
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
      Specified by:
      hasComputeInput in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the computeInput field is set.
    • getComputeInput

      public GrpcService.StatisticDuration getComputeInput()
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
      Specified by:
      getComputeInput in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The computeInput.
    • setComputeInput

      public GrpcService.InferStatistics.Builder setComputeInput(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
    • setComputeInput

      public GrpcService.InferStatistics.Builder setComputeInput(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
    • mergeComputeInput

      public GrpcService.InferStatistics.Builder mergeComputeInput(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
    • clearComputeInput

      public GrpcService.InferStatistics.Builder clearComputeInput()
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
    • getComputeInputBuilder

      public GrpcService.StatisticDuration.Builder getComputeInputBuilder()
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
    • getComputeInputOrBuilder

      public GrpcService.StatisticDurationOrBuilder getComputeInputOrBuilder()
      @@  .. cpp:var:: StatisticDuration compute_input
      @@
      @@     The count and cumulative duration to prepare input tensor data as
      @@     required by the model framework / backend. For example, this duration
      @@     should include the time to copy input tensor data to the GPU.
      @@     The "compute_input" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_input = 4;
      Specified by:
      getComputeInputOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasComputeInfer

      public boolean hasComputeInfer()
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
      Specified by:
      hasComputeInfer in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the computeInfer field is set.
    • getComputeInfer

      public GrpcService.StatisticDuration getComputeInfer()
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
      Specified by:
      getComputeInfer in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The computeInfer.
    • setComputeInfer

      public GrpcService.InferStatistics.Builder setComputeInfer(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
    • setComputeInfer

      public GrpcService.InferStatistics.Builder setComputeInfer(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
    • mergeComputeInfer

      public GrpcService.InferStatistics.Builder mergeComputeInfer(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
    • clearComputeInfer

      public GrpcService.InferStatistics.Builder clearComputeInfer()
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
    • getComputeInferBuilder

      public GrpcService.StatisticDuration.Builder getComputeInferBuilder()
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
    • getComputeInferOrBuilder

      public GrpcService.StatisticDurationOrBuilder getComputeInferOrBuilder()
      @@  .. cpp:var:: StatisticDuration compute_infer
      @@
      @@     The count and cumulative duration to execute the model.
      @@     The "compute_infer" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_infer = 5;
      Specified by:
      getComputeInferOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasComputeOutput

      public boolean hasComputeOutput()
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
      Specified by:
      hasComputeOutput in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the computeOutput field is set.
    • getComputeOutput

      public GrpcService.StatisticDuration getComputeOutput()
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
      Specified by:
      getComputeOutput in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The computeOutput.
    • setComputeOutput

      public GrpcService.InferStatistics.Builder setComputeOutput(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
    • setComputeOutput

      public GrpcService.InferStatistics.Builder setComputeOutput(GrpcService.StatisticDuration.Builder builderForValue)
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
    • mergeComputeOutput

      public GrpcService.InferStatistics.Builder mergeComputeOutput(GrpcService.StatisticDuration value)
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
    • clearComputeOutput

      public GrpcService.InferStatistics.Builder clearComputeOutput()
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
    • getComputeOutputBuilder

      public GrpcService.StatisticDuration.Builder getComputeOutputBuilder()
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
    • getComputeOutputOrBuilder

      public GrpcService.StatisticDurationOrBuilder getComputeOutputOrBuilder()
      @@  .. cpp:var:: StatisticDuration compute_output
      @@
      @@     The count and cumulative duration to extract output tensor data
      @@     produced by the model framework / backend. For example, this duration
      @@     should include the time to copy output tensor data from the GPU.
      @@     The "compute_output" count and cumulative duration do not account for
      @@     requests that were a cache hit. See the "cache_hit" field for more
      @@     info.
      @@
       
      .inference.StatisticDuration compute_output = 6;
      Specified by:
      getComputeOutputOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasCacheHit

      public boolean hasCacheHit()
      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
      Specified by:
      hasCacheHit in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the cacheHit field is set.
    • getCacheHit

      public GrpcService.StatisticDuration getCacheHit()
      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
      Specified by:
      getCacheHit in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The cacheHit.
    • setCacheHit

      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
    • setCacheHit

      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
    • mergeCacheHit

      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
    • clearCacheHit

      public GrpcService.InferStatistics.Builder clearCacheHit()
      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
    • getCacheHitBuilder

      public GrpcService.StatisticDuration.Builder getCacheHitBuilder()
      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
    • getCacheHitOrBuilder

      public GrpcService.StatisticDurationOrBuilder getCacheHitOrBuilder()
      @@  .. cpp:var:: StatisticDuration cache_hit
      @@
      @@     The count of response cache hits and cumulative duration to lookup
      @@     and extract output tensor data from the Response Cache on a cache
      @@     hit. For example, this duration should include the time to copy
      @@     output tensor data from the Response Cache to the response object.
      @@     On cache hits, triton does not need to go to the model/backend
      @@     for the output tensor data, so the "compute_input", "compute_infer",
      @@     and "compute_output" fields are not updated. Assuming the response
      @@     cache is enabled for a given model, a cache hit occurs for a
      @@     request to that model when the request metadata (model name,
      @@     model version, model inputs) hashes to an existing entry in the
      @@     cache. On a cache miss, the request hash and response output tensor
      @@     data is added to the cache. See response cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_hit = 7;
      Specified by:
      getCacheHitOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • hasCacheMiss

      public boolean hasCacheMiss()
      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
      Specified by:
      hasCacheMiss in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      Whether the cacheMiss field is set.
    • getCacheMiss

      public GrpcService.StatisticDuration getCacheMiss()
      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
      Specified by:
      getCacheMiss in interface GrpcService.InferStatisticsOrBuilder
      Returns:
      The cacheMiss.
    • setCacheMiss

      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
    • setCacheMiss

      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
    • mergeCacheMiss

      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
    • clearCacheMiss

      public GrpcService.InferStatistics.Builder clearCacheMiss()
      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
    • getCacheMissBuilder

      public GrpcService.StatisticDuration.Builder getCacheMissBuilder()
      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
    • getCacheMissOrBuilder

      public GrpcService.StatisticDurationOrBuilder getCacheMissOrBuilder()
      @@  .. cpp:var:: StatisticDuration cache_miss
      @@
      @@     The count of response cache misses and cumulative duration to lookup
      @@     and insert output tensor data from the computed response to the cache.
      @@     For example, this duration should include the time to copy
      @@     output tensor data from the response object to the Response Cache.
      @@     Assuming the response cache is enabled for a given model, a cache
      @@     miss occurs for a request to that model when the request metadata
      @@     does NOT hash to an existing entry in the cache. See the response
      @@     cache docs for more info:
      @@
      @@     https://github.com/triton-inference-server/server/blob/main/docs/response_cache.md
      @@
       
      .inference.StatisticDuration cache_miss = 8;
      Specified by:
      getCacheMissOrBuilder in interface GrpcService.InferStatisticsOrBuilder
    • setUnknownFields

      public final GrpcService.InferStatistics.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      setUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>
    • mergeUnknownFields

      public final GrpcService.InferStatistics.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      mergeUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.InferStatistics.Builder>