Package inference
Class GrpcService.ModelStatistics.Builder
java.lang.Object
com.google.protobuf.AbstractMessageLite.Builder
com.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>
com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
inference.GrpcService.ModelStatistics.Builder
- All Implemented Interfaces:
com.google.protobuf.Message.Builder,com.google.protobuf.MessageLite.Builder,com.google.protobuf.MessageLiteOrBuilder,com.google.protobuf.MessageOrBuilder,GrpcService.ModelStatisticsOrBuilder,Cloneable
- Enclosing class:
GrpcService.ModelStatistics
public static final class GrpcService.ModelStatistics.Builder
extends com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
implements GrpcService.ModelStatisticsOrBuilder
@@ @@.. cpp:var:: message ModelStatistics @@ @@ Statistics for a specific model and version. @@Protobuf type
inference.ModelStatistics-
Method Summary
Modifier and TypeMethodDescriptionaddAllBatchStats(Iterable<? extends GrpcService.InferBatchStatistics> values) @@ ..addAllMemoryUsage(Iterable<? extends GrpcService.MemoryUsage> values) @@ ..addBatchStats(int index, GrpcService.InferBatchStatistics value) @@ ..addBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue) @@ ..@@ ..addBatchStats(GrpcService.InferBatchStatistics.Builder builderForValue) @@ ..@@ ..addBatchStatsBuilder(int index) @@ ..addMemoryUsage(int index, GrpcService.MemoryUsage value) @@ ..addMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue) @@ ..@@ ..addMemoryUsage(GrpcService.MemoryUsage.Builder builderForValue) @@ ..@@ ..addMemoryUsageBuilder(int index) @@ ..addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) build()clear()@@ ..@@ ..clearField(com.google.protobuf.Descriptors.FieldDescriptor field) @@ ..@@ ..@@ ..@@ ..@@ ..clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) @@ ..clone()boolean@@ ..getBatchStats(int index) @@ ..getBatchStatsBuilder(int index) @@ ..@@ ..int@@ ..@@ ..getBatchStatsOrBuilder(int index) @@ ..List<? extends GrpcService.InferBatchStatisticsOrBuilder> @@ ..static final com.google.protobuf.Descriptors.Descriptorcom.google.protobuf.Descriptors.Descriptorlong@@ ..long@@ ..@@ ..@@ ..@@ ..long@@ ..getMemoryUsage(int index) @@ ..getMemoryUsageBuilder(int index) @@ ..@@ ..int@@ ..@@ ..getMemoryUsageOrBuilder(int index) @@ ..List<? 
extends GrpcService.MemoryUsageOrBuilder> @@ ..Deprecated.getName()@@ ..com.google.protobuf.ByteString@@ ..Deprecated.int@@ ..@@ ..getResponseStatsOrDefault(String key, GrpcService.InferResponseStatistics defaultValue) @@ ..@@ ..@@ ..com.google.protobuf.ByteString@@ ..boolean@@ ..protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTableprotected com.google.protobuf.MapFieldReflectionAccessorinternalGetMapFieldReflection(int number) protected com.google.protobuf.MapFieldReflectionAccessorinternalGetMutableMapFieldReflection(int number) final booleanmergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) mergeFrom(com.google.protobuf.Message other) @@ ..mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) @@ ..@@ ..@@ ..removeBatchStats(int index) @@ ..removeMemoryUsage(int index) @@ ..@@ ..setBatchStats(int index, GrpcService.InferBatchStatistics value) @@ ..setBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue) @@ ..setExecutionCount(long value) @@ ..setInferenceCount(long value) @@ ..@@ ..setInferenceStats(GrpcService.InferStatistics.Builder builderForValue) @@ ..setLastInference(long value) @@ ..setMemoryUsage(int index, GrpcService.MemoryUsage value) @@ ..setMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue) @@ ..@@ ..setNameBytes(com.google.protobuf.ByteString value) @@ ..setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) setVersion(String value) @@ ..setVersionBytes(com.google.protobuf.ByteString value) @@ ..Methods inherited from class com.google.protobuf.GeneratedMessageV3.Builder
getAllFields, getField, getFieldBuilder, getOneofFieldDescriptor, getParentForChildren, getRepeatedField, getRepeatedFieldBuilder, getRepeatedFieldCount, getUnknownFields, getUnknownFieldSetBuilder, hasField, hasOneof, internalGetMapField, internalGetMutableMapField, isClean, markClean, mergeUnknownLengthDelimitedField, mergeUnknownVarintField, newBuilderForField, onBuilt, onChanged, parseUnknownField, setUnknownFieldSetBuilder, setUnknownFieldsProto3Methods inherited from class com.google.protobuf.AbstractMessage.Builder
findInitializationErrors, getInitializationErrorString, internalMergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, newUninitializedMessageException, toStringMethods inherited from class com.google.protobuf.AbstractMessageLite.Builder
addAll, addAll, mergeDelimitedFrom, mergeDelimitedFrom, mergeFrom, newUninitializedMessageExceptionMethods inherited from class java.lang.Object
equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, waitMethods inherited from interface com.google.protobuf.Message.Builder
mergeDelimitedFrom, mergeDelimitedFromMethods inherited from interface com.google.protobuf.MessageLite.Builder
mergeFromMethods inherited from interface com.google.protobuf.MessageOrBuilder
findInitializationErrors, getAllFields, getField, getInitializationErrorString, getOneofFieldDescriptor, getRepeatedField, getRepeatedFieldCount, getUnknownFields, hasField, hasOneof
-
Method Details
-
getDescriptor
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() -
internalGetMapFieldReflection
protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(int number) - Overrides:
internalGetMapFieldReflection in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
internalGetMutableMapFieldReflection
protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection(int number) - Overrides:
internalGetMutableMapFieldReflection in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
internalGetFieldAccessorTable
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()- Specified by:
internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
clear
- Specified by:
clear in interface com.google.protobuf.Message.Builder - Specified by:
clear in interface com.google.protobuf.MessageLite.Builder - Overrides:
clear in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
getDescriptorForType
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()- Specified by:
getDescriptorForType in interface com.google.protobuf.Message.Builder - Specified by:
getDescriptorForType in interface com.google.protobuf.MessageOrBuilder - Overrides:
getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
getDefaultInstanceForType
- Specified by:
getDefaultInstanceForTypein interfacecom.google.protobuf.MessageLiteOrBuilder- Specified by:
getDefaultInstanceForTypein interfacecom.google.protobuf.MessageOrBuilder
-
build
- Specified by:
buildin interfacecom.google.protobuf.Message.Builder- Specified by:
buildin interfacecom.google.protobuf.MessageLite.Builder
-
buildPartial
- Specified by:
buildPartialin interfacecom.google.protobuf.Message.Builder- Specified by:
buildPartialin interfacecom.google.protobuf.MessageLite.Builder
-
clone
- Specified by:
clonein interfacecom.google.protobuf.Message.Builder- Specified by:
clonein interfacecom.google.protobuf.MessageLite.Builder- Overrides:
clonein classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
setField
public GrpcService.ModelStatistics.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) - Specified by:
setFieldin interfacecom.google.protobuf.Message.Builder- Overrides:
setFieldin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
clearField
public GrpcService.ModelStatistics.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) - Specified by:
clearFieldin interfacecom.google.protobuf.Message.Builder- Overrides:
clearFieldin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
clearOneof
public GrpcService.ModelStatistics.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) - Specified by:
clearOneofin interfacecom.google.protobuf.Message.Builder- Overrides:
clearOneofin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
setRepeatedField
public GrpcService.ModelStatistics.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) - Specified by:
setRepeatedFieldin interfacecom.google.protobuf.Message.Builder- Overrides:
setRepeatedFieldin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
addRepeatedField
public GrpcService.ModelStatistics.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) - Specified by:
addRepeatedFieldin interfacecom.google.protobuf.Message.Builder- Overrides:
addRepeatedFieldin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
mergeFrom
- Specified by:
mergeFromin interfacecom.google.protobuf.Message.Builder- Overrides:
mergeFromin classcom.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>
-
mergeFrom
-
isInitialized
public final boolean isInitialized()- Specified by:
isInitializedin interfacecom.google.protobuf.MessageLiteOrBuilder- Overrides:
isInitializedin classcom.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
mergeFrom
public GrpcService.ModelStatistics.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException - Specified by:
mergeFromin interfacecom.google.protobuf.Message.Builder- Specified by:
mergeFromin interfacecom.google.protobuf.MessageLite.Builder- Overrides:
mergeFromin classcom.google.protobuf.AbstractMessage.Builder<GrpcService.ModelStatistics.Builder>- Throws:
IOException
-
getName
@@ .. cpp:var:: string name @@ @@ The name of the model. If not given returns statistics for all @@
string name = 1;- Specified by:
getNamein interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The name.
-
getNameBytes
public com.google.protobuf.ByteString getNameBytes()@@ .. cpp:var:: string name @@ @@ The name of the model. If not given returns statistics for all @@
string name = 1;- Specified by:
getNameBytesin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The bytes for name.
-
setName
@@ .. cpp:var:: string name @@ @@ The name of the model. If not given returns statistics for all @@
string name = 1;- Parameters:
value- The name to set.- Returns:
- This builder for chaining.
-
clearName
@@ .. cpp:var:: string name @@ @@ The name of the model. If not given returns statistics for all @@
string name = 1;- Returns:
- This builder for chaining.
-
setNameBytes
@@ .. cpp:var:: string name @@ @@ The name of the model. If not given returns statistics for all @@
string name = 1;- Parameters:
value- The bytes for name to set.- Returns:
- This builder for chaining.
-
getVersion
@@ .. cpp:var:: string version @@ @@ The version of the model. @@
string version = 2;- Specified by:
getVersionin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The version.
-
getVersionBytes
public com.google.protobuf.ByteString getVersionBytes()@@ .. cpp:var:: string version @@ @@ The version of the model. @@
string version = 2;- Specified by:
getVersionBytesin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The bytes for version.
-
setVersion
@@ .. cpp:var:: string version @@ @@ The version of the model. @@
string version = 2;- Parameters:
value- The version to set.- Returns:
- This builder for chaining.
-
clearVersion
@@ .. cpp:var:: string version @@ @@ The version of the model. @@
string version = 2;- Returns:
- This builder for chaining.
-
setVersionBytes
@@ .. cpp:var:: string version @@ @@ The version of the model. @@
string version = 2;- Parameters:
value- The bytes for version to set.- Returns:
- This builder for chaining.
-
getLastInference
public long getLastInference()@@ .. cpp:var:: uint64 last_inference @@ @@ The timestamp of the last inference request made for this model, @@ as milliseconds since the epoch. @@
uint64 last_inference = 3;- Specified by:
getLastInferencein interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The lastInference.
-
setLastInference
@@ .. cpp:var:: uint64 last_inference @@ @@ The timestamp of the last inference request made for this model, @@ as milliseconds since the epoch. @@
uint64 last_inference = 3;- Parameters:
value- The lastInference to set.- Returns:
- This builder for chaining.
-
clearLastInference
@@ .. cpp:var:: uint64 last_inference @@ @@ The timestamp of the last inference request made for this model, @@ as milliseconds since the epoch. @@
uint64 last_inference = 3;- Returns:
- This builder for chaining.
-
getInferenceCount
public long getInferenceCount()@@ .. cpp:var:: uint64 inference_count @@ @@ The cumulative count of successful inference requests made for this @@ model. Each inference in a batched request is counted as an @@ individual inference. For example, if a client sends a single @@ inference request with batch size 64, "inference_count" will be @@ incremented by 64. Similarly, if a client sends 64 individual @@ requests each with batch size 1, "inference_count" will be @@ incremented by 64. The "inference_count" value DOES NOT include @@ cache hits. @@
uint64 inference_count = 4;- Specified by:
getInferenceCountin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The inferenceCount.
-
setInferenceCount
@@ .. cpp:var:: uint64 inference_count @@ @@ The cumulative count of successful inference requests made for this @@ model. Each inference in a batched request is counted as an @@ individual inference. For example, if a client sends a single @@ inference request with batch size 64, "inference_count" will be @@ incremented by 64. Similarly, if a client sends 64 individual @@ requests each with batch size 1, "inference_count" will be @@ incremented by 64. The "inference_count" value DOES NOT include @@ cache hits. @@
uint64 inference_count = 4;- Parameters:
value- The inferenceCount to set.- Returns:
- This builder for chaining.
-
clearInferenceCount
@@ .. cpp:var:: uint64 inference_count @@ @@ The cumulative count of successful inference requests made for this @@ model. Each inference in a batched request is counted as an @@ individual inference. For example, if a client sends a single @@ inference request with batch size 64, "inference_count" will be @@ incremented by 64. Similarly, if a client sends 64 individual @@ requests each with batch size 1, "inference_count" will be @@ incremented by 64. The "inference_count" value DOES NOT include @@ cache hits. @@
uint64 inference_count = 4;- Returns:
- This builder for chaining.
-
getExecutionCount
public long getExecutionCount()@@ .. cpp:var:: uint64 execution_count @@ @@ The cumulative count of the number of successful inference executions @@ performed for the model. When dynamic batching is enabled, a single @@ model execution can perform inferencing for more than one inference @@ request. For example, if a client sends 64 individual requests each @@ with batch size 1 and the dynamic batcher batches them into a single @@ large batch for model execution then "execution_count" will be @@ incremented by 1. If, on the other hand, the dynamic batcher is not @@ enabled for that model, and each of the 64 individual requests is executed @@ independently, then "execution_count" will be incremented by 64. @@ The "execution_count" value DOES NOT include cache hits. @@
uint64 execution_count = 5;- Specified by:
getExecutionCountin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The executionCount.
-
setExecutionCount
@@ .. cpp:var:: uint64 execution_count @@ @@ The cumulative count of the number of successful inference executions @@ performed for the model. When dynamic batching is enabled, a single @@ model execution can perform inferencing for more than one inference @@ request. For example, if a client sends 64 individual requests each @@ with batch size 1 and the dynamic batcher batches them into a single @@ large batch for model execution then "execution_count" will be @@ incremented by 1. If, on the other hand, the dynamic batcher is not @@ enabled for that model, and each of the 64 individual requests is executed @@ independently, then "execution_count" will be incremented by 64. @@ The "execution_count" value DOES NOT include cache hits. @@
uint64 execution_count = 5;- Parameters:
value- The executionCount to set.- Returns:
- This builder for chaining.
-
clearExecutionCount
@@ .. cpp:var:: uint64 execution_count @@ @@ The cumulative count of the number of successful inference executions @@ performed for the model. When dynamic batching is enabled, a single @@ model execution can perform inferencing for more than one inference @@ request. For example, if a client sends 64 individual requests each @@ with batch size 1 and the dynamic batcher batches them into a single @@ large batch for model execution then "execution_count" will be @@ incremented by 1. If, on the other hand, the dynamic batcher is not @@ enabled for that model, and each of the 64 individual requests is executed @@ independently, then "execution_count" will be incremented by 64. @@ The "execution_count" value DOES NOT include cache hits. @@
uint64 execution_count = 5;- Returns:
- This builder for chaining.
-
hasInferenceStats
public boolean hasInferenceStats()@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6;- Specified by:
hasInferenceStatsin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- Whether the inferenceStats field is set.
-
getInferenceStats
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6;- Specified by:
getInferenceStatsin interfaceGrpcService.ModelStatisticsOrBuilder- Returns:
- The inferenceStats.
-
setInferenceStats
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6; -
setInferenceStats
public GrpcService.ModelStatistics.Builder setInferenceStats(GrpcService.InferStatistics.Builder builderForValue) @@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6; -
mergeInferenceStats
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6; -
clearInferenceStats
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6; -
getInferenceStatsBuilder
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6; -
getInferenceStatsOrBuilder
@@ .. cpp:var:: InferStatistics inference_stats @@ @@ The aggregate statistics for the model/version. @@
.inference.InferStatistics inference_stats = 6;- Specified by:
getInferenceStatsOrBuilderin interfaceGrpcService.ModelStatisticsOrBuilder
-
getBatchStatsList
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7;- Specified by:
getBatchStatsListin interfaceGrpcService.ModelStatisticsOrBuilder
-
getBatchStatsCount
public int getBatchStatsCount()@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7;- Specified by:
getBatchStatsCountin interfaceGrpcService.ModelStatisticsOrBuilder
-
getBatchStats
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7;- Specified by:
getBatchStatsin interfaceGrpcService.ModelStatisticsOrBuilder
-
setBatchStats
public GrpcService.ModelStatistics.Builder setBatchStats(int index, GrpcService.InferBatchStatistics value) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
setBatchStats
public GrpcService.ModelStatistics.Builder setBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addBatchStats
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addBatchStats
public GrpcService.ModelStatistics.Builder addBatchStats(int index, GrpcService.InferBatchStatistics value) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addBatchStats
public GrpcService.ModelStatistics.Builder addBatchStats(GrpcService.InferBatchStatistics.Builder builderForValue) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addBatchStats
public GrpcService.ModelStatistics.Builder addBatchStats(int index, GrpcService.InferBatchStatistics.Builder builderForValue) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addAllBatchStats
public GrpcService.ModelStatistics.Builder addAllBatchStats(Iterable<? extends GrpcService.InferBatchStatistics> values) @@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
clearBatchStats
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
removeBatchStats
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
getBatchStatsBuilder
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
getBatchStatsOrBuilder
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7;- Specified by:
getBatchStatsOrBuilderin interfaceGrpcService.ModelStatisticsOrBuilder
-
getBatchStatsOrBuilderList
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7;- Specified by:
getBatchStatsOrBuilderListin interfaceGrpcService.ModelStatisticsOrBuilder
-
addBatchStatsBuilder
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
addBatchStatsBuilder
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
getBatchStatsBuilderList
@@ .. cpp:var:: InferBatchStatistics batch_stats (repeated) @@ @@ The aggregate statistics for each different batch size that is @@ executed in the model. The batch statistics indicate how many actual @@ model executions were performed and show differences due to different @@ batch size (for example, larger batches typically take longer to @@ compute). @@
repeated .inference.InferBatchStatistics batch_stats = 7; -
getMemoryUsageList
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferenced by the profiling tools and @@ framework's memory schema, therefore it is advised to perform @@ experiments to understand the scenario that the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8;- Specified by:
getMemoryUsageListin interfaceGrpcService.ModelStatisticsOrBuilder
-
getMemoryUsageCount
public int getMemoryUsageCount()@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferenced by the profiling tools and @@ framework's memory schema, therefore it is advised to perform @@ experiments to understand the scenario that the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8;- Specified by:
getMemoryUsageCountin interfaceGrpcService.ModelStatisticsOrBuilder
-
getMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferenced by the profiling tools and @@ framework's memory schema, therefore it is advised to perform @@ experiments to understand the scenario that the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8;- Specified by:
getMemoryUsagein interfaceGrpcService.ModelStatisticsOrBuilder
-
setMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferenced by the profiling tools and @@ framework's memory schema, therefore it is advised to perform @@ experiments to understand the scenario that the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
setMemoryUsage
public GrpcService.ModelStatistics.Builder setMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue) @@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addMemoryUsage
public GrpcService.ModelStatistics.Builder addMemoryUsage(GrpcService.MemoryUsage.Builder builderForValue) @@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addMemoryUsage
public GrpcService.ModelStatistics.Builder addMemoryUsage(int index, GrpcService.MemoryUsage.Builder builderForValue) @@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addAllMemoryUsage
public GrpcService.ModelStatistics.Builder addAllMemoryUsage(Iterable<? extends GrpcService.MemoryUsage> values) @@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
clearMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
removeMemoryUsage
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
getMemoryUsageBuilder
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
getMemoryUsageOrBuilder
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; - Specified by:
getMemoryUsageOrBuilder in interface GrpcService.ModelStatisticsOrBuilder
-
getMemoryUsageOrBuilderList
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; - Specified by:
getMemoryUsageOrBuilderList in interface GrpcService.ModelStatisticsOrBuilder
-
addMemoryUsageBuilder
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
addMemoryUsageBuilder
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
getMemoryUsageBuilderList
@@ .. cpp:var:: MemoryUsage memory_usage (repeated) @@ @@ The memory usage detected during model loading, which may be used to @@ estimate the memory to be released once the model is unloaded. Note @@ that the estimation is inferred by the profiling tools and @@ framework's memory schema; therefore it is advised to perform @@ experiments to understand the scenarios in which the reported memory usage @@ can be relied on. As a starting point, the GPU memory usage for @@ models in ONNX Runtime backend and TensorRT backend is usually @@ aligned. @@
repeated .inference.MemoryUsage memory_usage = 8; -
getResponseStatsCount
public int getResponseStatsCount() Description copied from interface: GrpcService.ModelStatisticsOrBuilder @@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; - Specified by:
getResponseStatsCount in interface GrpcService.ModelStatisticsOrBuilder
-
containsResponseStats
@@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; - Specified by:
containsResponseStats in interface GrpcService.ModelStatisticsOrBuilder
-
getResponseStats
Deprecated. Use getResponseStatsMap() instead. - Specified by:
getResponseStats in interface GrpcService.ModelStatisticsOrBuilder
-
getResponseStatsMap
@@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; - Specified by:
getResponseStatsMap in interface GrpcService.ModelStatisticsOrBuilder
-
getResponseStatsOrDefault
public GrpcService.InferResponseStatistics getResponseStatsOrDefault(String key, GrpcService.InferResponseStatistics defaultValue) @@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; - Specified by:
getResponseStatsOrDefault in interface GrpcService.ModelStatisticsOrBuilder
-
getResponseStatsOrThrow
@@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; - Specified by:
getResponseStatsOrThrow in interface GrpcService.ModelStatisticsOrBuilder
-
clearResponseStats
-
removeResponseStats
@@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; -
getMutableResponseStats
Deprecated. Use alternate mutation accessors instead. -
putResponseStats
public GrpcService.ModelStatistics.Builder putResponseStats(String key, GrpcService.InferResponseStatistics value) @@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; -
putAllResponseStats
public GrpcService.ModelStatistics.Builder putAllResponseStats(Map<String, GrpcService.InferResponseStatistics> values) @@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; -
putResponseStatsBuilderIfAbsent
@@ .. cpp:var:: map<string, InferResponseStatistics> response_stats @@ @@ The key and value pairs for all response statistics. The key is a @@ string identifying a set of response statistics aggregated together @@ (i.e. index of the response sent). The value is the aggregated @@ response statistics. @@
map<string, .inference.InferResponseStatistics> response_stats = 9; -
setUnknownFields
public final GrpcService.ModelStatistics.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) - Specified by:
setUnknownFields in interface com.google.protobuf.Message.Builder - Overrides:
setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-
mergeUnknownFields
public final GrpcService.ModelStatistics.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) - Specified by:
mergeUnknownFields in interface com.google.protobuf.Message.Builder - Overrides:
mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GrpcService.ModelStatistics.Builder>
-