Skip to content

Commit 711cd72

Browse files
feat: [vertexai] update gapic library to the latest version (#11129)
PiperOrigin-RevId: 672705511 Co-authored-by: Jaycee Li <jayceeli@google.com>
1 parent 93d9f6a commit 711cd72

File tree

66 files changed: +17,208 additions, −783 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

66 files changed: +17,208 additions, −783 deletions

java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/api/LlmUtilityServiceClient.java

+12-4
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,7 @@ public LlmUtilityServiceStub getStub() {
315315
*
316316
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
317317
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
318-
* @param instances Required. The instances that are the input to token counting call. Schema is
318+
* @param instances Optional. The instances that are the input to token counting call. Schema is
319319
* identical to the prediction schema of the underlying model.
320320
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
321321
*/
@@ -351,7 +351,7 @@ public final CountTokensResponse countTokens(EndpointName endpoint, List<Value>
351351
*
352352
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
353353
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
354-
* @param instances Required. The instances that are the input to token counting call. Schema is
354+
* @param instances Optional. The instances that are the input to token counting call. Schema is
355355
* identical to the prediction schema of the underlying model.
356356
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
357357
*/
@@ -383,6 +383,8 @@ public final CountTokensResponse countTokens(String endpoint, List<Value> instan
383383
* .setModel("model104069929")
384384
* .addAllInstances(new ArrayList<Value>())
385385
* .addAllContents(new ArrayList<Content>())
386+
* .setSystemInstruction(Content.newBuilder().build())
387+
* .addAllTools(new ArrayList<Tool>())
386388
* .build();
387389
* CountTokensResponse response = llmUtilityServiceClient.countTokens(request);
388390
* }
@@ -417,6 +419,8 @@ public final CountTokensResponse countTokens(CountTokensRequest request) {
417419
* .setModel("model104069929")
418420
* .addAllInstances(new ArrayList<Value>())
419421
* .addAllContents(new ArrayList<Content>())
422+
* .setSystemInstruction(Content.newBuilder().build())
423+
* .addAllTools(new ArrayList<Tool>())
420424
* .build();
421425
* ApiFuture<CountTokensResponse> future =
422426
* llmUtilityServiceClient.countTokensCallable().futureCall(request);
@@ -451,7 +455,7 @@ public final UnaryCallable<CountTokensRequest, CountTokensResponse> countTokensC
451455
*
452456
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
453457
* ids.
454-
* @param instances Required. The instances that are the input to token computing API call. Schema
458+
* @param instances Optional. The instances that are the input to token computing API call. Schema
455459
* is identical to the prediction schema of the text model, even for the non-text models, like
456460
* chat models, or Codey models.
457461
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -488,7 +492,7 @@ public final ComputeTokensResponse computeTokens(EndpointName endpoint, List<Val
488492
*
489493
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
490494
* ids.
491-
* @param instances Required. The instances that are the input to token computing API call. Schema
495+
* @param instances Optional. The instances that are the input to token computing API call. Schema
492496
* is identical to the prediction schema of the text model, even for the non-text models, like
493497
* chat models, or Codey models.
494498
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -519,6 +523,8 @@ public final ComputeTokensResponse computeTokens(String endpoint, List<Value> in
519523
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
520524
* .toString())
521525
* .addAllInstances(new ArrayList<Value>())
526+
* .setModel("model104069929")
527+
* .addAllContents(new ArrayList<Content>())
522528
* .build();
523529
* ComputeTokensResponse response = llmUtilityServiceClient.computeTokens(request);
524530
* }
@@ -551,6 +557,8 @@ public final ComputeTokensResponse computeTokens(ComputeTokensRequest request) {
551557
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
552558
* .toString())
553559
* .addAllInstances(new ArrayList<Value>())
560+
* .setModel("model104069929")
561+
* .addAllContents(new ArrayList<Content>())
554562
* .build();
555563
* ApiFuture<ComputeTokensResponse> future =
556564
* llmUtilityServiceClient.computeTokensCallable().futureCall(request);

java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/api/PredictionServiceClient.java

+6-2
Original file line numberDiff line numberDiff line change
@@ -1378,8 +1378,12 @@ public final UnaryCallable<ExplainRequest, ExplainResponse> explainCallable() {
13781378
* }
13791379
* }</pre>
13801380
*
1381-
* @param model Required. The name of the publisher model requested to serve the prediction.
1382-
* Format: `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
1381+
* @param model Required. The fully qualified name of the publisher model or tuned model endpoint
1382+
* to use.
1383+
* <p>Publisher model format:
1384+
* `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
1385+
* <p>Tuned model endpoint format:
1386+
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
13831387
* @param contents Required. The content of the current conversation with the model.
13841388
* <p>For single-turn queries, this is a single instance. For multi-turn queries, this is a
13851389
* repeated field that contains conversation history + latest request.

java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/api/stub/EndpointServiceStubSettings.java

+2
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import com.google.api.core.ApiFunction;
2323
import com.google.api.core.ApiFuture;
2424
import com.google.api.core.BetaApi;
25+
import com.google.api.core.ObsoleteApi;
2526
import com.google.api.gax.core.GaxProperties;
2627
import com.google.api.gax.core.GoogleCredentialsProvider;
2728
import com.google.api.gax.core.InstantiatingExecutorProvider;
@@ -402,6 +403,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
402403
}
403404

404405
/** Returns the default service endpoint. */
406+
@ObsoleteApi("Use getEndpoint() instead")
405407
public static String getDefaultEndpoint() {
406408
return "aiplatform.googleapis.com:443";
407409
}

java-vertexai/google-cloud-vertexai/src/main/java/com/google/cloud/vertexai/api/stub/GrpcPredictionServiceStub.java

-12
Original file line numberDiff line numberDiff line change
@@ -385,24 +385,12 @@ protected GrpcPredictionServiceStub(
385385
streamDirectPredictTransportSettings =
386386
GrpcCallSettings.<StreamDirectPredictRequest, StreamDirectPredictResponse>newBuilder()
387387
.setMethodDescriptor(streamDirectPredictMethodDescriptor)
388-
.setParamsExtractor(
389-
request -> {
390-
RequestParamsBuilder builder = RequestParamsBuilder.create();
391-
builder.add("endpoint", String.valueOf(request.getEndpoint()));
392-
return builder.build();
393-
})
394388
.build();
395389
GrpcCallSettings<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>
396390
streamDirectRawPredictTransportSettings =
397391
GrpcCallSettings
398392
.<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>newBuilder()
399393
.setMethodDescriptor(streamDirectRawPredictMethodDescriptor)
400-
.setParamsExtractor(
401-
request -> {
402-
RequestParamsBuilder builder = RequestParamsBuilder.create();
403-
builder.add("endpoint", String.valueOf(request.getEndpoint()));
404-
return builder.build();
405-
})
406394
.build();
407395
GrpcCallSettings<StreamingPredictRequest, StreamingPredictResponse>
408396
streamingPredictTransportSettings =

0 commit comments

Comments (0)