feat: [vertexai] update gapic library to the latest version #11129

Merged 1 commit on Sep 10, 2024
@@ -315,7 +315,7 @@ public LlmUtilityServiceStub getStub() {
*
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
- * @param instances Required. The instances that are the input to token counting call. Schema is
+ * @param instances Optional. The instances that are the input to token counting call. Schema is
* identical to the prediction schema of the underlying model.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -351,7 +351,7 @@ public final CountTokensResponse countTokens(EndpointName endpoint, List<Value>
*
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
- * @param instances Required. The instances that are the input to token counting call. Schema is
+ * @param instances Optional. The instances that are the input to token counting call. Schema is
* identical to the prediction schema of the underlying model.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
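With instances now optional, token counting can also be driven by contents alone (see the request-based sample further down). For orientation, here is a minimal usage sketch of the EndpointName overload above (not taken from this PR: the project, location, and endpoint IDs are placeholders, and the imports assume the com.google.cloud.aiplatform.v1 package). The String overload behaves the same but takes the fully qualified resource name.

import com.google.cloud.aiplatform.v1.CountTokensResponse;
import com.google.cloud.aiplatform.v1.EndpointName;
import com.google.cloud.aiplatform.v1.LlmUtilityServiceClient;
import com.google.protobuf.Value;
import java.util.Arrays;
import java.util.List;

public class CountTokensSketch {
  public static void main(String[] args) throws Exception {
    try (LlmUtilityServiceClient client = LlmUtilityServiceClient.create()) {
      EndpointName endpoint = EndpointName.of("my-project", "us-central1", "my-endpoint");
      // Instances follow the prediction schema of the underlying model;
      // a bare string Value is the simplest case for a text model.
      List<Value> instances =
          Arrays.asList(Value.newBuilder().setStringValue("Hello world").build());
      CountTokensResponse response = client.countTokens(endpoint, instances);
      System.out.println("Total tokens: " + response.getTotalTokens());
    }
  }
}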
@@ -383,6 +383,8 @@ public final CountTokensResponse countTokens(String endpoint, List<Value> instan
* .setModel("model104069929")
* .addAllInstances(new ArrayList<Value>())
* .addAllContents(new ArrayList<Content>())
+ * .setSystemInstruction(Content.newBuilder().build())
+ * .addAllTools(new ArrayList<Tool>())
* .build();
* CountTokensResponse response = llmUtilityServiceClient.countTokens(request);
* }
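The two added lines correspond to new CountTokensRequest fields. As a hedged illustration of populating them with real values rather than empty builders, reusing the client and endpoint from the sketch above (the model name and the function declaration are placeholders, not from this PR):

CountTokensRequest request =
    CountTokensRequest.newBuilder()
        .setEndpoint(endpoint.toString())
        .setModel("publishers/google/models/gemini-1.0-pro") // placeholder model name
        .addContents(
            Content.newBuilder()
                .setRole("user")
                .addParts(Part.newBuilder().setText("What is the capital of France?")))
        // New in this version: a system instruction counted alongside the contents.
        .setSystemInstruction(
            Content.newBuilder().addParts(Part.newBuilder().setText("Answer in one word.")))
        // New in this version: tools, whose declarations also consume tokens.
        .addTools(
            Tool.newBuilder()
                .addFunctionDeclarations(
                    FunctionDeclaration.newBuilder()
                        .setName("lookup_city") // hypothetical function
                        .setDescription("Looks up facts about a city")))
        .build();
CountTokensResponse response = client.countTokens(request);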
@@ -417,6 +419,8 @@ public final CountTokensResponse countTokens(CountTokensRequest request) {
* .setModel("model104069929")
* .addAllInstances(new ArrayList<Value>())
* .addAllContents(new ArrayList<Content>())
+ * .setSystemInstruction(Content.newBuilder().build())
+ * .addAllTools(new ArrayList<Tool>())
* .build();
* ApiFuture<CountTokensResponse> future =
* llmUtilityServiceClient.countTokensCallable().futureCall(request);
@@ -451,7 +455,7 @@ public final UnaryCallable<CountTokensRequest, CountTokensResponse> countTokensC
*
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
* ids.
- * @param instances Required. The instances that are the input to token computing API call. Schema
+ * @param instances Optional. The instances that are the input to token computing API call. Schema
* is identical to the prediction schema of the text model, even for the non-text models, like
* chat models, or Codey models.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -488,7 +492,7 @@ public final ComputeTokensResponse computeTokens(EndpointName endpoint, List<Val
*
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
* ids.
- * @param instances Required. The instances that are the input to token computing API call. Schema
+ * @param instances Optional. The instances that are the input to token computing API call. Schema
* is identical to the prediction schema of the text model, even for the non-text models, like
* chat models, or Codey models.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
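The same Required-to-Optional relaxation applies here. A hypothetical sketch of the EndpointName overload, with placeholder IDs and the com.google.cloud.aiplatform.v1 package assumed:

try (LlmUtilityServiceClient client = LlmUtilityServiceClient.create()) {
  EndpointName endpoint = EndpointName.of("my-project", "us-central1", "my-endpoint");
  // Per the Javadoc, the schema matches the text-model prediction schema
  // even for chat or Codey models.
  List<Value> instances =
      Arrays.asList(Value.newBuilder().setStringValue("Tokenize this sentence.").build());
  ComputeTokensResponse response = client.computeTokens(endpoint, instances);
  // Each TokensInfo holds parallel lists of tokens and token ids.
  response.getTokensInfoList().forEach(info -> System.out.println(info.getTokensList()));
}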
@@ -519,6 +523,8 @@ public final ComputeTokensResponse computeTokens(String endpoint, List<Value> in
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
* .toString())
* .addAllInstances(new ArrayList<Value>())
* .setModel("model104069929")
* .addAllContents(new ArrayList<Content>())
* .build();
* ComputeTokensResponse response = llmUtilityServiceClient.computeTokens(request);
* }
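The added setModel and addAllContents calls mirror the countTokens changes: ComputeTokensRequest can now name a model and carry structured contents instead of raw instances. A hedged sketch with real values, reusing the client and endpoint from the sketches above (the model name is a placeholder):

ComputeTokensRequest request =
    ComputeTokensRequest.newBuilder()
        .setEndpoint(endpoint.toString())
        .setModel("publishers/google/models/gemini-1.0-pro") // placeholder
        .addContents(
            Content.newBuilder()
                .setRole("user")
                .addParts(Part.newBuilder().setText("Tokenize this sentence.")))
        .build();
ComputeTokensResponse response = client.computeTokens(request);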
@@ -551,6 +557,8 @@ public final ComputeTokensResponse computeTokens(ComputeTokensRequest request) {
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
* .toString())
* .addAllInstances(new ArrayList<Value>())
* .setModel("model104069929")
* .addAllContents(new ArrayList<Content>())
* .build();
* ApiFuture<ComputeTokensResponse> future =
* llmUtilityServiceClient.computeTokensCallable().futureCall(request);
@@ -1378,8 +1378,12 @@ public final UnaryCallable<ExplainRequest, ExplainResponse> explainCallable() {
* }
* }</pre>
*
- * @param model Required. The name of the publisher model requested to serve the prediction.
- * Format: `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
+ * @param model Required. The fully qualified name of the publisher model or tuned model endpoint
+ * to use.
+ * <p>Publisher model format:
+ * `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
+ * <p>Tuned model endpoint format:
+ * `projects/{project}/locations/{location}/endpoints/{endpoint}`
* @param contents Required. The content of the current conversation with the model.
* <p>For single-turn queries, this is a single instance. For multi-turn queries, this is a
* repeated field that contains conversation history + latest request.
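In practice, the broadened parameter means the same generateContent call accepts either resource style. A hypothetical sketch based on the overload documented above (IDs and model names are placeholders, and predictionServiceClient is assumed to be an open PredictionServiceClient):

// Either form is now a valid argument for the model parameter.
String publisherModel =
    "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.0-pro";
String tunedModelEndpoint =
    "projects/my-project/locations/us-central1/endpoints/my-tuned-endpoint";
List<Content> contents =
    Arrays.asList(
        Content.newBuilder()
            .setRole("user")
            .addParts(Part.newBuilder().setText("Hello"))
            .build());
GenerateContentResponse response =
    predictionServiceClient.generateContent(publisherModel, contents);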
@@ -22,6 +22,7 @@
import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
+import com.google.api.core.ObsoleteApi;
import com.google.api.gax.core.GaxProperties;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
@@ -402,6 +403,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
}

/** Returns the default service endpoint. */
@ObsoleteApi("Use getEndpoint() instead")
public static String getDefaultEndpoint() {
return "aiplatform.googleapis.com:443";
}
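@ObsoleteApi marks the static default as superseded without removing it: the annotation points callers to the instance-level getEndpoint() instead. A hedged sketch of the suggested pattern (the settings class name is an assumption, since this view does not show the file name):

LlmUtilityServiceSettings settings =
    LlmUtilityServiceSettings.newBuilder()
        .setEndpoint("aiplatform.googleapis.com:443") // set explicitly if needed
        .build();
String endpoint = settings.getEndpoint(); // preferred over the static getDefaultEndpoint()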
@@ -385,24 +385,12 @@ protected GrpcPredictionServiceStub(
    streamDirectPredictTransportSettings =
        GrpcCallSettings.<StreamDirectPredictRequest, StreamDirectPredictResponse>newBuilder()
            .setMethodDescriptor(streamDirectPredictMethodDescriptor)
-           .setParamsExtractor(
-               request -> {
-                 RequestParamsBuilder builder = RequestParamsBuilder.create();
-                 builder.add("endpoint", String.valueOf(request.getEndpoint()));
-                 return builder.build();
-               })
            .build();
    GrpcCallSettings<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>
        streamDirectRawPredictTransportSettings =
            GrpcCallSettings
                .<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>newBuilder()
                .setMethodDescriptor(streamDirectRawPredictMethodDescriptor)
-               .setParamsExtractor(
-                   request -> {
-                     RequestParamsBuilder builder = RequestParamsBuilder.create();
-                     builder.add("endpoint", String.valueOf(request.getEndpoint()));
-                     return builder.build();
-                   })
                .build();
    GrpcCallSettings<StreamingPredictRequest, StreamingPredictResponse>
        streamingPredictTransportSettings =
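For context: setParamsExtractor is what populates the x-goog-request-params routing header that gax attaches to gRPC calls. Its removal means the StreamDirectPredict and StreamDirectRawPredict transport settings no longer add an explicit endpoint routing header; presumably (a reading of the diff, not something the PR states) the generated routing configuration for these bidirectional streaming methods changed upstream.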