Search code examples
node.jsgoogle-cloud-platformartificial-intelligencegoogle-cloud-vertex-ai

Vertex AI - NodeJS SDK - Error: 3 INVALID_ARGUMENT: 0 is out of supported range [1, 1025);


I'm following the example for calling the predict endpoint: :

I am getting this error in Vertex ai:

⨯ Error: 3 INVALID_ARGUMENT: 0 is out of supported range [1, 1025);  for the maximum number of tokens.
    at callErrorFromStatus (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/call.js:31:19)
    at Object.onReceiveStatus (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/client.js:192:76)
    at Object.onReceiveStatus (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/client-interceptors.js:344:141)
    at Object.onReceiveStatus (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/client-interceptors.js:308:181)
    at eval (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/resolving-call.js:94:78)
    at process.processTicksAndRejections (node:internal/process/task_queues:77:11)
for call at
    at ServiceClientImpl.makeUnaryRequest (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/client.js:162:32)
    at ServiceClientImpl.eval (webpack-internal:///(rsc)/./node_modules/@grpc/grpc-js/build/src/make-client.js:103:19)
    at eval (webpack-internal:///(rsc)/./node_modules/@google-cloud/aiplatform/build/src/v1/prediction_service_client.js:241:33)
    at eval (webpack-internal:///(rsc)/./node_modules/google-gax/build/src/normalCalls/timeout.js:42:16)
    at OngoingCallPromise.call (webpack-internal:///(rsc)/./node_modules/google-gax/build/src/call.js:64:27)
    at NormalApiCaller.call (webpack-internal:///(rsc)/./node_modules/google-gax/build/src/normalCalls/normalApiCaller.js:34:19)
    at eval (webpack-internal:///(rsc)/./node_modules/google-gax/build/src/createApiCall.js:75:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5) {
  code: 3,
  details: '0 is out of supported range [1, 1025);  for the maximum number of tokens.',
  metadata: Metadata {
    internalRepr: Map(2) {
      'endpoint-load-metrics-bin' => [Array],
      'grpc-server-stats-bin' => [Array]
    },
    options: {}
  }

Here is my code:

const projectId = process.env.PROJECT_ID;
const zone = process.env.ZONE;

const aiplatform = require('@google-cloud/aiplatform');

const {PredictionServiceClient} = aiplatform.v1 
const {EndpointServiceClient} = aiplatform.v1 
const {helpers} = aiplatform;

const clientOptions = {
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
};
const publisher = 'google';
const model = 'text-bison@001';

const predictionServiceClient = new PredictionServiceClient(clientOptions);

export async function callPredict() {
  // configure parent resource
    const endpoint = `projects/${projectId}/locations/${zone}/publishers/${publisher}/models/${model}`;
  const prompt = {
    prompt: 
    'Give me ten interview questions for a project manager, comapre with a program manager',
  };
   const instanceValue = helpers.toValue(prompt);
    

  const instances = [instanceValue];



  const request = {
    endpoint: endpoint,
    instances,
 
  };
  const test = await predictionServiceClient.apiEndpoint();
  const response = await predictionServiceClient.predict(request);
  console.log(response);
  console.log(test);
  return response;
}

Solution

  • I encounter the same issue. It seems parameters is required in request.

    const request = {
        endpoint: endpoint,
        instances,
        parameters: {
          temperature: 0.2,
          maxOutputTokens: 256,
      topP: 0.95,
      topK: 40,
        }
    };