javascript - await promise gets stuck
I am implementing Google Cloud Video Intelligence person detection.
I followed their docs completely and implemented the same script they provide, but my code does not get past this line:
const results = await operation.promise();
On the terminal I get:
A
B
C
and that's it. Somehow nothing runs after the line const results = await operation.promise();.
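(For context: operation.promise() resolves only when the long-running annotation job completes on the server, so a long silence after "C" can simply mean the video is still being processed, especially since the local file is sent inline as base64. A plain-JavaScript sketch for telling a slow job apart from a genuinely stuck one, using a hypothetical withTimeout helper:)

// Hypothetical helper: rejects if the wrapped promise does not settle within ms.
function withTimeout(promise, ms) {
  const timeout = new Promise((_, reject) =>
    setTimeout(() => reject(new Error(`timed out after ${ms} ms`)), ms)
  );
  return Promise.race([promise, timeout]);
}

// Used around the line that appears to hang, e.g.:
// const results = await withTimeout(operation.promise(), 10 * 60 * 1000);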
Here is my complete script:
'use strict';
function main(path = 'my-file.mp4') {
  
  // [START video_detect_person]
  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';
  // Imports the Google Cloud Video Intelligence library + Node's fs library
  const Video = require('@google-cloud/video-intelligence').v1;
  const fs = require('fs');
  // Creates a client
  const video = new Video.VideoIntelligenceServiceClient();
  /**
   * TODO(developer): Uncomment the following line before running the sample.
   */
  // const path = 'Local file to analyze, e.g. ./my-file.mp4';
  // Reads a local video file and converts it to base64
  const file = fs.readFileSync(path);
  const inputContent = file.toString('base64');
  async function detectPerson() {
    console.log("A");
    
    const request = {
      inputContent: inputContent,
      features: ['PERSON_DETECTION'],
      videoContext: {
        personDetectionConfig: {
          // Must set includeBoundingBoxes to true to get poses and attributes.
          includeBoundingBoxes: true,
          includePoseLandmarks: true,
          includeAttributes: true,
        },
      },
    };
    // Detects people in the video
    // We get the first result because we only process 1 video
    console.log("B");
    const [operation] = await video.annotateVideo(request);
    
    console.log("C");
    const results = await operation.promise();
    console.log("D");
    console.log('Waiting for operation to complete...');
    // Gets annotations for video
    const personAnnotations =
      results[0].annotationResults[0].personDetectionAnnotations;
    for (const {tracks} of personAnnotations) {
      console.log('Person detected:');
      for (const {segment, timestampedObjects} of tracks) {
        console.log(
          `\tStart: ${segment.startTimeOffset.seconds}` +
            `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
        );
        console.log(
          `\tEnd: ${segment.endTimeOffset.seconds}.` +
            `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
        );
        // Each segment includes timestamped objects that
        // include characteristic--e.g. clothes, posture
        // of the person detected.
        const [firstTimestampedObject] = timestampedObjects;
        // Attributes include unique pieces of clothing, poses (i.e., body
        // landmarks) of the person detected.
        for (const {name, value} of firstTimestampedObject.attributes) {
          console.log(`\tAttribute: ${name}; Value: ${value}`);
        }
        // Landmarks in person detection include body parts.
        for (const {name, point} of firstTimestampedObject.landmarks) {
          console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`);
        }
      }
    }
  }
  detectPerson(); 
  // [END video_detect_person]
}
main(...process.argv.slice(2));
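For debugging, two small additions can help. They use only plain JavaScript; detectPerson and operation are the names from the script above. First, printing any rejection instead of letting it disappear:

// Replace the bare detectPerson() call at the bottom of main() with:
detectPerson().catch(err => {
  console.error('detectPerson failed:', err);
  process.exitCode = 1;
});

Second, a heartbeat around the await that appears to hang, to confirm the process is still alive while the annotation job runs:

    console.log("C");
    const heartbeat = setInterval(
      () => console.log('still waiting for the annotation job...'),
      30 * 1000
    );
    try {
      const results = await operation.promise();
      console.log("D");
      // ...existing result handling...
    } finally {
      clearInterval(heartbeat);
    }

If the base64 upload itself turns out to be the bottleneck, the sample's commented-out gcsUri variant (passing inputUri instead of inputContent in the request) may be worth trying.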
							
Source: Stack Overflow, licensed under CC BY-SA 3.0.