【问题标题】:How to convert PVector to PImage (Kinect tracking in Processing)：如何在 Processing 的 Kinect 跟踪中将 PVector 转换为 PImage
【发布时间】:2020-05-18 09:15:51
【问题描述】:

我正在按照 Daniel Shiffman 的教程在 Processing 中制作 Kinect 运动跟踪器。教程中用一个 PVector 保存从原始深度数据计算出的位置（它会跟随运动）。我想知道能否改为在这个跟随运动的位置上显示一张 PImage，也就是说，有没有办法把 PVector 所表示的位置用于绘制 PImage。谢谢

这是代码

import org.openkinect.freenect.*;
import org.openkinect.processing.*;

// Tracks the nearest blob in the Kinect depth image by averaging the
// coordinates of every pixel closer than `threshold`, and renders a
// visualization of the result. Relies on the sketch-level `kinect`
// global being constructed before this class is instantiated.
class KinectTracker {

  // Depth threshold in raw Kinect depth units; pixels with a smaller
  // (nearer) raw depth count as part of the tracked object
  int threshold = 745;

  // Raw location: per-frame centroid of all in-threshold pixels
  PVector loc;

  // Interpolated location: smoothed copy of loc, eased 30% per frame
  PVector lerpedLoc;

  // Depth data: one raw depth value per pixel, row-major (x + y*width)
  int[] depth;

  // What we'll show the user: the depth image with in-threshold pixels
  // painted red
  PImage display;

  KinectTracker() {
    // This is an awkward use of a global variable here
    // But doing it this way for simplicity
    kinect.initDepth();
    kinect.enableMirror(true);
    // Make a blank image
    display = createImage(kinect.width, kinect.height, RGB);
    // Set up the vectors
    loc = new PVector(0, 0);
    lerpedLoc = new PVector(0, 0);
  }

  // Reads the current depth frame and updates loc / lerpedLoc.
  void track() {
    // Get the raw depth as array of integers
    depth = kinect.getRawDepth();

    // Being overly cautious here
    if (depth == null) return;

    // Accumulate the coordinates of every pixel under the threshold
    float sumX = 0;
    float sumY = 0;
    float count = 0;

    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {

        int offset =  x + y*kinect.width;
        // Grabbing the raw depth
        int rawDepth = depth[offset];

        // Testing against threshold
        if (rawDepth < threshold) {
          sumX += x;
          sumY += y;
          count++;
        }
      }
    }
    // As long as we found something
    // (otherwise keep the previous loc so the point doesn't jump to 0,0)
    if (count != 0) {
      loc = new PVector(sumX/count, sumY/count);
    }

    // Interpolating the location, doing it arbitrarily for now
    // (0.3 = move 30% of the way toward the raw location each frame)
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
  }

  // Returns the smoothed position (shared reference, not a copy).
  PVector getLerpedPos() {
    return lerpedLoc;
  }

  // Returns the raw centroid position (shared reference, not a copy).
  PVector getPos() {
    return loc;
  }

  // Rebuilds the `display` image from the latest depth frame (threshold
  // hits in red, everything else copied from the depth image) and draws
  // it at the origin. (In Java a field and a method may legally share
  // the name `display`.)
  void display() {
    PImage img = kinect.getDepthImage();

    // Being overly cautious here
    if (depth == null || img == null) return;

    // Going to rewrite the depth image to show which pixels are in threshold
    // A lot of this is redundant, but this is just for demonstration purposes
    // NOTE(review): img.pixels is read below without calling img.loadPixels();
    // depth images from the library appear to arrive populated — confirm.
    display.loadPixels();
    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {

        int offset = x + y * kinect.width;
        // Raw depth
        int rawDepth = depth[offset];
        int pix = x + y * display.width;
        if (rawDepth < threshold) {
          // A red color instead
          display.pixels[pix] = color(150, 50, 50);
        } else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();

    // Draw the image
    image(display, 0, 0);
  }

  // Returns the current depth threshold.
  int getThreshold() {
    return threshold;
  }

  // Sets the depth threshold (no clamping is applied).
  void setThreshold(int t) {
    threshold =  t;
  }
}
// Sketch-level globals: the tracker and the Kinect device it reads from.
// `kinect` must be assigned before KinectTracker's constructor runs.
KinectTracker tracker;
Kinect kinect;


void setup() {
  // 640x480 for the depth image plus 40px at the bottom for the status text
  size(640, 520);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
}

// Render one frame: run the depth analysis, show the thresholded depth
// image, then overlay the raw (blue) and smoothed (green) tracked points
// plus a status line at the bottom.
void draw() {
  background(255);

  // Run the tracking analysis
  tracker.track();
  // Show the image
  tracker.display();

  noStroke();

  // Raw centroid in blue
  PVector rawPos = tracker.getPos();
  fill(50, 100, 250, 200);
  ellipse(rawPos.x, rawPos.y, 20, 20);

  // Smoothed ("lerped") centroid in green
  PVector smoothPos = tracker.getLerpedPos();
  fill(100, 250, 50, 200);
  ellipse(smoothPos.x, smoothPos.y, 20, 20);

  // Status line: current threshold, frame rate, and key help
  fill(0);
  String info = "threshold: " + tracker.getThreshold() + "    "
    + "framerate: " + int(frameRate) + "    "
    + "UP increase threshold, DOWN decrease threshold";
  text(info, 10, 500);
}

// Adjust the threshold with key presses
// Adjust the depth threshold: UP raises it by 5, DOWN lowers it by 5.
void keyPressed() {
  if (key != CODED) return;
  int step = 0;
  if (keyCode == UP) {
    step = 5;
  } else if (keyCode == DOWN) {
    step = -5;
  }
  if (step != 0) {
    tracker.setThreshold(tracker.getThreshold() + step);
  }
}

【问题讨论】:

    标签: java processing kinect tracking


    【解决方案1】:

    PVector 具有 .x 和 .y 属性，这正是你用 image() 在该位置绘制 PImage 所需要的全部内容：

    假设 imageToDrag 是一个在 setup() 中加载/生成、并在整个草图中都可访问的 PImage 实例，你只需在 draw() 的末尾加上类似这样的一行：

    image(imageToDrag, v2.x, v2.y);
    

    在上下文中:

    import org.openkinect.freenect.*;
    import org.openkinect.processing.*;
    
    // Tracks the nearest blob in the Kinect depth image by averaging the
    // coordinates of every pixel closer than `threshold`, and renders a
    // visualization of the result. Relies on the sketch-level `kinect`
    // global being constructed before this class is instantiated.
    class KinectTracker {

      // Depth threshold in raw Kinect depth units; pixels with a smaller
      // (nearer) raw depth count as part of the tracked object
      int threshold = 745;

      // Raw location: per-frame centroid of all in-threshold pixels
      PVector loc;

      // Interpolated location: smoothed copy of loc, eased 30% per frame
      PVector lerpedLoc;

      // Depth data: one raw depth value per pixel, row-major (x + y*width)
      int[] depth;

      // What we'll show the user: the depth image with in-threshold pixels
      // painted red
      PImage display;

      KinectTracker() {
        // This is an awkward use of a global variable here
        // But doing it this way for simplicity
        kinect.initDepth();
        kinect.enableMirror(true);
        // Make a blank image
        display = createImage(kinect.width, kinect.height, RGB);
        // Set up the vectors
        loc = new PVector(0, 0);
        lerpedLoc = new PVector(0, 0);
      }

      // Reads the current depth frame and updates loc / lerpedLoc.
      void track() {
        // Get the raw depth as array of integers
        depth = kinect.getRawDepth();

        // Being overly cautious here
        if (depth == null) return;

        // Accumulate the coordinates of every pixel under the threshold
        float sumX = 0;
        float sumY = 0;
        float count = 0;

        for (int x = 0; x < kinect.width; x++) {
          for (int y = 0; y < kinect.height; y++) {

            int offset =  x + y*kinect.width;
            // Grabbing the raw depth
            int rawDepth = depth[offset];

            // Testing against threshold
            if (rawDepth < threshold) {
              sumX += x;
              sumY += y;
              count++;
            }
          }
        }
        // As long as we found something
        // (otherwise keep the previous loc so the point doesn't jump to 0,0)
        if (count != 0) {
          loc = new PVector(sumX/count, sumY/count);
        }

        // Interpolating the location, doing it arbitrarily for now
        // (0.3 = move 30% of the way toward the raw location each frame)
        lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
        lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
      }

      // Returns the smoothed position (shared reference, not a copy).
      PVector getLerpedPos() {
        return lerpedLoc;
      }

      // Returns the raw centroid position (shared reference, not a copy).
      PVector getPos() {
        return loc;
      }

      // Rebuilds the `display` image from the latest depth frame (threshold
      // hits in red, everything else copied from the depth image) and draws
      // it at the origin. (In Java a field and a method may legally share
      // the name `display`.)
      void display() {
        PImage img = kinect.getDepthImage();

        // Being overly cautious here
        if (depth == null || img == null) return;

        // Going to rewrite the depth image to show which pixels are in threshold
        // A lot of this is redundant, but this is just for demonstration purposes
        // NOTE(review): img.pixels is read below without calling img.loadPixels();
        // depth images from the library appear to arrive populated — confirm.
        display.loadPixels();
        for (int x = 0; x < kinect.width; x++) {
          for (int y = 0; y < kinect.height; y++) {

            int offset = x + y * kinect.width;
            // Raw depth
            int rawDepth = depth[offset];
            int pix = x + y * display.width;
            if (rawDepth < threshold) {
              // A red color instead
              display.pixels[pix] = color(150, 50, 50);
            } else {
              display.pixels[pix] = img.pixels[offset];
            }
          }
        }
        display.updatePixels();

        // Draw the image
        image(display, 0, 0);
      }

      // Returns the current depth threshold.
      int getThreshold() {
        return threshold;
      }

      // Sets the depth threshold (no clamping is applied).
      void setThreshold(int t) {
        threshold =  t;
      }
    }
    // Sketch-level globals: the tracker, the Kinect device it reads from,
    // and the image dragged around by the tracked point.
    // `kinect` must be assigned before KinectTracker's constructor runs.
    KinectTracker tracker;
    Kinect kinect;

    // Image drawn at the tracked ("lerped") location each frame
    PImage imageToDrag;

    void setup() {
      // 640x480 for the depth image plus 40px at the bottom for status text
      size(640, 520);
      kinect = new Kinect(this);
      tracker = new KinectTracker();
      // generate test image: can swap this to loadImage("yourAwesomeImageHere.png");
      imageToDrag = getTestImage(128,128,RGB);
    }
    
    // Render one frame: run the depth analysis, show the thresholded depth
    // image, overlay the raw (blue) and smoothed (green) tracked points,
    // drag the image along with the smoothed point, and print a status line.
    void draw() {
      background(255);

      // Run the tracking analysis
      tracker.track();
      // Show the image
      tracker.display();

      noStroke();

      // Raw centroid in blue
      PVector rawPos = tracker.getPos();
      fill(50, 100, 250, 200);
      ellipse(rawPos.x, rawPos.y, 20, 20);

      // Smoothed ("lerped") centroid in green
      PVector smoothPos = tracker.getLerpedPos();
      fill(100, 250, 50, 200);
      ellipse(smoothPos.x, smoothPos.y, 20, 20);

      // draw an image at the "lerped" location
      image(imageToDrag, smoothPos.x, smoothPos.y);

      // Status line: current threshold, frame rate, and key help
      fill(0);
      String info = "threshold: " + tracker.getThreshold() + "    "
        + "framerate: " + int(frameRate) + "    "
        + "UP increase threshold, DOWN decrease threshold";
      text(info, 10, 500);
    }
    
    // Adjust the threshold with key presses
    // Adjust the depth threshold: UP raises it by 5, DOWN lowers it by 5.
    void keyPressed() {
      if (key != CODED) return;
      int step = 0;
      if (keyCode == UP) {
        step = 5;
      } else if (keyCode == DOWN) {
        step = -5;
      }
      if (step != 0) {
        tracker.setThreshold(tracker.getThreshold() + step);
      }
    }
    
    // Builds a simple RGB gradient test image as a stand-in for loadImage():
    // red rises left-to-right, green top-to-bottom, blue along the
    // anti-diagonal.
    //   w, h  - image dimensions in pixels
    //   type  - Processing image format constant (e.g. RGB)
    PImage getTestImage(int w, int h, int type){
      PImage out = createImage(w,h,type);

      for(int i = 0 ; i < out.pixels.length; i++){
        // recover 2D coordinates from the 1D row-major pixel index
        int x = i % w;
        int y = i / w;
        int z = (w - (x+y));
        // Fix: the green channel was mapped over the width (w) instead of
        // the height (h), skewing the gradient for non-square images.
        // (Identical output for square images like the 128x128 call above.)
        // NOTE(review): z goes negative for large x+y; color() clamps the
        // mapped value, which matches the original's look — left as-is.
        out.pixels[i] = color(map(x,0,w,0,255),
                             map(y,0,h,0,255),
                             map(z,0,w,0,255));
      }
      out.updatePixels();

      return out;
    }
    

    【讨论】:

      猜你喜欢
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 2022-07-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      相关资源
      最近更新 更多