【Title】: Rendering a SceneKit scene to video output
【Posted】: 2015-05-17 14:26:19
【Question】:

As a primarily high-level/iOS developer, I'm interested in using SceneKit for animation projects.

I've been enjoying SceneKit for several months now, and although it is clearly designed for "live" interaction, I find it incredibly useful to be able to "render" an SCNScene to video. Currently I've been using QuickTime's screen recorder to capture the video output, but (of course) the frame rate drops in doing so. Is there an alternative that allows a scene to be rendered at its own pace and output as a smooth video file?

I understand this is unlikely... just thought I'd ask in case I'm missing something lower-level!

【Question Comments】:

  • Which option did you end up using? Were you able to find something that can also render the video as if it were shot from a moving camera node?

Tags: ios macos video rendering scenekit


【Solution 1】:

You can use an SCNRenderer to render offscreen into a CGImage, and then add the CGImage to a video stream using AVFoundation (a sketch of that writer side follows the extension below).

I wrote this Swift extension for rendering into a CGImage.

public extension SCNRenderer {

    public func renderToImageSize(size: CGSize, floatComponents: Bool, atTime time: NSTimeInterval) -> CGImage? {

        var thumbnailCGImage: CGImage?

        let width = GLsizei(size.width), height = GLsizei(size.height)
        let samplesPerPixel = 4

        #if os(iOS)
            let oldGLContext = EAGLContext.currentContext()
            let glContext = unsafeBitCast(context, EAGLContext.self)

            EAGLContext.setCurrentContext(glContext)
            objc_sync_enter(glContext)
        #elseif os(OSX)
            let oldGLContext = CGLGetCurrentContext()
            let glContext = unsafeBitCast(context, CGLContextObj.self)

            CGLSetCurrentContext(glContext)
            CGLLockContext(glContext)
        #endif

        // set up the OpenGL buffers
        var thumbnailFramebuffer: GLuint = 0
        glGenFramebuffers(1, &thumbnailFramebuffer)
        glBindFramebuffer(GLenum(GL_FRAMEBUFFER), thumbnailFramebuffer); checkGLErrors()

        var colorRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &colorRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), colorRenderbuffer)
        if floatComponents {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA16F), width, height)
        } else {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA8), width, height)
        }
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_RENDERBUFFER), colorRenderbuffer); checkGLErrors()

        var depthRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &depthRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), depthRenderbuffer)
        glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_DEPTH_COMPONENT24), width, height)
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_DEPTH_ATTACHMENT), GLenum(GL_RENDERBUFFER), depthRenderbuffer); checkGLErrors()

        let framebufferStatus = Int32(glCheckFramebufferStatus(GLenum(GL_FRAMEBUFFER)))
        assert(framebufferStatus == GL_FRAMEBUFFER_COMPLETE)
        if framebufferStatus != GL_FRAMEBUFFER_COMPLETE {
            return nil
        }

        // clear buffer
        glViewport(0, 0, width, height)
        glClear(GLbitfield(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)); checkGLErrors()

        // render
        renderAtTime(time); checkGLErrors()

        // create the image
        if floatComponents { // float components (16-bits of actual precision)

            // slurp bytes out of OpenGL
            typealias ComponentType = Float

            // element count: width * height * 4 components (not bytes)
            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0)
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_FLOAT), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<Float>(imageRawBuffer),
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue | CGBitmapInfo.FloatComponents.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()

        } else { // byte components

            // slurp bytes out of OpenGL
            typealias ComponentType = UInt8

            // element count: width * height * 4 components (not bytes)
            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0)
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<ComponentType>(imageRawBuffer), // UInt8 components here, not Float
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()
        }

        #if os(iOS)
            objc_sync_exit(glContext)
            if oldGLContext != nil {
                EAGLContext.setCurrentContext(oldGLContext)
            }
        #elseif os(OSX)
            CGLUnlockContext(glContext)
            if oldGLContext != nil {
                CGLSetCurrentContext(oldGLContext)
            }
        #endif

        return thumbnailCGImage
    }
}


// Drain the OpenGL error queue, logging each error; asserts in debug builds if any occurred.
func checkGLErrors() {
    var glError: GLenum
    var hadError = false
    do {
        glError = glGetError()
        if glError != 0 {
            println(String(format: "OpenGL error %#x", glError))
            hadError = true
        }
    } while glError != 0
    assert(!hadError)
}
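
The AVFoundation half isn't shown above, so here is a minimal sketch of one way the rendered CGImages could be appended to a movie with AVAssetWriter, written in Swift 2-era style to match the extension. It is an illustration, not part of the original answer; `outputURL` and the 1280×720 size are placeholders.

import AVFoundation
import CoreVideo

// Minimal writer setup — output path, size, and codec settings are placeholders.
let outputURL = NSURL(fileURLWithPath: NSTemporaryDirectory() + "scene.mov")
let writer = try! AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie)
let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: [
    AVVideoCodecKey: AVVideoCodecH264,
    AVVideoWidthKey: 1280,
    AVVideoHeightKey: 720])
let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input,
    sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)])
writer.addInput(input)
writer.startWriting()
writer.startSessionAtSourceTime(kCMTimeZero)

// Draw one rendered CGImage into a pooled pixel buffer and append it.
func appendFrame(image: CGImage, atTime time: CMTime) -> Bool {
    guard let pool = adaptor.pixelBufferPool else { return false }
    var bufferOut: CVPixelBuffer? = nil
    CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &bufferOut)
    guard let buffer = bufferOut else { return false }

    CVPixelBufferLockBaseAddress(buffer, 0)
    let context = CGBitmapContextCreate(CVPixelBufferGetBaseAddress(buffer),
        CGImageGetWidth(image), CGImageGetHeight(image), 8,
        CVPixelBufferGetBytesPerRow(buffer), CGColorSpaceCreateDeviceRGB(),
        CGImageAlphaInfo.PremultipliedFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue)
    CGContextDrawImage(context,
        CGRect(x: 0, y: 0, width: CGImageGetWidth(image), height: CGImageGetHeight(image)),
        image)
    CVPixelBufferUnlockBaseAddress(buffer, 0)

    // A real implementation should wait until input.readyForMoreMediaData is true.
    return adaptor.appendPixelBuffer(buffer, withPresentationTime: time)
}

Each frame's presentation time would be something like CMTimeMake(frameIndex, 30); once all frames are appended, call input.markAsFinished() and writer.finishWritingWithCompletionHandler to close the file.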

【Discussion】:

  • This looks fantastic! Would it work if we wanted to capture video as though it were shot from a moving camera node? In other words, the goal is a video simulating a camera node exploring the SCNView.
  • Yes, this renders from the renderer's current scene.pointOfView node, so you can move that node and/or animate changes to its SCNCamera between frames (see the sketch after this list).
  • Thanks @WilShipley! If you don't mind, I'd like to ask you a few things over email. What's the best way to reach you?
  • wjs at delicious-monster dot com
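
To make that camera-motion point concrete, here is a hedged sketch (my illustration, not the answerer's code, assuming iOS) that steps the renderer frame by frame while moving the point-of-view node. It reuses renderToImageSize from the extension above and the appendFrame sketch from earlier; `renderer` and `cameraNode` are assumed to be configured elsewhere.

let framesPerSecond: NSTimeInterval = 30
for frame in 0..<300 {
    let time = NSTimeInterval(frame) / framesPerSecond
    // Mutate the camera (the scene's pointOfView node) between frames.
    cameraNode.position = SCNVector3(x: 0, y: 0, z: 10 + Float(time))
    if let cgImage = renderer.renderToImageSize(CGSize(width: 1280, height: 720),
                                                floatComponents: false, atTime: time) {
        appendFrame(cgImage, atTime: CMTimeMake(Int64(frame), 30))
    }
}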
【Solution 2】:

** This is the answer for SceneKit using Metal.

** Warning: This may not be an appropriate method for the App Store, but it works.

Step 1: Swap out CAMetalLayer's nextDrawable method with a new one using swizzling, and save the CAMetalDrawable on every render loop.

extension CAMetalLayer {
  public static func setupSwizzling() {
    struct Static {
      static var token: dispatch_once_t = 0
    }

    dispatch_once(&Static.token) {
      let copiedOriginalSelector = #selector(CAMetalLayer.originalNextDrawable)
      let originalSelector = #selector(CAMetalLayer.nextDrawable)
      let swizzledSelector = #selector(CAMetalLayer.newNextDrawable)

      let copiedOriginalMethod = class_getInstanceMethod(self, copiedOriginalSelector)
      let originalMethod = class_getInstanceMethod(self, originalSelector)
      let swizzledMethod = class_getInstanceMethod(self, swizzledSelector)

      let oldImp = method_getImplementation(originalMethod)
      method_setImplementation(copiedOriginalMethod, oldImp)
      method_exchangeImplementations(originalMethod, swizzledMethod)
    }
  }


  func newNextDrawable() -> CAMetalDrawable? {
    let drawable = originalNextDrawable()
    // Save the drawable anywhere you want
    AppManager.sharedInstance.currentSceneDrawable = drawable
    return drawable
  }

  func originalNextDrawable() -> CAMetalDrawable? {
    // This is just a placeholder; its implementation gets replaced with nextDrawable's.
    return nil
  }
}

Step 2: Set up the swizzling in the AppDelegate's application(_:didFinishLaunchingWithOptions:):

func application(application: UIApplication, didFinishLaunchingWithOptions launchOptions: [NSObject: AnyObject]?) -> Bool {
  CAMetalLayer.setupSwizzling()
  return true
}

Step 3: Disable framebufferOnly on your SCNView's CAMetalLayer (so that getBytes can be called on the MTLTexture):

if let metalLayer = scnView.layer as? CAMetalLayer {
  metalLayer.framebufferOnly = false
}

Step 4: In your SCNView's delegate (SCNSceneRendererDelegate), use the texture:

func renderer(renderer: SCNSceneRenderer, didRenderScene scene: SCNScene, atTime time: NSTimeInterval) {
    if let texture = AppManager.sharedInstance.currentSceneDrawable?.texture where !texture.framebufferOnly {
      AppManager.sharedInstance.currentSceneDrawable = nil
      // Get image from texture
      let image = texture.toImage()
      // Use the image for video recording
    }
}

extension MTLTexture {
  func bytes() -> UnsafeMutablePointer<Void> {
    let width = self.width
    let height   = self.height
    let rowBytes = self.width * 4
    let p = malloc(width * height * 4) // ownership passes to the caller; toImage() frees it via the data provider
    self.getBytes(p, bytesPerRow: rowBytes, fromRegion: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
    return p
  }

  func toImage() -> UIImage? {
    var uiImage: UIImage?
    let p = bytes()
    let pColorSpace = CGColorSpaceCreateDeviceRGB()
    let rawBitmapInfo = CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue
    let bitmapInfo:CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)

    let textureSize = self.width * self.height * 4
    let rowBytes = self.width * 4
    // Free the malloc'ed buffer from bytes() when the provider is released (fixes the leak).
    let provider = CGDataProviderCreateWithData(nil, p, textureSize, { _, data, _ in
      free(UnsafeMutablePointer<Void>(data))
    })!

    if let cgImage = CGImageCreate(self.width, self.height, 8, 32, rowBytes, pColorSpace, bitmapInfo, provider, nil, true, CGColorRenderingIntent.RenderingIntentDefault) {
      uiImage = UIImage(CGImage: cgImage)
    }
    return uiImage
  }

  func toImageAsJpeg(compressionQuality: CGFloat) -> UIImage? {
    // Round-trip through JPEG data; returns nil if either conversion fails.
    guard let image = toImage(),
          jpegData = UIImageJPEGRepresentation(image, compressionQuality) else { return nil }
    return UIImage(data: jpegData)
  }
}

Step 5 (optional): You may need to verify in the CAMetalLayer that the drawable you got is the one you're targeting (in case several CAMetalLayers exist at the same time). A sketch of that check follows.
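
One hedged way to do that check (the `recordingLayer` property on AppManager is hypothetical): CAMetalDrawable exposes the layer it came from, so the swizzled method can compare layer identity before saving the drawable.

  func newNextDrawable() -> CAMetalDrawable? {
    let drawable = originalNextDrawable()
    // Only keep drawables coming from the layer we are recording,
    // in case several CAMetalLayers are alive at once.
    if let drawable = drawable where drawable.layer === AppManager.sharedInstance.recordingLayer {
      AppManager.sharedInstance.currentSceneDrawable = drawable
    }
    return drawable
  }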

【Discussion】:

  • It works perfectly on some of my devices, but on some of them it crashes inside the SceneKit framework with the message "Set a renderTarget in the RenderPassDescriptor or set renderTargetWidth and renderTargetHeight." Yet your app Facemous seems able to produce correct video. Could you help me with this? I can create a sample for you.
【Solution 3】:

It's actually pretty easy! Here is pseudocode for how I would do it (on the SCNView):

int numberOfFrames = 300;
int currentFrame = 0;
int framesPerSecond = 30;

-(void) renderAFrame{
    // use floating-point math — 1/framesPerSecond is 0 in integer math
    [self renderAtTime:(double)currentFrame / framesPerSecond];

    NSImage *frame = [self snapshot];

    // save the image with the frame number in the name such as f_001.png

    currentFrame++;

    if(currentFrame < numberOfFrames){
        [self renderAFrame];
    }

}

This will give you a sequence of images at 30 frames per second that you can import into any editing software and convert to a video.
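
A rough Swift equivalent of the pseudocode above, as a sketch (my illustration, assuming iOS and that the scene's animations use the scene-time base, so setting sceneTime actually advances them; `renderAllFrames` and its parameters are hypothetical names):

import SceneKit
import UIKit

let framesPerSecond: NSTimeInterval = 30
let numberOfFrames = 300

// `view` is the SCNView to capture; `directory` is a writable directory path.
func renderAllFrames(view: SCNView, directory: NSString) {
    for frame in 0..<numberOfFrames {
        // Step the scene clock manually so every snapshot lands on an exact frame time.
        view.sceneTime = NSTimeInterval(frame) / framesPerSecond
        let image = view.snapshot()
        let path = directory.stringByAppendingPathComponent(String(format: "f_%03d.png", frame))
        UIImagePNGRepresentation(image)?.writeToFile(path, atomically: true)
    }
}

Driving sceneTime explicitly may also help with the synchronization issue mentioned in the discussion below, since each snapshot is taken only after the scene has been stepped to that exact timestamp.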

【Discussion】:

  • Did this work for you? We tried a similar approach, but mutating the scene between frames (e.g. placing nodes or moving the camera) wasn't synchronized with the screenshots/snapshots, meaning you could create a block and it wouldn't be reflected in the next snapshot.
【Solution 4】:

You can do this with an SKVideoNode that you put into an SKScene, which you then map as the texture of an SCNNode's geometry via its SCNMaterial's diffuse.contents (hopefully that's clear ;))

player = AVPlayer(URL: fileURL!)
let videoSpriteKitNodeLeft = SKVideoNode(AVPlayer: player)
let videoNodeLeft = SCNNode()
let spriteKitScene1 = SKScene(size: CGSize(width: 1280 * screenScale, height: 1280 * screenScale))
spriteKitScene1.shouldRasterize = true

videoNodeLeft.geometry = SCNSphere(radius: 30)
spriteKitScene1.scaleMode = .AspectFit
videoSpriteKitNodeLeft.position = CGPoint(
  x: spriteKitScene1.size.width / 2.0, y: spriteKitScene1.size.height / 2.0)
videoSpriteKitNodeLeft.size = spriteKitScene1.size

spriteKitScene1.addChild(videoSpriteKitNodeLeft)

videoNodeLeft.geometry?.firstMaterial?.diffuse.contents = spriteKitScene1
videoNodeLeft.geometry?.firstMaterial?.doubleSided = true

// Flip video upside down, so that it's shown in the right position
var transform = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0.0)

videoNodeLeft.pivot = SCNMatrix4MakeRotation(Float(M_PI_2), 0.0, -1.0, 0.0)
videoNodeLeft.geometry?.firstMaterial?.diffuse.contentsTransform = transform

videoNodeLeft.position = SCNVector3(x: 0, y: 0, z: 0)

scene.rootNode.addChildNode(videoNodeLeft)

I pulled the code from one of my GitHub projects, a 360° video player that plays video inside a 3D sphere using SceneKit: https://github.com/Aralekk/simple360player_iOS/blob/master/simple360player/ViewController.swift

I hope this helps!

Arthur

【Discussion】:
