【发布时间】:2016-04-12 15:50:37
【问题描述】:
我正在开发一个人们可以在视频上叠加水印的应用程序,到目前为止我已经能够成功地做到这一点。但是,在我第一次使用自定义叠加保存视频后,如果我再次尝试保存视频,它会失败并出现上述错误:
可选(错误域=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedDescription=Operation Stopped, NSLocalizedFailureReason=无法合成视频。})
以下是我保存视频的代码:
@IBAction func saveVideo(sender: AnyObject) {
self.videoAsset = AVAsset(URL: fileURL as NSURL!)
// Create Video track (Video + Audio)
let videoTrack: AVMutableCompositionTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioTrack:AVMutableCompositionTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.videoAsset.duration), ofTrack: self.videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0] , atTime: kCMTimeZero)
try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.videoAsset.duration), ofTrack: self.videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0] , atTime: kCMTimeZero)
print("Inserted time ranges just fine\n")
} catch let error as NSError {
print("Failed to insert video/audio tracks!!!!\n")
print(error.localizedDescription)
}
videoLayerIntruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let videoAssetTrack: AVAssetTrack = self.videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
var videoAssetOrientation_: UIImageOrientation = .Up
var isVideoAssetPortrait_: Bool = false
let videoTransform:CGAffineTransform = videoAssetTrack.preferredTransform
if videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0 {
videoAssetOrientation_ = .Right
isVideoAssetPortrait_ = true
}
if videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0 {
videoAssetOrientation_ = .Left
isVideoAssetPortrait_ = true
}
if videoTransform.a == 1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == 1.0 {
videoAssetOrientation_ = .Up
}
if videoTransform.a == -1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == -1.0 {
videoAssetOrientation_ = .Down
}
videoLayerIntruction.setTransform(videoAssetTrack.preferredTransform, atTime: kCMTimeZero)
mainInstruction.layerInstructions = [videoLayerIntruction]
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, self.videoAsset.duration)
var naturalSize = CGSize()
if isVideoAssetPortrait_ {
naturalSize = CGSizeMake(videoAssetTrack.naturalSize.height, videoAssetTrack.naturalSize.width)
} else {
naturalSize = videoAssetTrack.naturalSize
}
renderWidth = naturalSize.width
renderHeight = naturalSize.height
parentLayer.frame = CGRectMake(0, 0, renderWidth, renderHeight)
parentLayer.geometryFlipped = true
parentLayer.anchorPoint = CGPointMake(0.5, 0.5)
videoLayer.frame = CGRectMake(0, 0, renderWidth, renderHeight)
self.overlayLayer.frame = CGRectMake(self.renderWidth, self.renderHeight, self.newRatioWidth, self.newRatioHeight)
self.overlayLayer.addAnimation(self.animation, forKey: "contents")
self.overlayLayer.anchorPoint = CGPointMake(0.5, 0.5)
self.overlayLayer.contentsGravity = kCAGravityResizeAspect
parentLayer.addSublayer(videoLayer)
if addedOverlay == true {
parentLayer.addSublayer(overlayLayer)
}
mainCompositionInst.renderScale = 1.0
mainCompositionInst.renderSize = CGSizeMake(renderWidth, renderHeight)
mainCompositionInst.instructions = [mainInstruction]
mainCompositionInst.frameDuration = CMTimeMake(1, 30)
mainCompositionInst.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)
outputURL = NSURL(fileURLWithPath: NSTemporaryDirectory()).URLByAppendingPathComponent("CreatedVideo-\(NSUUID().UUIDString).mov")
let exporter: AVAssetExportSession = AVAssetExportSession(asset: videoAsset, presetName: AVAssetExportPresetHighestQuality)!
exporter.outputURL = outputURL
exporter.outputFileType = AVFileTypeQuickTimeMovie
exporter.shouldOptimizeForNetworkUse = false
exporter.videoComposition = mainCompositionInst
exporter.exportAsynchronouslyWithCompletionHandler({
dispatch_async(dispatch_get_main_queue(), {
self.exportVideo(exporter)
switch exporter.status{
case AVAssetExportSessionStatus.Failed:
print("FAILED EXPORT - \(exporter.error)\n")
case AVAssetExportSessionStatus.Cancelled:
print("canceled \(exporter.error)\n")
default:
print("COMPLETED EXPORT\n")
}
})
})
}
func exportVideo(sender: AVAssetExportSession) {
print("Asked to export\n")
PHPhotoLibrary.sharedPhotoLibrary().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideoAtFileURL(sender.outputURL!)
}, completionHandler: { success, error in
if success {
print("Success! Finished saving video.")
} else {
print("ERROR - " + (error?.localizedDescription)!)
}
})
}
有谁知道第一次成功工作后导致组合开始失败的原因是什么?希望有任何建议!
【问题讨论】:
-
这听起来像是您正在寻找的答案:stackoverflow.com/a/31146867/1638273
-
我怎么没看到这个。让我质疑我的理智和搜索事物的能力......非常感谢!
-
@riverhawk 我面临同样的问题。 @SeanLintern88 提供的解决方案与您所做的相同:
videoLayerIntruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)那是如何解决问题的?
标签: ios swift video avassetexportsession