
I'm trying to combine a single video with a single image. This is not about combining many images into a single video, as in How to merge a *single image* with a video.

I'm using AVMutableComposition to combine the tracks. My app can combine videos and images (and as it stands, combining videos works fine!). I try to use AVAssetWriter to turn a single image into a video (I believe this is my problem, but I'm not 100% sure). I then save it to the app's documents directory. From there, I go into my merge and combine a video with the image that has now been turned into a video.

Flow:

user selects an image ->

the image is turned into a video with AVAssetWriter ->

that video is merged with a video I already have preset ->

Result: 1 video created from the selected image and the preset video.

The problem with what I have: my code produces blank space where the image should be inside the video. That is, the ImageConverter file I have will convert the image to a video, but I only see the LAST frame as the image, while every other frame is transparent, as if the image weren't there. So if I convert the image into a 5-second video (say at 30 frames/sec), I see blank space for (30 * 5) - 1 frames and then, on that last frame, the image finally appears. I'm just looking for guidance on how to turn a single image into a video OR merge a video and an image together WITHOUT converting the image to a video. Thanks!
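For reference, a small diagnostic sketch (Swift 2 era, matching the code below) that samples a frame from the generated picture-video with AVAssetImageGenerator; imageVideoURL is a placeholder for wherever the converter saved its output. If a mid-clip frame comes back blank while the final one doesn't, the writer only ever received one pixel buffer, which is the symptom described above.

    let asset = AVAsset(URL: imageVideoURL) // placeholder URL for the generated picture-video
    let generator = AVAssetImageGenerator(asset: asset)
    generator.requestedTimeToleranceBefore = kCMTimeZero
    generator.requestedTimeToleranceAfter = kCMTimeZero
    do {
        let midpoint = CMTime(seconds: CMTimeGetSeconds(asset.duration)/2, preferredTimescale: 600)
        let frame = try generator.copyCGImageAtTime(midpoint, actualTime: nil)
        print("got a frame: \(frame)")
    } catch {
        print("could not read a frame: \(error)")
    }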

Merge the files here

func merge() { 
    if let firstAsset = controller.firstAsset, secondAsset = self.asset { 

     // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances. 
     let mixComposition = AVMutableComposition() 

     let firstTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, 
                    preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) 
     do { 
      try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, CMTime(seconds: 8, preferredTimescale: 600)), 
              ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0] , 
              atTime: kCMTimeZero) 
     } catch _ { 
      print("Failed to load first track") 
     } 

     do { 
      //HERE THE TIME IS 0.666667, BUT SHOULD BE 0 
      print(CMTimeGetSeconds(secondAsset.duration), CMTimeGetSeconds(firstTrack.timeRange.duration)) 
      try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration), 
              ofTrack: secondAsset.tracksWithMediaType(AVMediaTypeVideo)[0], 
              atTime: firstTrack.timeRange.duration) 
     } catch _ { 
      print("Failed to load second track") 
     } 
     do { 
      try firstTrack.insertTimeRange(CMTimeRangeMake(CMTime(seconds: 8+CMTimeGetSeconds(secondAsset.duration), preferredTimescale: 600), firstAsset.duration), 
              ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0] , 
              atTime: firstTrack.timeRange.duration+secondTrack.timeRange.duration) 
     } catch _ { 
      print("failed") 
     } 

     // 3 - Audio track 
     if let loadedAudioAsset = controller.audioAsset { 
      let audioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: 0) 
      do { 
       try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration), 
               ofTrack: loadedAudioAsset.tracksWithMediaType(AVMediaTypeAudio)[0] , 
               atTime: kCMTimeZero) 
      } catch _ { 
       print("Failed to load Audio track") 
      } 
     } 

     // 4 - Get path 
     let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] 
     let dateFormatter = NSDateFormatter() 
     dateFormatter.dateStyle = .LongStyle 
     dateFormatter.timeStyle = .ShortStyle 
     let date = dateFormatter.stringFromDate(NSDate()) 
     let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo.mov") 
     let url = NSURL(fileURLWithPath: savePath) 
     _ = try? NSFileManager().removeItemAtURL(url) 

     // 5 - Create Exporter 
     print("exporting") 
     guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return } 
     exporter.outputURL = url 
     exporter.outputFileType = AVFileTypeQuickTimeMovie 
     exporter.shouldOptimizeForNetworkUse = false 
     exporter.videoComposition = mainComposition 

     // 6 - Perform the Export 
     controller.currentlyEditing = true 
     exporter.exportAsynchronouslyWithCompletionHandler() { 
      dispatch_async(dispatch_get_main_queue()) { _ in 
       print("done") 
       self.controller.currentlyEditing = false 
       self.controller.merged = true 
       self.button.blurView.superview?.hidden = true 
       self.controller.player.replaceCurrentItemWithPlayerItem(AVPlayerItem(URL: url)) 
       self.controller.firstAsset = AVAsset(URL: url) 
      } 
     } 
    } 
} 
func exportDidFinish(session: AVAssetExportSession) { 
    if session.status == AVAssetExportSessionStatus.Failed { 
     print(session.error) 
    } 
    if session.status == AVAssetExportSessionStatus.Completed { 
     print("succed") 
    } 
} 

Convert the image here

class MyConverter: NSObject { 

    var image:UIImage! 

    convenience init(image:UIImage) { 
     self.init() 
     self.image = image 
    } 

    var outputURL: NSURL { 
     let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] 
     let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo-pic.mov") 
     return getURL(savePath) 
    } 

    func getURL(path:String) -> NSURL { 
     let movieDestinationUrl = NSURL(fileURLWithPath: path) 
     _ = try? NSFileManager().removeItemAtURL(movieDestinationUrl) 
     let url = NSURL(fileURLWithPath: path) 
     return url 
    } 

    func build(completion:() -> Void) { 
     guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else { 
      fatalError("AVAssetWriter error") 
     } 
     let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(image.size.width)), AVVideoHeightKey : NSNumber(float: Float(image.size.height))] 

     guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else { 
      fatalError("Negative : Can't apply the Output settings...") 
     } 

     let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings) 
     let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(image.size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(image.size.height))] 
     let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary) 

     if videoWriter.canAddInput(videoWriterInput) { 
      videoWriter.addInput(videoWriterInput) 
     } 

     if videoWriter.startWriting() { 
      videoWriter.startSessionAtSourceTime(kCMTimeZero) 
      assert(pixelBufferAdaptor.pixelBufferPool != nil) 
     } 

     let media_queue = dispatch_queue_create("mediaInputQueue", nil) 

     videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: {() -> Void in 
      var appendSucceeded = true 
      //Time HERE IS ZERO, but in Merge file, it is 0.66667 
      let presentationTime = CMTimeMake(0, 600) 

      var pixelBuffer: CVPixelBuffer? = nil 
      let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer) 

      if let pixelBuffer = pixelBuffer where status == 0 { 
       let managedPixelBuffer = pixelBuffer 
      CVPixelBufferLockBaseAddress(managedPixelBuffer, 0) 

       let data = CVPixelBufferGetBaseAddress(managedPixelBuffer) 
       let rgbColorSpace = CGColorSpaceCreateDeviceRGB() 
       let context = CGBitmapContextCreate(data, Int(self.image.size.width), Int(self.image.size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue) 

       CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.image.size.width), CGFloat(self.image.size.height))) 


       CGContextDrawImage(context, CGRectMake(0, 0, self.image.size.width, self.image.size.height), self.image.CGImage) 

       CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0) 

       appendSucceeded =  pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime) 
      } else { 
       print("Failed to allocate pixel buffer") 
       appendSucceeded = false 
      } 
      if !appendSucceeded { 
       print("append failed") 
      } 
      videoWriterInput.markAsFinished() 
      videoWriter.finishWritingWithCompletionHandler {() -> Void in 
       print("FINISHED!!!!!") 
       completion() 
      } 
     }) 
    } 
} 

Note: I found that if I do a print(presentationTime) inside the ImageConverter it prints 0, and then when I print the duration inside the merge, I get 0.666667.

Note: No answers yet, but I'll keep putting a bounty on this question until I find an answer or someone helps me out! Thanks!

Answers


Right, so I actually dealt with this problem a while ago. The issue is really with how you're creating the video from the image. What you need to do is append the pixel buffer at time zero and then AGAIN at the end, otherwise you end up with an empty video until the very last frame, exactly as you're experiencing.

The following code is my best attempt at updating your code. At the end I'll post my own solution, which is in Objective-C, in case it helps someone else.

func build(completion:() -> Void) { 
    guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else { 
     fatalError("AVAssetWriter error") 
    } 

    // This might not be a problem for you but width HAS to be divisible by 16 or the movie will come out distorted... don't ask me why. So this is a safeguard 
    let pixelsToRemove = fmod(Double(image.size.width), 16) 
    let pixelsToAdd = 16 - pixelsToRemove 
    let size: CGSize = CGSizeMake(image.size.width + CGFloat(pixelsToAdd), image.size.height) 

    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(size.width)), AVVideoHeightKey : NSNumber(float: Float(size.height))] 

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else { 
     fatalError("Negative : Can't apply the Output settings...") 
    } 

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings) 
    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(size.height))] 
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary) 

    if videoWriter.canAddInput(videoWriterInput) { 
     videoWriter.addInput(videoWriterInput) 
    } 

    if videoWriter.startWriting() { 
     videoWriter.startSessionAtSourceTime(kCMTimeZero) 
     assert(pixelBufferAdaptor.pixelBufferPool != nil) 
    } 

    // For simplicity, I'm going to remove the media queue you created and instead explicitly wait until I can append since i am only writing one pixel buffer at two different times 

    var pixelBufferCreated = true 
    var pixelBuffer: CVPixelBuffer? = nil 
    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer) 

    if let pixelBuffer = pixelBuffer where status == 0 { 
     let managedPixelBuffer = pixelBuffer 
     CVPixelBufferLockBaseAddress(managedPixelBuffer, 0) 

     let data = CVPixelBufferGetBaseAddress(managedPixelBuffer) 
     let rgbColorSpace = CGColorSpaceCreateDeviceRGB() 
     let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue) 

     CGContextClearRect(context, CGRectMake(0, 0, CGFloat(size.width), CGFloat(size.height))) 

     CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.image.CGImage) 

     CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0) 
    } else { 
     print("Failed to allocate pixel buffer") 
     pixelBufferCreated = false 
    } 

    if (pixelBufferCreated) { 
     // Here is where the magic happens, we have our pixelBuffer it's time to start writing 

     // FIRST - add at time zero 
      var appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: kCMTimeZero) 
     if (!appendSucceeded) { 
      // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though 
     } 
     // SECOND - wait until the writer is ready for more data with an empty while 
      while !videoWriterInput.readyForMoreMediaData {} 

     // THIRD - make a CMTime with the desired length of your picture-video. I am going to arbitrarily make it 5 seconds here 
     let frameTime: CMTime = CMTimeMake(5, 1) // 5 seconds 

     // FOURTH - add the same exact pixel to the end of the video you are creating 
      appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: frameTime) 
     if (!appendSucceeded) { 
      // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though 
     } 

      videoWriterInput.markAsFinished() 
      videoWriter.endSessionAtSourceTime(frameTime) 
     videoWriter.finishWritingWithCompletionHandler {() -> Void in 
      if videoWriter.status != .Completed { 
       // Error writing the video... handle appropriately 
      } else { 
       print("FINISHED!!!!!") 
       completion() 
      } 
     } 
    } 
} 
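For completeness, a hedged sketch of how the converter and the merge might be chained together; MyConverter is the class from your question, and selectedImage is a placeholder for the image the user picked. Since the completion fires from the writer's callback queue, I hop back to the main queue before merging.

    let converter = MyConverter(image: selectedImage) // placeholder image
    converter.build {
        // The picture-video now exists at converter.outputURL; load it and hand it to merge().
        self.asset = AVAsset(URL: converter.outputURL)
        dispatch_async(dispatch_get_main_queue()) {
            self.merge()
        }
    }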

How I managed to do this in Obj-C

Note: I had to make some changes to make this standalone, so this method returns a string containing the path the video WILL be written to. It is returned before the video finishes writing, so it may be possible to access it before it's ready if you're not careful.

-(NSString *)makeMovieFromImageData:(NSData *)imageData { 
    NSError *error; 
    UIImage *image = [UIImage imageWithData:imageData]; 


    // width has to be divisible by 16 or the movie comes out distorted... don't ask me why 
    double pixelsToRemove = fmod(image.size.width, 16); 

    double pixelsToAdd = 16 - pixelsToRemove; 

    CGSize size = CGSizeMake(image.size.width+pixelsToAdd, image.size.height); 

    BOOL hasFoundValidPath = NO; 
    NSURL *tempFileURL; 
    NSString *outputFile; 

    while (!hasFoundValidPath) { 

     NSString *guid = [[NSUUID new] UUIDString]; 
     outputFile = [NSString stringWithFormat:@"picture_%@.mp4", guid]; 

      NSString *outputDirectory = NSTemporaryDirectory(); 

     NSString *tempPath = [outputDirectory stringByAppendingPathComponent:outputFile]; 

     // Will fail if destination already has a file 
     if ([[NSFileManager defaultManager] fileExistsAtPath:tempPath]) { 
      continue; 
     } else { 
      hasFoundValidPath = YES; 
     } 
     tempFileURL = [NSURL fileURLWithPath:tempPath]; 
    } 


    // Start writing 
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:tempFileURL 
                  fileType:AVFileTypeQuickTimeMovie 
                   error:&error]; 

    if (error) { 
     // handle error 
    } 

    NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys: 
            AVVideoCodecH264, AVVideoCodecKey, 
            [NSNumber numberWithInt:size.width], AVVideoWidthKey, 
            [NSNumber numberWithInt:size.height], AVVideoHeightKey, 
            nil]; 

    AVAssetWriterInput* writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo 
                     outputSettings:videoSettings]; 

    NSDictionary *bufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys: 
             [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, nil]; 

    AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput 
                                sourcePixelBufferAttributes:bufferAttributes]; 
    if ([videoWriter canAddInput:writerInput]) { 
     [videoWriter addInput:writerInput]; 
    } else { 
     // handle error 
    } 

    [videoWriter startWriting]; 

    [videoWriter startSessionAtSourceTime:kCMTimeZero]; 

    CGImageRef img = [image CGImage]; 

     // Now I am going to create the pixel buffer 
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys: 
          [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey, 
          [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, 
          nil]; 
     CVPixelBufferRef buffer = NULL; 

     CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, 
               size.height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options, 
               &buffer); 

     if (!(status == kCVReturnSuccess && buffer != NULL)) { 
     NSLog(@"There be some issue. We didn't get a buffer from the image"); 
    } 


    CVPixelBufferLockBaseAddress(buffer, 0); 
    void *pxdata = CVPixelBufferGetBaseAddress(buffer); 

    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB(); 

    CGContextRef context = CGBitmapContextCreate(pxdata, size.width, 
               size.height, 8, 4*size.width, rgbColorSpace, 
               (CGBitmapInfo)kCGImageAlphaPremultipliedFirst); 
    CGContextSetRGBFillColor(context, 0, 0, 0, 0); 

    CGContextConcatCTM(context, CGAffineTransformIdentity); 

     CGContextDrawImage(context, CGRectMake(0, 0, size.width, 
               size.height), img); 
    CGColorSpaceRelease(rgbColorSpace); 
    CGContextRelease(context); 

    CVPixelBufferUnlockBaseAddress(buffer, 0); 

    // At this point we have our buffer so we are going to start by adding to time zero 

    [adaptor appendPixelBuffer:buffer withPresentationTime:kCMTimeZero]; 

    while (!writerInput.readyForMoreMediaData) {} // wait until ready 

    CMTime frameTime = CMTimeMake(5, 1); // 5 second frame 

    [adaptor appendPixelBuffer:buffer withPresentationTime:frameTime]; 
    CFRelease(buffer); 

    [writerInput markAsFinished]; 

    [videoWriter endSessionAtSourceTime:frameTime]; 

    [videoWriter finishWritingWithCompletionHandler:^{ 
     if (videoWriter.status != AVAssetWriterStatusCompleted) { 
      // Error 
     } 
    }]; // end videoWriter finishWriting Block 

    // NOTE: the URL is actually being returned before the videoWriter finishes writing so be careful to not access it until it's ready 
    return outputFile; 
} 

I'm completely grateful for your attempt at solving my problem by combining the Obj-C and Swift code. I haven't been able to test this code, but since I only have 15 hours left on the bounty and you gave the most thorough answer, I'll pick yours. I appreciate your help! Thank you so much! – impression7vx


This worked great! But could you explain why I have to append at time zero and again at the end? It makes sense, but why can't I just append a single frame at the presentation time? – impression7vx


I'm not completely sure. My best guess is that internally it works by showing a frame until there's a new frame to replace it. So you have to append it at the beginning so it displays right away, and then append it again at the presentation time to mark the end (so it knows how long to show the first frame). If you only append one frame at the presentation time, it would only show from then until the video ends and nothing before. – gadu
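To make that behaviour concrete, a tiny sketch (Swift 2 style) assuming a pixel buffer adaptor adaptor, its writer input input, and two already-filled buffers bufferA and bufferB, all hypothetical. Each buffer is shown from its presentation time until the next appended time, so the final append only marks where the previous frame stops.

    adaptor.appendPixelBuffer(bufferA, withPresentationTime: kCMTimeZero) // bufferA is shown from 0s to 3s
    while !input.readyForMoreMediaData {}
    adaptor.appendPixelBuffer(bufferB, withPresentationTime: CMTimeMake(3, 1)) // bufferB is shown from 3s to 5s
    while !input.readyForMoreMediaData {}
    adaptor.appendPixelBuffer(bufferB, withPresentationTime: CMTimeMake(5, 1)) // closes the timeline at the 5s mark
    input.markAsFinished()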


Here is what works for me. I hope it's useful to you:

-(void)MixVideo:(NSString *)vidioUrlString withImage:(UIImage *)img 
{ 
    NSURL *videoUrl1 = [[NSURL alloc] initFileURLWithPath:vidioUrlString]; 
    AVURLAsset* videoAsset = [[AVURLAsset alloc]initWithURL:videoUrl1 options:nil]; 

    AVMutableComposition* mixComposition = [AVMutableComposition composition]; 

    AVMutableCompositionTrack *compositionVideoTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid]; 

    AVAssetTrack *clipVideoTrack = [[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0]; 

    AVMutableCompositionTrack *compositionAudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid]; 

    AVAssetTrack *clipAudioTrack = [[videoAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0]; 


    [compositionVideoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipVideoTrack atTime:kCMTimeZero error:nil]; 

    [compositionAudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipAudioTrack atTime:kCMTimeZero error:nil]; 

    [compositionVideoTrack setPreferredTransform:[[[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] preferredTransform]]; 

    CGSize sizeOfVideo = CGSizeMake(320, 568); 

    //Image of watermark 
    UIImage *myImage=img; 

    CALayer *layerCa = [CALayer layer]; 

    layerCa.contents = (id)myImage.CGImage; 
    layerCa.frame = CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height); 

    layerCa.opacity = 1.0; 

    CALayer *parentLayer=[CALayer layer]; 

    CALayer *videoLayer=[CALayer layer]; 

    parentLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height); 

    videoLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height); 
    [parentLayer addSublayer:videoLayer]; 

    [parentLayer addSublayer:layerCa]; 

    AVMutableVideoComposition *videoComposition=[AVMutableVideoComposition videoComposition] ; 

    videoComposition.frameDuration=CMTimeMake(1, 30); 

    videoComposition.renderSize=sizeOfVideo; 

    videoComposition.animationTool=[AVVideoCompositionCoreAnimationTool videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer]; 

    AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction]; 

    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, [mixComposition duration]); 

    AVAssetTrack *videoTrack = [[mixComposition tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0]; 

    AVMutableVideoCompositionLayerInstruction* layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack]; 

    instruction.layerInstructions = [NSArray arrayWithObject:layerInstruction]; 

    videoComposition.instructions = [NSArray arrayWithObject: instruction]; 

    NSString *documentsDirectory = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES)objectAtIndex:0]; 

    NSString *finalPath = [documentsDirectory stringByAppendingFormat:@"/myVideo.mp4"]; 

    if ([[NSFileManager defaultManager] fileExistsAtPath:finalPath]) 
    { 
     [[NSFileManager defaultManager] removeItemAtPath:finalPath error:nil]; 
    } 

    // SDAVAssetExportSession is a third-party exporter (https://github.com/rs/SDAVAssetExportSession) 
    SDAVAssetExportSession *encoder = [SDAVAssetExportSession.alloc initWithAsset:mixComposition]; 
    encoder.outputFileType = AVFileTypeMPEG4; 
    encoder.outputURL = [NSURL fileURLWithPath:finalPath]; 
    encoder.videoComposition=videoComposition; 
    encoder.videoSettings = @ 
    { 
    AVVideoCodecKey: AVVideoCodecH264, 
    AVVideoWidthKey: @320, 
    AVVideoHeightKey: @568, 
    AVVideoCompressionPropertiesKey: @ 
     { 
     AVVideoAverageBitRateKey: @900000, 
     AVVideoProfileLevelKey: AVVideoProfileLevelH264MainAutoLevel, 
     }, 
    }; 
    encoder.audioSettings = @ 
    { 
    AVFormatIDKey: @(kAudioFormatMPEG4AAC), 
    AVNumberOfChannelsKey: @2, 
    AVSampleRateKey: @44100, 
    AVEncoderBitRateKey: @128000, 
    }; 

    [encoder exportAsynchronouslyWithCompletionHandler:^ 
    { 

     if (encoder.status == AVAssetExportSessionStatusCompleted) 
     { 

      NSLog(@"Video export succeeded"); 
      if (UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(finalPath)) 
      { 

       NSLog(@"Video exported successfully path = %@ ",finalPath); 
      } 

     } 
     else if (encoder.status == AVAssetExportSessionStatusCancelled) 
     { 
      NSLog(@"Video export cancelled"); 
     } 
     else 
     { 
      NSLog(@"Video export failed with error: %@ (%ld)", encoder.error.localizedDescription, (long)encoder.error.code); 
     } 
    }]; 

} 

I haven't had time to look at it, but it's in Obj-C and mine is in Swift, so unfortunately I can't mark this answer as the right one, but thanks for your effort and I'll definitely transcode it to Swift. Thanks! – impression7vx


You can try this code using bridging. For example: create an Objective-C .h and .m, put this method in it, and access it from Swift. –
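A minimal sketch of that bridging setup, assuming the method above lives in a hypothetical VideoMixer class declared in VideoMixer.h:

    // In the bridging header (e.g. MyApp-Bridging-Header.h, a hypothetical file name):
    //   #import "VideoMixer.h"

    // Swift side (Swift 2 era syntax, to match the question):
    let mixer = VideoMixer()
    mixer.MixVideo(videoPath, withImage: selectedImage) // videoPath and selectedImage are placeholders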


Here's what works for me to export a single image into a video (the video is moving, not static). Swift 3.

// 
// CXEImageToAssetURL.swift 
// CXEngine 
// 
// Created by wulei on 16/12/14. 
// Copyright © 2016年 wulei. All rights reserved. 
// 

import Foundation 
import AVFoundation 
import UIKit 
import Photos 

fileprivate extension UIImage{ 
    func normalizedImage() -> UIImage?{ 
//  if self.imageOrientation == .up{ 
//   return self 
//  } 
     let factor = CGFloat(0.8) 
     UIGraphicsBeginImageContextWithOptions(CGSize(width:self.size.width * factor, height: self.size.height * factor), false, self.scale) 
     self.draw(in: CGRect(x: 0, y: 0, width: self.size.width * factor, height: self.size.height * factor)) 
     let normalImage = UIGraphicsGetImageFromCurrentImageContext() 
     UIGraphicsEndImageContext() 
     return normalImage 
    } 

// func clipImage() -> UIImage { 

//  var x = CGFloat(0) 
//  var y = CGFloat(0) 
//  let imageHeight = (self.size.width * 9)/16 
//  y = (self.size.height - imageHeight)/2 
//  var rcTmp = CGRect(origin: CGPoint(x: x, y: y), size: self.size) 
//  if self.scale > 1.0 { 
//   rcTmp = CGRect(x: rcTmp.origin.x * self.scale, y: rcTmp.origin.y * self.scale, width: rcTmp.size.width * self.scale, height: rcTmp.size.height * self.scale) 
//  } 
//  rcTmp.size.height = imageHeight 
//  let imageRef = self.cgImage!.cropping(to: rcTmp) 
//  let result = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation) 
//  return result 
//  return self 
// } 
} 

public typealias CXEImageToVideoProgress = (Float) -> Void 
typealias CXEMovieMakerUIImageExtractor = (AnyObject) -> UIImage? 


public class CXEImageToVideo: NSObject{ 

    //MARK: Private Properties 

    private var assetWriter:AVAssetWriter! 
    private var writeInput:AVAssetWriterInput! 
    private var bufferAdapter:AVAssetWriterInputPixelBufferAdaptor! 
    private var videoSettings:[String : Any]! 
    private var frameTime:CMTime! 
    private var fileURL:URL! 
    private var duration:Int = 0 

    //MARK: Class Method 

    private func videoSettingsFunc(width:Int, height:Int) -> [String: Any]{ 
     if(Int(width) % 16 != 0){ 
      print("warning: video settings width must be divisible by 16") 
     } 

     let videoSettings:[String: Any] = [AVVideoCodecKey: AVVideoCodecH264, 
              AVVideoWidthKey: width, 
              AVVideoHeightKey: height] 

     return videoSettings 
    } 

    //MARK: Public methods 

    public init(fileURL: URL, videoWidth:Int, videoHeight:Int) { 
     super.init() 

     self.videoSettings = videoSettingsFunc(width: videoWidth, height: videoHeight) 

     self.fileURL = fileURL 
     self.assetWriter = try! AVAssetWriter(url: self.fileURL, fileType: AVFileTypeQuickTimeMovie) 

     self.writeInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings) 
     assert(self.assetWriter.canAdd(self.writeInput), "add failed") 

     self.assetWriter.add(self.writeInput) 
     let bufferAttributes:[String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32ARGB)] 
     self.bufferAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: self.writeInput, sourcePixelBufferAttributes: bufferAttributes) 
     self.frameTime = CMTimeMake(1, 25) 
    } 

// public func createMovieFrom(url: URL, duration:Int, progressExtractor: CXEImageToVideoProgress){ 
//  self.duration = duration 
//  self.createMovieFromSource(image: url as AnyObject, extractor:{(inputObject:AnyObject) ->UIImage? in 
//   return UIImage(data: try! Data(contentsOf: inputObject as! URL))}, progressExtractor: progressExtractor) 
// } 

    public func createMovieFrom(imageData: Data, duration:Int, progressExtractor: CXEImageToVideoProgress){ 
     var image = UIImage(data: imageData) 
     image = image?.normalizedImage() 
     assert(image != nil) 
     self.duration = duration 

     self.createMovieFromSource(image: image!, extractor: {(inputObject:AnyObject) -> UIImage? in 
      return inputObject as? UIImage}, progressExtractor: progressExtractor) 
    } 

    //MARK: Private methods 

    private func createMovieFromSource(image: AnyObject, extractor: @escaping CXEMovieMakerUIImageExtractor, progressExtractor: CXEImageToVideoProgress){ 

     self.assetWriter.startWriting() 
     let zeroTime = CMTimeMake(Int64(0),self.frameTime.timescale) 
     self.assetWriter.startSession(atSourceTime: zeroTime) 

     while !self.writeInput.isReadyForMoreMediaData { 
      usleep(100) 
     } 

     var sampleBuffer:CVPixelBuffer? 
     var pxDataBuffer:CVPixelBuffer? 
     let img = extractor(image) 
     assert(img != nil) 

     let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] 
     let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int 
     let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int 
     let originHeight = frameWidth * img!.cgImage!.height/img!.cgImage!.width 
     let heightDifference = originHeight - frameHeight 

     let frameCounts = self.duration * Int(self.frameTime.timescale) 
     let spacingOfHeight = heightDifference/frameCounts 

     sampleBuffer = self.newPixelBufferFrom(cgImage: img!.cgImage!) 
     assert(sampleBuffer != nil) 

     var presentTime = CMTimeMake(1, self.frameTime.timescale) 
     var stepRows = 0 

     for i in 0..<frameCounts { 
      progressExtractor(Float(i)/Float(frameCounts)) 

      CVPixelBufferLockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0)) 
      let pointer = CVPixelBufferGetBaseAddress(sampleBuffer!) 
      var pxData = pointer?.assumingMemoryBound(to: UInt8.self) 
      let bytes = CVPixelBufferGetBytesPerRow(sampleBuffer!) * stepRows 
      pxData = pxData?.advanced(by: bytes) 

      let status = CVPixelBufferCreateWithBytes(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, pxData!, CVPixelBufferGetBytesPerRow(sampleBuffer!), nil, nil, options as CFDictionary?, &pxDataBuffer) 
      assert(status == kCVReturnSuccess && pxDataBuffer != nil, "newPixelBuffer failed") 
      CVPixelBufferUnlockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0)) 

      while !self.writeInput.isReadyForMoreMediaData { 
       usleep(100) 
      } 
      if (self.writeInput.isReadyForMoreMediaData){ 
       if i == 0{ 
        self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: zeroTime) 
       }else{ 
        self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: presentTime) 
       } 
       presentTime = CMTimeAdd(presentTime, self.frameTime) 
      } 

      stepRows += spacingOfHeight 
     } 


     self.writeInput.markAsFinished() 
     self.assetWriter.finishWriting {} 

     var isSuccess:Bool = false 
     while(!isSuccess){ 
      switch self.assetWriter.status { 
      case .completed: 
       isSuccess = true 
       print("completed") 
      case .writing: 
       usleep(100) 
       print("writing") 
      case .failed: 
       isSuccess = true 
       print("failed") 
      case .cancelled: 
       isSuccess = true 
       print("cancelled") 
      default: 
       isSuccess = true 
       print("unknown") 
      } 
     } 
    } 

    private func newPixelBufferFrom(cgImage:CGImage) -> CVPixelBuffer?{ 
     let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] 
     var pxbuffer:CVPixelBuffer? 
     let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int 
     let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int 

     let originHeight = frameWidth * cgImage.height/cgImage.width 

     let status = CVPixelBufferCreate(kCFAllocatorDefault, frameWidth, originHeight, kCVPixelFormatType_32ARGB, options as CFDictionary?, &pxbuffer) 
     assert(status == kCVReturnSuccess && pxbuffer != nil, "newPixelBuffer failed") 

     CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0)) 
     let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!) 
     let rgbColorSpace = CGColorSpaceCreateDeviceRGB() 
     let context = CGContext(data: pxdata, width: frameWidth, height: originHeight, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) 
     assert(context != nil, "context is nil") 

     context!.concatenate(CGAffineTransform.identity) 
     context!.draw(cgImage, in: CGRect(x: 0, y: 0, width: frameWidth, height: originHeight)) 
     CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0)) 
     return pxbuffer 
    } 
} 
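A short usage sketch (Swift 3, matching the class above); imageData stands in for the selected image's Data, and the 640x480 output size is an arbitrary choice that keeps the width divisible by 16:

    let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("imageVideo.mov")
    try? FileManager.default.removeItem(at: outputURL) // AVAssetWriter cannot overwrite an existing file

    let maker = CXEImageToVideo(fileURL: outputURL, videoWidth: 640, videoHeight: 480)
    maker.createMovieFrom(imageData: imageData, duration: 5) { progress in
        print("progress: \(progress)")
    }
    // createMovieFromSource polls the writer until it reports a final status, so by the time the
    // call returns the asset at outputURL is ready to be merged or played.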

Swift 3 code that should be a good starting point. If used in production, it still needs error handling and handling of video scaling/orientation for some videos.

@discardableResult func merge(
    video videoPath: String, 
    withForegroundImage foregroundImage: UIImage, 
    completion: @escaping (AVAssetExportSession) -> Void) -> AVAssetExportSession { 

    let videoUrl = URL(fileURLWithPath: videoPath) 
    let videoUrlAsset = AVURLAsset(url: videoUrl, options: nil) 

    // Setup `mutableComposition` from the existing video 
    let mutableComposition = AVMutableComposition() 
    let videoAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaTypeVideo).first! 
    let videoCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid) 
    videoCompositionTrack.preferredTransform = videoAssetTrack.preferredTransform 
    try! videoCompositionTrack.insertTimeRange(CMTimeRange(start:kCMTimeZero, duration:videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: kCMTimeZero) 
    let audioAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaTypeAudio).first! 
    let audioCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid) 
    try! audioCompositionTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration:audioAssetTrack.timeRange.duration), of: audioAssetTrack, at: kCMTimeZero) 

    // Create a `videoComposition` to represent the `foregroundImage` 
    let videoSize: CGSize = videoCompositionTrack.naturalSize 
    let frame = CGRect(x: 0.0, y: 0.0, width: videoSize.width, height: videoSize.height) 
    let imageLayer = CALayer() 
    imageLayer.contents = foregroundImage.cgImage 
    imageLayer.frame = frame 
    let videoLayer = CALayer() 
    videoLayer.frame = frame 
    let animationLayer = CALayer() 
    animationLayer.frame = frame 
    animationLayer.addSublayer(videoLayer) 
    animationLayer.addSublayer(imageLayer) 
    let videoComposition = AVMutableVideoComposition(propertiesOf: videoCompositionTrack.asset!) 
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: animationLayer) 

    // Export the video 
    let documentDirectory = NSSearchPathForDirectoriesInDomains(FileManager.SearchPathDirectory.cachesDirectory, FileManager.SearchPathDomainMask.userDomainMask, true).first! 
    let documentDirectoryUrl = URL(fileURLWithPath: documentDirectory) 
    let destinationFilePath = documentDirectoryUrl.appendingPathComponent("video_\(NSUUID().uuidString).mov") 
    let exportSession = AVAssetExportSession(asset: mutableComposition, presetName: AVAssetExportPresetHighestQuality)! 
    exportSession.videoComposition = videoComposition 
    exportSession.outputURL = destinationFilePath 
    exportSession.outputFileType = AVFileTypeQuickTimeMovie 
    exportSession.exportAsynchronously { [weak exportSession] in 
     if let strongExportSession = exportSession { 
      completion(strongExportSession) 
     } 
    } 

    return exportSession 
}
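A hedged usage sketch (Swift 3); videoPath and the "watermark" asset name are placeholders:

    let overlay = UIImage(named: "watermark")! // hypothetical asset name
    merge(video: videoPath, withForegroundImage: overlay) { session in
        switch session.status {
        case .completed:
            print("merged video written to \(session.outputURL!)")
        case .failed, .cancelled:
            print("export did not finish: \(String(describing: session.error))")
        default:
            break
        }
    }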