Skip to content

Commit

Permalink
feat: Use AVAssetWriterInputPixelBufferAdaptor directly, remove unn…
Browse files Browse the repository at this point in the history
…ecessary extra `CVPixelBufferPoolRef` (#203)

Co-authored-by: Hanno J. Gödecke <[email protected]>
Co-authored-by: Hanno J. Gödecke <[email protected]>
  • Loading branch information
3 people authored Jun 18, 2024
1 parent 8107c3e commit 57da702
Show file tree
Hide file tree
Showing 5 changed files with 64 additions and 56 deletions.
10 changes: 4 additions & 6 deletions package/android/libs/filament/include/gltfio/Animator.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,12 +59,10 @@ class UTILS_PUBLIC Animator {
void updateBoneMatrices();

/**
 * Updates the bone matrices of the specified instance using the state of this animator.
* This is useful if you have another instance that has the same skeleton as the asset of this animator,
* and you wish to apply the same animation to those instances (e.g. clothing).
*
* NOTE: In most cases, you only need to use the updateBoneMatrices() method. This method is necessary
* only when you need to synchronize animations across multiple instances with the same skeleton.
* Updates the bone matrices of the specified instance using the state of this animation.
* This is useful if you have other instances that have the same skeleton as the animator
* from this asset, and you want those instances to be animated by the same animation (e.g. clothing).
* Usually you don't need this and using updateBoneMatrices() is enough.
*
* @param instance The instance to update.
*/
Expand Down
4 changes: 2 additions & 2 deletions package/example/Shared/src/AnimationTransitionsRecording.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ const near = 0.1
const far = 1000

const FPS = 60
const DURATION = 3 // seconds
const DURATION = 10 // seconds

function Renderer() {
const { camera } = useFilamentContext()
Expand Down Expand Up @@ -154,7 +154,7 @@ function Renderer() {
repeat={true}
controls={true}
source={{ uri: videoUri }}
onError={(e) => console.error(e)}
onError={(e) => console.error('Video error', e)}
onLoad={() => console.log('On load')}
onEnd={() => console.log('On end')}
/>
Expand Down
10 changes: 4 additions & 6 deletions package/ios/libs/filament/include/gltfio/Animator.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,12 +59,10 @@ class UTILS_PUBLIC Animator {
void updateBoneMatrices();

/**
 * Updates the bone matrices of the specified instance using the state of this animator.
* This is useful if you have another instance that has the same skeleton as the asset of this animator,
* and you wish to apply the same animation to those instances (e.g. clothing).
*
* NOTE: In most cases, you only need to use the updateBoneMatrices() method. This method is necessary
* only when you need to synchronize animations across multiple instances with the same skeleton.
* Updates the bone matrices of the specified instance using the state of this animation.
* This is useful if you have other instances that have the same skeleton as the animator
* from this asset, and you want those instances to be animated by the same animation (e.g. clothing).
* Usually you don't need this and using updateBoneMatrices() is enough.
*
* @param instance The instance to update.
*/
Expand Down
1 change: 0 additions & 1 deletion package/ios/src/RNFAppleFilamentRecorder.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ class AppleFilamentRecorder : public FilamentRecorder {
private:
// Render Target is a single PixelBuffer that acts as a 32BGRA Metal Texture
CVPixelBufferRef _pixelBuffer;
CVPixelBufferPoolRef _pixelBufferPool;
// Actual recorder instance
AVAssetWriter* _assetWriter;
AVAssetWriterInput* _assetWriterInput;
Expand Down
95 changes: 54 additions & 41 deletions package/ios/src/RNFAppleFilamentRecorder.mm
Original file line number Diff line number Diff line change
Expand Up @@ -6,39 +6,33 @@
//

#include "RNFAppleFilamentRecorder.h"
#include <CoreFoundation/CoreFoundation.h>
#include <CoreVideo/CoreVideo.h>
#include <VideoToolbox/VTCompressionProperties.h>
#include <memory>
#include <mutex>

namespace margelo {

static int kCVPixelBufferLock_Write = 0;

AppleFilamentRecorder::AppleFilamentRecorder(std::shared_ptr<Dispatcher> renderThreadDispatcher, int width, int height, int fps,
double bitRate)
: FilamentRecorder(renderThreadDispatcher, width, height, fps, bitRate) {
dispatch_queue_attr_t qos = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, -1);
_queue = dispatch_queue_create("filament.recorder.queue", qos);

Logger::log(TAG, "Creating CVPixelBufferPool...");
int maxBufferCount = 30;
NSDictionary* poolAttributes = @{(NSString*)kCVPixelBufferPoolMinimumBufferCountKey : @(maxBufferCount)};
Logger::log(TAG, "Creating CVPixelBuffer target texture...");
NSDictionary* pixelBufferAttributes = @{
(NSString*)kCVPixelBufferWidthKey : @(width),
(NSString*)kCVPixelBufferHeightKey : @(height),
(NSString*)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA),
(NSString*)kCVPixelBufferMetalCompatibilityKey : @(YES)
};
CVReturn result = CVPixelBufferPoolCreate(kCFAllocatorDefault, (__bridge CFDictionaryRef)poolAttributes,
(__bridge CFDictionaryRef)pixelBufferAttributes, &_pixelBufferPool);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to create " + std::to_string(width) + "x" + std::to_string(height) +
" CVPixelBufferPool! Status: " + std::to_string(result));
}

Logger::log(TAG, "Creating CVPixelBuffer target texture...");
result = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, _pixelBufferPool, &_pixelBuffer);
CVReturn result =
CVPixelBufferCreate(nil, width, height, kCVPixelFormatType_32BGRA, (__bridge CFDictionaryRef)pixelBufferAttributes, &_pixelBuffer);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to create " + std::to_string(width) + "x" + std::to_string(height) +
" CVPixelBuffer texture! Status: " + std::to_string(result));
throw std::runtime_error("Failed to create input texture CVPixelBuffer!");
}

Logger::log(TAG, "Creating temporary file...");
Expand Down Expand Up @@ -74,19 +68,19 @@
AVVideoHeightKey : @(height)
};
_assetWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings];
_assetWriterInput.expectsMediaDataInRealTime = NO;
_assetWriterInput.performsMultiPassEncodingIfSupported = YES;
if (![_assetWriter canAddInput:_assetWriterInput]) {
std::string settingsJson = outputSettings.description.UTF8String;
throw std::runtime_error("Failed to add AVAssetWriterInput to AVAssetWriter! Settings used: " + settingsJson);
}

_assetWriterInput.expectsMediaDataInRealTime = NO;
_assetWriterInput.performsMultiPassEncodingIfSupported = YES;

_pixelBufferAdaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:_assetWriterInput
sourcePixelBufferAttributes:nil];

Logger::log(TAG, "Adding AVAssetWriterInput...");
[_assetWriter addInput:_assetWriterInput];

Logger::log(TAG, "Creating AVAssetWriterInputPixelBufferAdaptor...");
_pixelBufferAdaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:_assetWriterInput
sourcePixelBufferAttributes:pixelBufferAttributes];
}

bool AppleFilamentRecorder::getSupportsHEVC() {
Expand All @@ -106,38 +100,46 @@

Logger::log(TAG, "Rendering Frame with timestamp %f...", timestamp);
if (!_assetWriterInput.isReadyForMoreMediaData) {
// TODO: Dropping this frame is probably not a good idea, as we are rendering from an offscreen context anyways
// and could just wait until the input is ready for more data again. Maybe we can implement a mechanism
// that only renders when isReadyForMoreMediaData turns true?
// This should never happen because we only poll Frames from the AVAssetWriter.
// Once it's ready, renderFrame will be called. But better safe than sorry.
throw std::runtime_error("AVAssetWriterInput was not ready for more data!");
}

CVPixelBufferRef targetBuffer;
CVReturn result = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, _pixelBufferPool, &targetBuffer);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to create CVPixelBuffer for writing! Status: " + std::to_string(result));
CVPixelBufferPoolRef pool = _pixelBufferAdaptor.pixelBufferPool;
if (pool == nil) {
// The pool should always be created once startSession has been called. So in theory that also shouldn't happen.
throw std::runtime_error("AVAssetWriterInputPixelBufferAdaptor's pixel buffer pool was nil! Cannot write Frame.");
}

result = CVPixelBufferLockBaseAddress(targetBuffer, /* write flag */ 0);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to lock target buffer for write access!");
// 1. Get (or create) a pixel buffer from the cache pool
CVPixelBufferRef targetBuffer;
CVReturn result = CVPixelBufferPoolCreatePixelBuffer(nil, pool, &targetBuffer);
if (result != kCVReturnSuccess || targetBuffer == nil) {
throw std::runtime_error("Failed to get a new CVPixelBuffer from the CVPixelBufferPool!");
}

// 2. Lock both pixel buffers for CPU access
result = CVPixelBufferLockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to lock input buffer for read access!");
}
result = CVPixelBufferLockBaseAddress(targetBuffer, /* write flag */ 0);
if (result != kCVReturnSuccess) {
throw std::runtime_error("Failed to lock target buffer for write access!");
}

// 3. Copy over Frame data
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(_pixelBuffer);
size_t height = CVPixelBufferGetHeight(_pixelBuffer);

void* destination = CVPixelBufferGetBaseAddress(targetBuffer);
void* source = CVPixelBufferGetBaseAddress(_pixelBuffer);

memcpy(destination, source, bytesPerRow * height);

CVPixelBufferUnlockBaseAddress(targetBuffer, /* write flag */ 0);
// 4. Unlock pixel buffers again
CVPixelBufferUnlockBaseAddress(targetBuffer, kCVPixelBufferLock_Write);
CVPixelBufferUnlockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);

// 5. Append the new copy of the buffer to the pool
CMTime time = CMTimeMake(_frameCount++, getFps());
BOOL success = [_pixelBufferAdaptor appendPixelBuffer:targetBuffer withPresentationTime:time];
if (!success || _assetWriter.status != AVAssetWriterStatusWriting) {
Expand All @@ -148,6 +150,9 @@
}
throw std::runtime_error("Failed to append buffer to AVAssetWriter! " + errorMessage);
}

// 6. Release the pixel buffer
CFRelease(targetBuffer);
}

void* AppleFilamentRecorder::getNativeWindow() {
Expand Down Expand Up @@ -187,13 +192,22 @@
Logger::log(TAG, "Recorder is ready for more data.");
auto self = weakSelf.lock();
if (self != nullptr) {
self->_renderThreadDispatcher->runAsync([self]() {
bool shouldContinueNext = self->onReadyForMoreData();
if (!shouldContinueNext) {
// stop the render queue
[self->_assetWriterInput markAsFinished];
}
});
auto futurePromise =
self->_renderThreadDispatcher->runAsyncAwaitable<void>([self]() {
while ([self->_assetWriterInput isReadyForMoreMediaData]) {
                // This will cause our JS render callbacks to be called, which will call
                // renderFrame. renderFrame will call appendPixelBuffer, and we should call
                // appendPixelBuffer as long as isReadyForMoreMediaData is true.
bool shouldContinueNext = self->onReadyForMoreData();
if (!shouldContinueNext) {
// stop the render queue
[self->_assetWriterInput markAsFinished];
}
}
});
// The block in requestMediaDataWhenReadyOnQueue needs to call appendPixelBuffer
// synchronously
futurePromise.get();
}
}];
});
Expand Down Expand Up @@ -235,7 +249,6 @@
}];

self->_isRecording = false;
CVPixelBufferPoolFlush(self->_pixelBufferPool, 0);
});

return promise->get_future();
Expand Down

0 comments on commit 57da702

Please sign in to comment.