This repository has been archived by the owner on Jun 14, 2021. It is now read-only.

Rename PreRenderRequest / RenderingRequest to PreRenderContext / RenderContext #178

Merged
4 commits, merged Sep 21, 2018
16 changes: 8 additions & 8 deletions packages/delir-core/plugin-example/README.md
@@ -88,16 +88,16 @@ return Type
.colorRgba('color', {label: 'Color', defaultValue: new Delir.Values.ColorRGBA(0, 0, 0, 1)})
```

### `async initialize(req: Delir.PreRenderRequest)`
### `async initialize(context: Delir.PreRenderContext)`
This method is called for initialization before rendering starts.
Load files such as images here.

The `req` object contains initial values such as Assets.

```javascript
async initialize(req: Delir.PreRenderRequest) {
async initialize(context: Delir.PreRenderContext) {
// Initial values are passed to req.parameters under the parameter names specified in the provideParameters method
const parameters = preRenderReq.parameters;
const parameters = context.parameters;
const imageAsset = parameters.image;

if (imageAsset == null) return;
@@ -110,19 +110,19 @@ async initialize(req: Delir.PreRenderRequest) {
}
```

### `async render(req: RenderRequest)`
### `async render(context: RenderContext)`
Renders a single frame.
The `req` object receives the `destination canvas`, the `frame rate`, the `composition size`, the `current time on the composition (and frame number)`, the `parameters of the current frame`, and so on.
The `context` object receives the `destination canvas`, the `frame rate`, the `composition size`, the `current time on the composition (and frame number)`, the `parameters of the current frame`, and so on.

```javascript
async render(req: RenderRequest)
async render(context: RenderContext)
{
if (this.image == null) return;

const dest = req.destCanvas;
const dest = context.destCanvas;
const context = dest.getContext('2d');

// The values at the current frame are passed to req.parameters under the parameter names specified in the provideParameters method
// The values at the current frame are passed to context.parameters under the parameter names specified in the provideParameters method
const params = req.parameters;

if (params.visibility !== false) {
26 changes: 13 additions & 13 deletions packages/delir-core/plugin-example/src/index.ts
@@ -1,10 +1,10 @@
import {
PostEffectBase,
PreRenderingRequest,
RenderRequest,
Values,
Type
} from "delir-core";
PreRenderContext,
RenderContext,
Type,
Values
} from '@ragg/delir-core'

interface Params {
x: number
@@ -24,7 +24,7 @@ export default class ExamplePlugin extends PostEffectBase {
.number('y', {label: 'Position Y', defaultValue: 0, animatable: true})
.number('width', {label: 'Width', defaultValue: 100, animatable: true})
.number('height', {label: 'Height', defaultValue: 100, animatable: true})
.colorRgba('color', {label: 'Fill color', defaultValue: new Values.ColorRGBA(0, 0, 0, 1), animatable: true});
.colorRgba('color', {label: 'Fill color', defaultValue: new Values.ColorRGBA(0, 0, 0, 1), animatable: true})
}

/**
@@ -33,7 +33,7 @@ export default class ExamplePlugin extends PostEffectBase {
* If you want initializing before rendering (likes load audio, image, etc...)
* Do it in this method.
*/
public async initialize(req: PreRenderingRequest)
public async initialize(context: PreRenderContext)
{

}
@@ -42,13 +42,13 @@ export default class ExamplePlugin extends PostEffectBase {
* Render frame into destination canvas.
* @param req
*/
public async render(req: RenderRequest<Params>)
public async render(context: RenderContext<Params>)
{
const dest = req.destCanvas;
const context = dest.getContext('2d');
const params = req.parameters as Params;
const dest = req.destCanvas
const context = dest.getContext('2d')
const params = req.parameters as Params

context.fillStyle = params.color.toString();
context.fillRect(params.x, params.y, params.width, params.height);
context.fillStyle = params.color.toString()
context.fillRect(params.x, params.y, params.width, params.height)
}
}
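
For a plugin author, the rename amounts to swapping the imported type names (`PreRenderingRequest` → `PreRenderContext`, `RenderRequest` → `RenderContext`) and the conventional `req` parameter name for `context`. A minimal migration sketch, assuming the `@ragg/delir-core` exports shown in this diff and that matching `x`/`y` parameters are declared via `provideParameters` as in the example plugin above:

```typescript
import { PostEffectBase, PreRenderContext, RenderContext } from '@ragg/delir-core'

interface Params {
    x: number
    y: number
}

export default class MinimalEffect extends PostEffectBase {
    // Previously `public async initialize(req: PreRenderingRequest)`
    public async initialize(context: PreRenderContext) {
        // Pre-render setup (loading images, audio, etc.) would go here
    }

    // Previously `public async render(req: RenderRequest<Params>)`
    public async render(context: RenderContext<Params>) {
        const canvasCtx = context.destCanvas.getContext('2d')!
        // Per-frame parameter values arrive on context.parameters
        const params = context.parameters as Params

        canvasCtx.fillRect(params.x, params.y, context.width, context.height)
    }
}
```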
92 changes: 46 additions & 46 deletions packages/delir-core/src/Engine/Engine.ts
@@ -19,7 +19,7 @@ import { ColorRGB, ColorRGBA } from '../Values'
import AssetProxy from './AssetProxy'
import DependencyResolver from './DependencyResolver'
import * as ExpressionContext from './ExpressionSupport/ExpressionContext'
import RenderingRequest from './RenderRequest'
import RenderContext from './RenderContext'
import ClipRenderTask from './Task/ClipRenderTask'
import EffectRenderTask from './Task/EffectRenderTask'

@@ -57,7 +57,7 @@ export interface RealParameterValues {
* Get expression applied values
*/
export const applyExpression = (
req: RenderingRequest,
req: RenderContext,
beforeExpressionParams: RealParameterValues,
expressions: { [param: string]: (exposes: ExpressionContext.ContextSource) => RealParameterValueTypes },
): { [param: string]: ParameterValueTypes } => {
@@ -257,7 +257,7 @@ export default class Engine
})
}

private _initStage(compositionId: string, option: RenderingOption): RenderingRequest
private _initStage(compositionId: string, option: RenderingOption): RenderContext
{
if (!this._project) throw new RenderingFailedException('Project must be set before rendering')
if (!this._pluginRegistry) throw new RenderingFailedException('Plugin registry not set')
@@ -285,7 +285,7 @@ export default class Engine
const currentFrame = option.beginFrame
const currentTime = currentFrame / rootComposition.framerate

return new RenderingRequest({
return new RenderContext({
time: currentTime,
timeOnComposition: currentTime,

@@ -310,11 +310,11 @@ export default class Engine
})
}

private async _taskingStage(req: RenderingRequest, option: RenderingOption): Promise<LayerRenderTask[]>
private async _taskingStage(context: RenderContext, option: RenderingOption): Promise<LayerRenderTask[]>
{
const layerTasks: LayerRenderTask[] = []

const renderOrderLayers = req.rootComposition.layers.slice(0).reverse()
const renderOrderLayers = context.rootComposition.layers.slice(0).reverse()
for (const layer of renderOrderLayers) {

const clips: ClipRenderTask[] = []
@@ -323,10 +323,10 @@ export default class Engine
const clipRenderTask = ClipRenderTask.build({
clip,
clipRendererCache: this._clipRendererCache,
req,
context,
})

await clipRenderTask.initialize(req)
await clipRenderTask.initialize(context)

// Initialize effects
const effects: EffectRenderTask[] = []
@@ -335,12 +335,12 @@ export default class Engine
const effectRenderTask = EffectRenderTask.build({
effect,
clip,
req,
context,
effectCache: this._effectCache,
resolver: req.resolver
resolver: context.resolver
})

await effectRenderTask.initialize(req)
await effectRenderTask.initialize(context)
effects.push(effectRenderTask)
} catch (e) {
if (e instanceof EffectPluginMissingException && option.ignoreMissingEffect) {
@@ -364,62 +364,62 @@ export default class Engine
return layerTasks
}

private async _renderStage(req: RenderingRequest, layerRenderTasks: LayerRenderTask[]): Promise<void>
private async _renderStage(context: RenderContext, layerRenderTasks: LayerRenderTask[]): Promise<void>
{
const destBufferCanvas = req.destCanvas
const destBufferCanvas = context.destCanvas
const destBufferCtx = destBufferCanvas.getContext('2d')!

destBufferCtx.fillStyle = req.rootComposition.backgroundColor.toString()
destBufferCtx.fillRect(0, 0, req.width, req.height)
destBufferCtx.fillStyle = context.rootComposition.backgroundColor.toString()
destBufferCtx.fillRect(0, 0, context.width, context.height)

const channelAudioBuffers = _.times(req.rootComposition.audioChannels, () => {
return new Float32Array(new ArrayBuffer(4 /* bytes */ * req.rootComposition.samplingRate))
const channelAudioBuffers = _.times(context.rootComposition.audioChannels, () => {
return new Float32Array(new ArrayBuffer(4 /* bytes */ * context.rootComposition.samplingRate))
})

const audioBufferingSizeTime = req.neededSamples / req.samplingRate
const audioRenderStartRangeFrame = audioBufferingSizeTime * req.framerate
const audioBufferingSizeTime = context.neededSamples / context.samplingRate
const audioRenderStartRangeFrame = audioBufferingSizeTime * context.framerate

for (const layerTask of layerRenderTasks) {
const layerBufferCanvas = document.createElement('canvas') as HTMLCanvasElement
layerBufferCanvas.width = req.width
layerBufferCanvas.height = req.height
layerBufferCanvas.width = context.width
layerBufferCanvas.height = context.height

const layerBufferCanvasCtx = layerBufferCanvas.getContext('2d')!

// SPEC: The rendering order of the same layer at the same time is not defined.
// In the future, want to ensure that there are no more than two clips in a single layer at a given time.
const renderTargetClips = layerTask.clips.filter(clip => {
if (req.isAudioBufferingNeeded && clip.rendererType === 'audio') {
return clip.clipPlacedFrame <= (req.frameOnComposition + audioRenderStartRangeFrame)
&& clip.clipPlacedFrame + clip.clipDurationFrames >= req.frameOnComposition
if (context.isAudioBufferingNeeded && clip.rendererType === 'audio') {
return clip.clipPlacedFrame <= (context.frameOnComposition + audioRenderStartRangeFrame)
&& clip.clipPlacedFrame + clip.clipDurationFrames >= context.frameOnComposition
}

return clip.clipPlacedFrame <= req.frameOnComposition
&& clip.clipPlacedFrame + clip.clipDurationFrames >= req.frameOnComposition
return clip.clipPlacedFrame <= context.frameOnComposition
&& clip.clipPlacedFrame + clip.clipDurationFrames >= context.frameOnComposition
})

// Render clips
for (const clipTask of renderTargetClips) {
const clipBufferCanvas = document.createElement('canvas') as HTMLCanvasElement
clipBufferCanvas.width = req.width
clipBufferCanvas.height = req.height
clipBufferCanvas.width = context.width
clipBufferCanvas.height = context.height

const clipBufferCtx = clipBufferCanvas.getContext('2d')!

const clipScopeReq = req.clone({
timeOnClip: req.time - (clipTask.clipPlacedFrame / req.framerate),
frameOnClip: req.frame - clipTask.clipPlacedFrame,
const clipScopeContext = context.clone({
timeOnClip: context.time - (clipTask.clipPlacedFrame / context.framerate),
frameOnClip: context.frame - clipTask.clipPlacedFrame,
})

// Lookup current frame prop value from pre-calculated lookup-table
const beforeExpressionParams = _.fromPairs(clipTask.rendererParams.properties.map(desc => {
return [desc.paramName, clipTask.keyframeLUT[desc.paramName][req.frame]]
return [desc.paramName, clipTask.keyframeLUT[desc.paramName][context.frame]]
}))

// Apply expression
const afterExpressionParams = applyExpression(clipScopeReq, beforeExpressionParams, clipTask.expressions)
const afterExpressionParams = applyExpression(clipScopeContext, beforeExpressionParams, clipTask.expressions)

const clipRenderReq = clipScopeReq.clone({
const clipRenderReq = clipScopeContext.clone({
parameters: afterExpressionParams,

srcCanvas: clipTask.rendererType === 'adjustment' ? destBufferCanvas : null,
@@ -428,14 +428,14 @@ export default class Engine
})

if (/* isCompositionClip */ false) {
const frameOnComposition = req.frame - clipTask.clipPlacedFrame
const frameOnComposition = context.frame - clipTask.clipPlacedFrame

// TODO: frame mapping for set different framerate for sub-composition
const compositionRenderReq = req.clone({
const compositionRenderReq = context.clone({
frameOnComposition,
timeOnComposition: frameOnComposition / req.framerate,
timeOnComposition: frameOnComposition / context.framerate,

parentComposition: req.rootComposition
parentComposition: context.rootComposition
})
}

@@ -445,12 +445,12 @@ export default class Engine
// Post process effects
for (const effectTask of clipTask.effectRenderTask) {
const beforeExpressionEffectorParams = _.fromPairs(effectTask.effectorProps.properties.map(desc => {
return [desc.paramName, effectTask.keyframeLUT[desc.paramName][req.frame]]
return [desc.paramName, effectTask.keyframeLUT[desc.paramName][context.frame]]
})) as {[paramName: string]: ParameterValueTypes}

const afterExpressionEffectorParams = applyExpression(clipScopeReq, beforeExpressionEffectorParams, effectTask.expressions)
const afterExpressionEffectorParams = applyExpression(clipScopeContext, beforeExpressionEffectorParams, effectTask.expressions)

const effectRenderReq = clipScopeReq.clone({
const effectRenderReq = clipScopeContext.clone({
srcCanvas: clipBufferCanvas,
destCanvas: clipBufferCanvas,
parameters: afterExpressionEffectorParams,
@@ -474,17 +474,17 @@ export default class Engine

// SPEC: When there are two or more adjustment clips on the same layer at the same time, the layer buffer is cleared for each that clip rendering
// This is not a problem if there is only one clip at a certain time. (Maybe...)
layerBufferCanvasCtx.clearRect(0, 0, req.width, req.height)
layerBufferCanvasCtx.clearRect(0, 0, context.width, context.height)
} else {
layerBufferCanvasCtx.drawImage(clipBufferCanvas, 0, 0)
}

if (req.isAudioBufferingNeeded) {
if (context.isAudioBufferingNeeded) {
await mergeAudioBufferInto(
req.destAudioBuffer,
context.destAudioBuffer,
channelAudioBuffers,
req.audioChannels,
req.samplingRate
context.audioChannels,
context.samplingRate
)

for (const chBuffer of channelAudioBuffers) {
@@ -1,9 +1,9 @@
import { ParameterValueTypes } from '../../PluginSupport/type-descriptor'
import RenderingRequest from '../RenderRequest'
import RenderContext from '../RenderContext'
import { ExpressionContext } from './ExpressionVM'

export interface ContextSource {
req: RenderingRequest
context: RenderContext
clipProperties: {[propName: string]: ParameterValueTypes}
currentValue: any
}
@@ -14,15 +14,15 @@ export const buildContext = (contextSource: ContextSource): ExpressionContext =>
})

return {
time : contextSource.req.time,
frame : contextSource.req.frame,
timeOnComposition : contextSource.req.timeOnComposition,
frameOnComposition : contextSource.req.frameOnComposition,
width : contextSource.req.width,
height : contextSource.req.height,
audioBuffer : contextSource.req.destAudioBuffer,
duration : contextSource.req.durationFrames / contextSource.req.framerate,
durationFrames : contextSource.req.durationFrames,
time : contextSource.context.time,
frame : contextSource.context.frame,
timeOnComposition : contextSource.context.timeOnComposition,
frameOnComposition : contextSource.context.frameOnComposition,
width : contextSource.context.width,
height : contextSource.context.height,
audioBuffer : contextSource.context.destAudioBuffer,
duration : contextSource.context.durationFrames / contextSource.context.framerate,
durationFrames : contextSource.context.durationFrames,
clipProp : clipPropertyProxy,
currentValue : contextSource.currentValue,
}
@@ -4,7 +4,7 @@ import { Composition } from '../Entity'
import { ParameterValueTypes } from '../PluginSupport/type-descriptor'
import DependencyResolver from './DependencyResolver'

export default class PreRenderingRequest<T = {[propName: string]: ParameterValueTypes}>
export default class PreRenderContext<T = {[propName: string]: ParameterValueTypes}>
{
private static _permitKeys = [
'width',
@@ -59,20 +59,20 @@ export default class PreRenderingRequest<T = {[propName: string]: ParameterValue
//
public resolver: DependencyResolver

constructor(properties: Partial<PreRenderingRequest<T>> = {})
constructor(properties: Partial<PreRenderContext<T>> = {})
{
const props = _.pick(properties, [
...PreRenderingRequest._permitKeys,
...PreRenderingRequest._permitOnlyInitializeKey
...PreRenderContext._permitKeys,
...PreRenderContext._permitOnlyInitializeKey
])

Object.assign(this, props)
Object.freeze(this)
}

public clone(patch: Partial<PreRenderingRequest<T>>): PreRenderingRequest<T>
public clone(patch: Partial<PreRenderContext<T>>): PreRenderContext<T>
{
const permitPatch = _.pick(patch, PreRenderingRequest._permitKeys)
return new PreRenderingRequest<T>(Object.assign({}, this, permitPatch))
const permitPatch = _.pick(patch, PreRenderContext._permitKeys)
return new PreRenderContext<T>(Object.assign({}, this, permitPatch))
}
}