Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • vulkan2021/vkcv-framework
1 result
Show changes
Showing
with 746 additions and 0 deletions
#version 450
#extension GL_ARB_separate_shader_objects : enable
// Vertex inputs: object-space position, normal and texture coordinate.
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec3 inNormal;
layout(location = 2) in vec2 inUV;
// Outputs interpolated to the fragment shader.
layout(location = 0) out vec3 passNormal;
layout(location = 1) out vec2 passUV;
layout( push_constant ) uniform constants{
mat4 mvp;
mat4 model;
};
// Transforms the vertex to clip space and forwards world-space normal and UV.
void main() {
gl_Position = mvp * vec4(inPosition, 1.0);
// w = 0 so the translation part of the matrix is ignored.
// NOTE(review): uses the model matrix directly instead of its inverse
// transpose — only correct for normals under uniform scaling; confirm.
passNormal = (model * vec4(inNormal, 0)).xyz;
passUV = inUV;
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlur.inc"
#include "motionBlurConfig.inc"
#include "motionBlurWorkTile.inc"
// Full-path motion blur pass: runs the complete reconstruction filter on the
// tiles listed in the WorkTiles buffer, one workgroup per listed tile.
layout(set=0, binding=0) uniform texture2D inColor;
layout(set=0, binding=1) uniform texture2D inDepth;
layout(set=0, binding=2) uniform texture2D inMotionFullRes;
layout(set=0, binding=3) uniform texture2D inMotionNeighbourhoodMax;
layout(set=0, binding=4) uniform sampler nearestSampler;
layout(set=0, binding=5, r11f_g11f_b10f) uniform image2D outImage;
layout(set=0, binding=6) buffer WorkTileBuffer {
WorkTiles workTiles;
};
// one workgroup covers exactly one motion tile
layout(local_size_x = motionTileSize, local_size_y = motionTileSize, local_size_z = 1) in;
layout( push_constant ) uniform constants{
// computed from delta time and shutter speed
float motionScaleFactor;
// camera planes are needed to linearize depth
float cameraNearPlane;
float cameraFarPlane;
// jitter distance for the motion tile lookup, in pixels
float motionTileOffsetLength;
};
// Converts a non-linear depth buffer value in [0, 1] back to linear
// view-space depth, using the camera near and far plane distances.
// Inverse of the depth mapping applied by a standard projection matrix.
float linearizeDepth(float depth, float near, float far){
    float denominator = far + depth * (near - far);
    return near * far / denominator;
}
// All per-pixel inputs the reconstruction filter needs.
struct SampleData{
vec3 color;
float depthLinear; // view-space depth, linearized from the depth buffer
vec2 coordinate; // pixel coordinate (kept as vec2 for use with distance())
vec2 motion; // processed motion vector in uv space
float velocityPixels; // magnitude of the motion vector in pixels
};
// Mutual blur-over weights of a main/sample pixel pair, each in [0, 1].
struct PointSpreadCompare{
float foreground;
float background;
};
// Both results lie in [0, 1]:
// - foreground: would the sample pixel, if in front, blur over the main pixel?
// - background: would the sample pixel belong to the main pixel's background?
// A pixel contributes when the distance between the two pixels is smaller
// than its velocity. Unlike the constant falloff in McGuire's papers, this
// ramp from Jimenez stays constant until the last pixel, which the later
// gradient computation depends on.
PointSpreadCompare samplePointSpreadCompare(SampleData mainPixel, SampleData samplePixel){
    float pixelDistance = distance(mainPixel.coordinate, samplePixel.coordinate);
    PointSpreadCompare spread;
    spread.foreground = clamp(1 - pixelDistance + samplePixel.velocityPixels, 0, 1);
    spread.background = clamp(1 - pixelDistance + mainPixel.velocityPixels, 0, 1);
    return spread;
}
// Soft fore-/background weights of a sample relative to the main pixel.
struct DepthClassification{
    float foreground;
    float background;
};
// Classifies samplePixel against mainPixel as fore- or background.
// Both weights are in [0, 1] and always sum to 1: the depth term only
// differs in sign, so it cancels on addition, leaving 0.5 + 0.5.
DepthClassification sampleDepthClassification(SampleData mainPixel, SampleData samplePixel){
    // transition width of the soft classification, in linear depth units
    const float softDepthExtent = 0.1;
    float depthDelta = mainPixel.depthLinear - samplePixel.depthLinear;
    DepthClassification classification;
    classification.foreground = clamp(0.5 + depthDelta / softDepthExtent, 0, 1);
    classification.background = clamp(0.5 - depthDelta / softDepthExtent, 0, 1);
    return classification;
}
// Reconstruction filter from "Next Generation Post Processing in Call of
// Duty: Advanced Warfare", Jimenez: combines point spread overlap with the
// soft depth classification. Result lies in [0, 1].
// NOTE: the "Weigth" spelling is kept because call sites use this name.
float computeSampleWeigth(SampleData mainPixel, SampleData samplePixel){
    PointSpreadCompare pointSpread = samplePointSpreadCompare(mainPixel, samplePixel);
    DepthClassification depthClassification = sampleDepthClassification(mainPixel, samplePixel);
    float foregroundContribution = depthClassification.foreground * pointSpread.foreground;
    float backgroundContribution = depthClassification.background * pointSpread.background;
    return foregroundContribution + backgroundContribution;
}
// Gathers everything the filter needs about the pixel at the given uv:
// color, processed motion vector, its pixel magnitude and linearized depth.
SampleData loadSampleData(vec2 uv){
    SampleData result;
    result.color = texture(sampler2D(inColor, nearestSampler), uv).rgb;
    result.coordinate = ivec2(uv * imageSize(outImage));
    result.motion = processMotionVector(texture(sampler2D(inMotionFullRes, nearestSampler), uv).rg, motionScaleFactor, imageSize(outImage));
    result.velocityPixels = length(result.motion * imageSize(outImage));
    result.depthLinear = linearizeDepth(texture(sampler2D(inDepth, nearestSampler), uv).r, cameraNearPlane, cameraFarPlane);
    return result;
}
// Full reconstruction motion blur for one work tile per workgroup, following
// Jimenez' filter: samples along the dominant neighbourhood motion in both
// directions and weights every tap by depth classification and point spread.
void main(){
// each workgroup processes the tile stored at its index in the work tile list
uint tileIndex = gl_WorkGroupID.x;
ivec2 tileCoordinates = workTiles.tileXY[tileIndex];
ivec2 coord = ivec2(tileCoordinates * motionTileSize + gl_LocalInvocationID.xy);
// skip threads of partially covered tiles at the image border
if(any(greaterThanEqual(coord, imageSize(outImage))))
return;
ivec2 textureRes = textureSize(sampler2D(inColor, nearestSampler), 0);
vec2 uv = vec2(coord + 0.5) / textureRes; // + 0.5 to shift uv into pixel center
// the motion tile lookup is jittered, so the hard edges in the blur are replaced by noise
// dither is shifted, so it does not line up with motion tiles
float motionOffset = motionTileOffsetLength * (dither(coord + ivec2(ditherSize / 2)) * 2 - 1);
vec2 motionNeighbourhoodMax = processMotionVector(texelFetch(sampler2D(inMotionNeighbourhoodMax, nearestSampler), ivec2(coord + motionOffset) / motionTileSize, 0).rg, motionScaleFactor, imageSize(outImage));
SampleData mainPixel = loadSampleData(uv);
// early out on movement less than half a pixel
if(length(motionNeighbourhoodMax * imageSize(outImage)) <= 0.5){
imageStore(outImage, coord, vec4(mainPixel.color, 0.f));
return;
}
vec3 color = vec3(0);
float weightSum = 0;
// clamping start and end points avoids artifacts at image borders
// the sampler clamps the sample uvs anyways, but without clamping here, many samples can be stuck at the border
vec2 uvStart = clamp(uv - motionNeighbourhoodMax, 0, 1);
vec2 uvEnd = clamp(uv + motionNeighbourhoodMax, 0, 1);
// samples are placed evenly, but the entire filter is jittered
// dither returns either 0 or 1
// the sampleUV code expects an offset in range [-0.5, 0.5], so the dither is rescaled to a binary -0.25/0.25
float random = dither(coord) * 0.5 - 0.25;
const int sampleCountHalf = 8;
// two samples are processed at a time to allow for mirrored background reconstruction
for(int i = 0; i < sampleCountHalf; i++){
// one tap towards uvEnd, its counterpart mirrored towards uvStart
vec2 sampleUV1 = mix(uv, uvEnd, (i + random + 1) / float(sampleCountHalf + 1));
vec2 sampleUV2 = mix(uv, uvStart, (i + random + 1) / float(sampleCountHalf + 1));
SampleData sample1 = loadSampleData(sampleUV1);
SampleData sample2 = loadSampleData(sampleUV2);
float weight1 = computeSampleWeigth(mainPixel, sample1);
float weight2 = computeSampleWeigth(mainPixel, sample2);
// compile-time toggle; the false path exists for comparison/debugging
bool mirroredBackgroundReconstruction = true;
if(mirroredBackgroundReconstruction){
// see Jimenez paper for details and comparison
// problem is that in the foreground the background is reconstructed, which is blurry
// in the background the background is obviously known, so it is sharper
// at the border between fore- and background this causes a discontinuity
// to fix this the weights are mirrored on this border, effectively reconstructing the background, even though it is known
// these bools check if sample1 is an affected background pixel (further away and slower moving than sample2)
bool inBackground = sample1.depthLinear > sample2.depthLinear;
bool blurredOver = sample1.velocityPixels < sample2.velocityPixels;
// this mirrors the weights depending on the results:
// if both conditions are true, then weight2 is mirrored to weight1
// if both conditions are false, then weight1 is mirrored to weight2, as sample2 is an affected background pixel
// if only one condition is true, then the weights are kept as is
weight1 = inBackground && blurredOver ? weight2 : weight1;
weight2 = inBackground || blurredOver ? weight2 : weight1;
}
weightSum += weight1;
weightSum += weight2;
color += sample1.color * weight1;
color += sample2.color * weight2;
}
// normalize color and weight
weightSum /= sampleCountHalf * 2;
color /= sampleCountHalf * 2;
// the main color is considered the background
// the weight sum can be interpreted as the alpha of the combined samples, see Jimenez paper
color += (1 - weightSum) * mainPixel.color;
imageStore(outImage, coord, vec4(color, 0.f));
}
\ No newline at end of file
#ifndef MOTION_BLUR
#define MOTION_BLUR
#include "motionBlurConfig.inc"
// see "A Reconstruction Filter for Plausible Motion Blur", section 2.2
// Scales a raw motion vector for blurring and clamps its length so the blur
// never exceeds the radius covered by a motion tile.
vec2 processMotionVector(vec2 motion, float motionScaleFactor, ivec2 imageResolution){
    // every frame a pixel should blur over the distance it moves; the blur
    // runs in two directions (past and future position), so only half the
    // motion is used, then scaled by shutter speed and delta time
    vec2 motionScaled = motion * 0.5 * motionScaleFactor;
    // pixels are anisotropic in uv space, so the clamping ratio is derived
    // from the velocity in pixels rather than uv coordinates
    vec2 motionPixel = motionScaled * imageResolution;
    float velocityPixels = length(motionPixel);
    // epsilon guards against division by zero; the max(0.5, ...) keeps a
    // minimal half-pixel blur, the min(...) caps it at the tile radius
    float epsilon = 0.0001;
    return motionScaled * max(0.5, min(velocityPixels, motionTileSize)) / (velocityPixels + epsilon);
}
const int ditherSize = 4;
// Binary checkerboard dither over ditherSize/2-pixel blocks: returns 1 when
// exactly one of the coordinates lies in the first half of its dither cell,
// 0 otherwise.
// Branch- and modulo-free, as the previous implementation's comment
// suggested. This relies on ditherSize being a power of two: the xor
// isolates the bit worth ditherSize / 2, which is exactly what the former
// "% ditherSize < ditherSize / 2" comparison tested for the non-negative
// coordinates used here (GLSL leaves % on negative operands undefined).
float dither(ivec2 coord){
    return ((coord.x ^ coord.y) & (ditherSize / 2)) != 0 ? 1 : 0;
}
#endif // #ifndef MOTION_BLUR
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurConfig.inc"
#include "motionBlurWorkTile.inc"
layout(set=0, binding=0) uniform texture2D inColor;
layout(set=0, binding=1) uniform sampler nearestSampler;
layout(set=0, binding=2, r11f_g11f_b10f) uniform image2D outImage;
layout(set=0, binding=3) buffer WorkTileBuffer {
WorkTiles workTiles;
};
layout(local_size_x = motionTileSize, local_size_y = motionTileSize, local_size_z = 1) in;
// Copy path: writes one work tile worth of pixels from inColor to outImage
// unchanged. Each workgroup handles the tile stored at its workgroup index
// in the WorkTiles buffer.
void main(){
    ivec2 tile = workTiles.tileXY[gl_WorkGroupID.x];
    ivec2 pixelCoord = ivec2(tile * motionTileSize + gl_LocalInvocationID.xy);
    // guard against partially covered tiles at the image border
    if(any(greaterThanEqual(pixelCoord, imageSize(outImage))))
        return;
    vec3 color = texelFetch(sampler2D(inColor, nearestSampler), pixelCoord, 0).rgb;
    imageStore(outImage, pixelCoord, vec4(color, 0.f));
}
\ No newline at end of file
#ifndef MOTION_BLUR_CONFIG
#define MOTION_BLUR_CONFIG
// edge length of a motion tile in pixels; also used as the workgroup size of
// the per-tile compute passes
const int motionTileSize = 16;
// maximum supported output resolution (4K UHD); used to size the work tile buffers
const int maxMotionBlurWidth = 3840;
const int maxMotionBlurHeight = 2160;
#endif // #ifndef MOTION_BLUR_CONFIG
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlur.inc"
#include "motionBlurConfig.inc"
#include "motionBlurWorkTile.inc"
layout(set=0, binding=0) uniform texture2D inColor;
layout(set=0, binding=1) uniform texture2D inMotionNeighbourhoodMax;
layout(set=0, binding=2) uniform sampler nearestSampler;
layout(set=0, binding=3, r11f_g11f_b10f) uniform image2D outImage;
layout(set=0, binding=4) buffer WorkTileBuffer {
WorkTiles workTiles;
};
layout(local_size_x = motionTileSize, local_size_y = motionTileSize, local_size_z = 1) in;
layout( push_constant ) uniform constants{
// computed from delta time and shutter speed
float motionScaleFactor;
};
// Fast path motion blur: for tiles whose motion is nearly uniform, a plain
// jittered average along the dominant neighbourhood motion suffices — no
// depth classification or per-sample weighting needed.
void main(){
// each workgroup processes the tile stored at its index in the work tile list
uint tileIndex = gl_WorkGroupID.x;
ivec2 tileCoordinates = workTiles.tileXY[tileIndex];
ivec2 coord = ivec2(tileCoordinates * motionTileSize + gl_LocalInvocationID.xy);
// skip threads of partially covered tiles at the image border
if(any(greaterThanEqual(coord, imageSize(outImage))))
return;
ivec2 textureRes = textureSize(sampler2D(inColor, nearestSampler), 0);
vec2 uv = vec2(coord + 0.5) / textureRes; // + 0.5 to shift uv into pixel center
vec2 motionNeighbourhoodMax = processMotionVector(texelFetch(sampler2D(inMotionNeighbourhoodMax, nearestSampler), coord / motionTileSize, 0).rg, motionScaleFactor, imageSize(outImage));
// early out on movement less than half a pixel
if(length(motionNeighbourhoodMax * imageSize(outImage)) <= 0.5){
vec3 color = texture(sampler2D(inColor, nearestSampler), uv).rgb;
imageStore(outImage, coord, vec4(color, 0.f));
return;
}
vec3 color = vec3(0);
// clamping start and end points avoids artifacts at image borders
// the sampler clamps the sample uvs anyways, but without clamping here, many samples can be stuck at the border
vec2 uvStart = clamp(uv - motionNeighbourhoodMax, 0, 1);
vec2 uvEnd = clamp(uv + motionNeighbourhoodMax, 0, 1);
// samples are placed evenly, but the entire filter is jittered
// dither returns either 0 or 1
// the sampleUV code expects an offset in range [-0.5, 0.5], so the dither is rescaled to a binary -0.25/0.25
float random = dither(coord) * 0.5 - 0.25;
const int sampleCount = 16;
// unweighted average over the blur segment
for(int i = 0; i < sampleCount; i++){
vec2 sampleUV = mix(uvStart, uvEnd, (i + random + 1) / float(sampleCount + 1));
color += texture(sampler2D(inColor, nearestSampler), sampleUV).rgb;
}
color /= sampleCount;
imageStore(outImage, coord, vec4(color, 0.f));
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurWorkTile.inc"
layout(set=0, binding=0) uniform texture2D inMotionMax;
layout(set=0, binding=1) uniform texture2D inMotionMin;
layout(set=0, binding=2) uniform sampler nearestSampler;
layout(set=0, binding=3) buffer FullPathTileBuffer {
WorkTiles fullPathTiles;
};
layout(set=0, binding=4) buffer CopyPathTileBuffer {
WorkTiles copyPathTiles;
};
layout(set=0, binding=5) buffer FastPathTileBuffer {
WorkTiles fastPathTiles;
};
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout( push_constant ) uniform constants{
uint width;
uint height;
float fastPathThreshold;
};
// Classifies every motion tile into one of three work lists:
// - copy path: nearly no motion anywhere in the tile, color is copied as-is
// - fast path: motion is close to uniform, a cheap averaging blur suffices
// - full path: everything else runs the full reconstruction filter
// Tiles are appended to the lists with atomic counters.
void main(){
    ivec2 tileCoord = ivec2(gl_GlobalInvocationID.xy);
    if(any(greaterThanEqual(gl_GlobalInvocationID.xy, textureSize(sampler2D(inMotionMax, nearestSampler), 0))))
        return;
    vec2 motionMax = texelFetch(sampler2D(inMotionMax, nearestSampler), tileCoord, 0).rg;
    vec2 motionMin = texelFetch(sampler2D(inMotionMin, nearestSampler), tileCoord, 0).rg;
    // decide in pixel units, since uv units are anisotropic
    vec2 motionPixelMax = motionMax * vec2(width, height);
    vec2 motionPixelMin = motionMin * vec2(width, height);
    if(length(motionPixelMax) <= 0.5){
        // even the fastest pixel moves less than half a pixel
        uint slot = atomicAdd(copyPathTiles.tileCount, 1);
        copyPathTiles.tileXY[slot] = tileCoord;
    }
    else if(distance(motionPixelMin, motionPixelMax) <= fastPathThreshold){
        // small spread between slowest and fastest pixel: near-uniform motion
        uint slot = atomicAdd(fastPathTiles.tileCount, 1);
        fastPathTiles.tileXY[slot] = tileCoord;
    }
    else{
        uint slot = atomicAdd(fullPathTiles.tileCount, 1);
        fullPathTiles.tileXY[slot] = tileCoord;
    }
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurConfig.inc"
#include "motionBlurWorkTile.inc"
layout(set=0, binding=0) uniform texture2D inColor;
layout(set=0, binding=1) uniform sampler nearestSampler;
layout(set=0, binding=2, r11f_g11f_b10f) uniform image2D outImage;
layout(set=0, binding=3) buffer FullPathTileBuffer {
WorkTiles fullPathTiles;
};
layout(set=0, binding=4) buffer CopyPathTileBuffer {
WorkTiles copyPathTiles;
};
layout(set=0, binding=5) buffer FastPathTileBuffer {
WorkTiles fastPathTiles;
};
layout(local_size_x = motionTileSize, local_size_y = motionTileSize, local_size_z = 1) in;
// Debug visualization of the tile classification: tints each pixel by the
// path its tile was assigned to (red = full, green = copy, blue = fast).
void main(){
// the three tile lists are laid out back to back over the workgroup range;
// each candidate index is the workgroup index minus the preceding list sizes
uint tileIndexFullPath = gl_WorkGroupID.x;
// unsigned subtraction may wrap when the workgroup index is still inside an
// earlier list, but a wrapped (huge) value always fails its < check below
uint tileIndexCopyPath = gl_WorkGroupID.x - fullPathTiles.tileCount;
uint tileIndexFastPath = gl_WorkGroupID.x - fullPathTiles.tileCount - copyPathTiles.tileCount;
vec3 debugColor;
ivec2 tileCoordinates;
if(tileIndexFullPath < fullPathTiles.tileCount){
debugColor = vec3(1, 0, 0);
tileCoordinates = fullPathTiles.tileXY[tileIndexFullPath];
}
else if(tileIndexCopyPath < copyPathTiles.tileCount){
debugColor = vec3(0, 1, 0);
tileCoordinates = copyPathTiles.tileXY[tileIndexCopyPath];
}
else if(tileIndexFastPath < fastPathTiles.tileCount){
debugColor = vec3(0, 0, 1);
tileCoordinates = fastPathTiles.tileXY[tileIndexFastPath];
}
else{
// workgroup index beyond all three lists
return;
}
ivec2 coordinate = ivec2(tileCoordinates * motionTileSize + gl_LocalInvocationID.xy);
// blend 50/50 with the original color so the underlying image stays visible
vec3 color = texelFetch(sampler2D(inColor, nearestSampler), coordinate, 0).rgb;
color = mix(color, debugColor, 0.5);
imageStore(outImage, coordinate, vec4(color, 0));
}
\ No newline at end of file
#ifndef MOTION_BLUR_WORK_TILE
#define MOTION_BLUR_WORK_TILE
#include "motionBlurConfig.inc"
// upper bound on the number of motion tiles at the maximum supported
// resolution; both dimensions are rounded up to whole tiles
const int maxTileCount =
(maxMotionBlurWidth + motionTileSize - 1) / motionTileSize *
(maxMotionBlurHeight + motionTileSize - 1) / motionTileSize;
// List of tile coordinates to process, appended to via atomicAdd on tileCount.
struct WorkTiles{
uint tileCount;
// dispatch Y/Z are here so the buffer can be used directly as an indirect dispatch argument buffer
uint dispatchY;
uint dispatchZ;
ivec2 tileXY[maxTileCount];
};
#endif // #ifndef MOTION_BLUR_WORK_TILE
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurWorkTile.inc"
layout(set=0, binding=0) buffer FullPathTileBuffer {
WorkTiles fullPathTiles;
};
layout(set=0, binding=1) buffer CopyPathTileBuffer {
WorkTiles copyPathTiles;
};
layout(set=0, binding=2) buffer FastPathTileBuffer {
WorkTiles fastPathTiles;
};
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
// Resets the three work tile lists for the next frame. tileCount doubles as
// the x-component of an indirect dispatch argument buffer, so the y and z
// components must be kept at 1.
void main(){
    fullPathTiles.tileCount = 0;
    copyPathTiles.tileCount = 0;
    fastPathTiles.tileCount = 0;
    fullPathTiles.dispatchY = 1;
    copyPathTiles.dispatchY = 1;
    fastPathTiles.dispatchY = 1;
    fullPathTiles.dispatchZ = 1;
    copyPathTiles.dispatchZ = 1;
    fastPathTiles.dispatchZ = 1;
}
\ No newline at end of file
// Computes the uv-space motion vector of a fragment from its clip-space
// positions in the current and previous frame (perspective divide included).
// The result points from the current position towards the previous one.
vec2 computeMotionVector(vec4 NDC, vec4 NDCPrevious){
    vec2 ndcCurrent = NDC.xy / NDC.w;
    vec2 ndcLast = NDCPrevious.xy / NDCPrevious.w;
    // remap from NDC [-1, 1] into uv space [0, 1]
    vec2 uvCurrent = ndcCurrent * 0.5 + 0.5;
    vec2 uvLast = ndcLast * 0.5 + 0.5;
    return uvLast - uvCurrent;
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurConfig.inc"
layout(set=0, binding=0) uniform texture2D inMotion;
layout(set=0, binding=1) uniform sampler textureSampler;
layout(set=0, binding=2, rg16) uniform image2D outMotionMax;
layout(set=0, binding=3, rg16) uniform image2D outMotionMin;
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
// Reduces every motionTileSize x motionTileSize block of the full resolution
// motion buffer to the motion vectors with the largest and smallest magnitude
// and writes them to the per-tile min/max images.
void main(){
    ivec2 outImageRes = imageSize(outMotionMax);
    ivec2 motionTileCoord = ivec2(gl_GlobalInvocationID.xy);
    if(any(greaterThanEqual(motionTileCoord, outImageRes)))
        return;
    float velocityMax = 0;
    vec2 motionMax = vec2(0);
    float velocityMin = 100000; // sentinel above any realistic uv-space velocity
    vec2 motionMin = vec2(0);
    ivec2 motionBufferBaseCoord = motionTileCoord * motionTileSize;
    // highest valid texel of the input; when the image size is not a multiple
    // of motionTileSize, border tiles would otherwise read out of bounds,
    // which is undefined for texelFetch — clamp instead (base is never negative)
    ivec2 maxInputCoord = textureSize(sampler2D(inMotion, textureSampler), 0) - 1;
    for(int x = 0; x < motionTileSize; x++){
        for(int y = 0; y < motionTileSize; y++){
            ivec2 sampleCoord = min(motionBufferBaseCoord + ivec2(x, y), maxInputCoord);
            vec2 motionSample = texelFetch(sampler2D(inMotion, textureSampler), sampleCoord, 0).rg;
            float velocitySample = length(motionSample);
            if(velocitySample > velocityMax){
                velocityMax = velocitySample;
                motionMax = motionSample;
            }
            if(velocitySample < velocityMin){
                velocityMin = velocitySample;
                motionMin = motionSample;
            }
        }
    }
    imageStore(outMotionMax, motionTileCoord, vec4(motionMax, 0, 0));
    imageStore(outMotionMin, motionTileCoord, vec4(motionMin, 0, 0));
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
layout(set=0, binding=0) uniform texture2D inMotionMax;
layout(set=0, binding=1) uniform texture2D inMotionMin;
layout(set=0, binding=2) uniform sampler textureSampler;
layout(set=0, binding=3, rg16) uniform image2D outMotionMaxNeighbourhood;
layout(set=0, binding=4, rg16) uniform image2D outMotionMinNeighbourhood;
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
// Dilates the per-tile motion min/max over the 3x3 tile neighbourhood, so a
// tile also accounts for strong motion in adjacent tiles.
void main(){
    ivec2 outImageRes = imageSize(outMotionMaxNeighbourhood);
    ivec2 motionTileCoord = ivec2(gl_GlobalInvocationID.xy);
    if(any(greaterThanEqual(motionTileCoord, outImageRes)))
        return;
    float velocityMax = 0;
    vec2 motionMax = vec2(0);
    float velocityMin = 10000; // sentinel above any realistic uv-space velocity
    vec2 motionMin = vec2(0);
    // highest valid texel of the tile textures; border tiles would otherwise
    // fetch at -1 or past the edge, which is undefined for texelFetch
    // NOTE(review): assumes inMotionMin has the same resolution as
    // inMotionMax (both are written by the same min/max pass) — confirm
    ivec2 maxTileCoord = textureSize(sampler2D(inMotionMax, textureSampler), 0) - 1;
    for(int x = -1; x <= 1; x++){
        for(int y = -1; y <= 1; y++){
            ivec2 sampleCoord = clamp(motionTileCoord + ivec2(x, y), ivec2(0), maxTileCoord);
            vec2 motionSampleMax = texelFetch(sampler2D(inMotionMax, textureSampler), sampleCoord, 0).rg;
            float velocitySampleMax = length(motionSampleMax);
            if(velocitySampleMax > velocityMax){
                velocityMax = velocitySampleMax;
                motionMax = motionSampleMax;
            }
            vec2 motionSampleMin = texelFetch(sampler2D(inMotionMin, textureSampler), sampleCoord, 0).rg;
            float velocitySampleMin = length(motionSampleMin);
            if(velocitySampleMin < velocityMin){
                velocityMin = velocitySampleMin;
                motionMin = motionSampleMin;
            }
        }
    }
    imageStore(outMotionMaxNeighbourhood, motionTileCoord, vec4(motionMax, 0, 0));
    imageStore(outMotionMinNeighbourhood, motionTileCoord, vec4(motionMin, 0, 0));
}
\ No newline at end of file
#version 440
#extension GL_GOOGLE_include_directive : enable
#include "motionBlurConfig.inc"
layout(set=0, binding=0) uniform texture2D inMotion;
layout(set=0, binding=1) uniform sampler textureSampler;
layout(set=0, binding=2, r11f_g11f_b10f) uniform image2D outImage;
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout( push_constant ) uniform constants{
float range;
};
// Debug visualization of the per-tile motion vectors: remaps each vector
// from [-range, range] into [0, 1] and writes it to the red/green channels,
// with blue fixed at 0.5.
void main(){
    ivec2 pixel = ivec2(gl_GlobalInvocationID.xy);
    if(any(greaterThanEqual(pixel, imageSize(outImage))))
        return;
    // one motion tile covers motionTileSize pixels, hence the integer division
    vec2 motion = texelFetch(sampler2D(inMotion, textureSampler), pixel / motionTileSize, 0).rg;
    vec2 normalizedMotion = clamp(motion / range, -1, 1);
    vec2 displayColor = normalizedMotion * 0.5 + 0.5;
    imageStore(outImage, pixel, vec4(displayColor, 0.5, 0));
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_GOOGLE_include_directive : enable
#include "motionVector.inc"
// Interpolated clip-space positions of the fragment in the current and
// previous frame, provided by the vertex shader.
layout(location = 0) in vec4 passNDC;
layout(location = 1) in vec4 passNDCPrevious;
layout(location = 0) out vec2 outMotion;
// Writes the per-fragment uv-space motion vector.
void main() {
outMotion = computeMotionVector(passNDC, passNDCPrevious);
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
// clip-space positions for the current and previous frame; the fragment
// shader derives motion vectors from these
layout(location = 0) out vec4 passNDC;
layout(location = 1) out vec4 passNDCPrevious;
layout( push_constant ) uniform constants{
mat4 mvp;
mat4 mvpPrevious;
};
// Transforms the vertex with the current and the previous frame's
// model-view-projection matrix and forwards both clip-space results.
void main() {
gl_Position = mvp * vec4(inPosition, 1.0);
passNDC = gl_Position;
passNDCPrevious = mvpPrevious * vec4(inPosition, 1.0);
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) out vec3 outColor;
// Outputs a constant sky color.
void main() {
outColor = vec3(0, 0.2, 0.9);
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
layout( push_constant ) uniform constants{
mat4 viewProjection;
};
// Sky vertex shader: w = 0 in the transform drops the camera translation so
// the sky geometry follows the viewer; overwriting w with z makes the
// perspective divide yield z/w = 1, pinning the sky to the far plane
// (assuming a standard [0, 1] depth range).
void main() {
gl_Position = viewProjection * vec4(inPosition, 0.0);
gl_Position.w = gl_Position.z;
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_GOOGLE_include_directive : enable
#include "motionVector.inc"
layout(location = 0) out vec2 outMotion;
// Interpolated clip-space positions of the sky fragment in the current and
// previous frame, provided by the vertex shader.
layout(location = 0) in vec4 passNDC;
layout(location = 1) in vec4 passNDCPrevious;
// Writes the per-fragment uv-space motion vector for the sky.
void main() {
outMotion = computeMotionVector(passNDC, passNDCPrevious);
}
\ No newline at end of file
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
layout( push_constant ) uniform constants{
mat4 viewProjection;
mat4 viewProjectionPrevious;
};
// clip-space positions for the current and previous frame; the fragment
// shader derives motion vectors from these
layout(location = 0) out vec4 passNDC;
layout(location = 1) out vec4 passNDCPrevious;
// Sky motion vector vertex shader: w = 0 drops the camera translation so the
// sky follows the viewer; overwriting w with z makes the perspective divide
// yield z/w = 1, pinning the sky to the far plane. The same trick is applied
// to the previous frame's position so both clip-space values are consistent.
void main() {
gl_Position = viewProjection * vec4(inPosition, 0.0);
gl_Position.w = gl_Position.z;
passNDC = gl_Position;
passNDCPrevious = viewProjectionPrevious * vec4(inPosition, 0.0);
passNDCPrevious.w = passNDCPrevious.z;
}
\ No newline at end of file