Diffstat (limited to 'servers/rendering/renderer_rd/shaders/effects')
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/SCsub | 17
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/blur_raster.glsl | 148
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/blur_raster_inc.glsl | 26
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/bokeh_dof.glsl | 215
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/bokeh_dof_inc.glsl | 37
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/bokeh_dof_raster.glsl | 253
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/copy.glsl | 284
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/copy_to_fb.glsl | 169
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cube_to_dp.glsl | 84
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler.glsl | 145
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_inc.glsl | 48
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_raster.glsl | 163
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_filter.glsl | 326
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_filter_raster.glsl | 256
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_roughness.glsl | 63
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_inc.glsl | 95
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_raster.glsl | 79
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/resolve.glsl | 236
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/screen_space_reflection.glsl | 255
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_filter.glsl | 148
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_inc.glsl | 28
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_scale.glsl | 106
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/specular_merge.glsl | 112
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ss_effects_downsample.glsl | 229
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssao.glsl | 483
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssao_blur.glsl | 154
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssao_importance_map.glsl | 123
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssao_interleave.glsl | 119
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssil.glsl | 444
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssil_blur.glsl | 144
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssil_importance_map.glsl | 125
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/ssil_interleave.glsl | 122
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/tonemap.glsl | 502
-rw-r--r--  servers/rendering/renderer_rd/shaders/effects/vrs.glsl | 72
34 files changed, 5810 insertions, 0 deletions
diff --git a/servers/rendering/renderer_rd/shaders/effects/SCsub b/servers/rendering/renderer_rd/shaders/effects/SCsub
new file mode 100644
index 0000000000..741da8fe69
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/SCsub
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+Import("env")
+
+if "RD_GLSL" in env["BUILDERS"]:
+ # find all include files
+ gl_include_files = [str(f) for f in Glob("*_inc.glsl")]
+
+ # find all shader code (all glsl files excluding our include files)
+ glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
+
+ # make sure we recompile shaders if include files change
+ env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
+
+ # compile shaders
+ for glsl_file in glsl_files:
+ env.RD_GLSL(glsl_file)
diff --git a/servers/rendering/renderer_rd/shaders/effects/blur_raster.glsl b/servers/rendering/renderer_rd/shaders/effects/blur_raster.glsl
new file mode 100644
index 0000000000..96f5c3e9f2
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/blur_raster.glsl
@@ -0,0 +1,148 @@
+/* clang-format off */
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "blur_raster_inc.glsl"
+
+layout(location = 0) out vec2 uv_interp;
+/* clang-format on */
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+
+ gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
+}
+
+/* clang-format off */
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "blur_raster_inc.glsl"
+
+layout(location = 0) in vec2 uv_interp;
+/* clang-format on */
+
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+
+#ifdef GLOW_USE_AUTO_EXPOSURE
+layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
+#endif
+
+layout(location = 0) out vec4 frag_color;
+
+void main() {
+ // We do not apply our color scale for our mobile renderer here; we leave our colors at half brightness and apply the scale in the tonemap raster.
+
+#ifdef MODE_MIPMAP
+
+ vec2 pix_size = blur.pixel_size;
+ vec4 color = texture(source_color, uv_interp + vec2(-0.5, -0.5) * pix_size);
+ color += texture(source_color, uv_interp + vec2(0.5, -0.5) * pix_size);
+ color += texture(source_color, uv_interp + vec2(0.5, 0.5) * pix_size);
+ color += texture(source_color, uv_interp + vec2(-0.5, 0.5) * pix_size);
+ frag_color = color / 4.0;
+
+#endif
+
+#ifdef MODE_GAUSSIAN_BLUR
+
+ // The simpler blur uses sigma = 2 for the Gaussian kernel, giving a stronger effect.
+
+ // Note: for plain blur, blur.luminance_multiplier is irrelevant; we would be multiplying and then dividing by the same amount.
+
+ if (bool(blur.flags & FLAG_HORIZONTAL)) {
+ vec2 pix_size = blur.pixel_size;
+ pix_size *= 0.5; //reading from larger buffer, so use more samples
+ vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.214607;
+ color += texture(source_color, uv_interp + vec2(1.0, 0.0) * pix_size) * 0.189879;
+ color += texture(source_color, uv_interp + vec2(2.0, 0.0) * pix_size) * 0.131514;
+ color += texture(source_color, uv_interp + vec2(3.0, 0.0) * pix_size) * 0.071303;
+ color += texture(source_color, uv_interp + vec2(-1.0, 0.0) * pix_size) * 0.189879;
+ color += texture(source_color, uv_interp + vec2(-2.0, 0.0) * pix_size) * 0.131514;
+ color += texture(source_color, uv_interp + vec2(-3.0, 0.0) * pix_size) * 0.071303;
+ frag_color = color;
+ } else {
+ vec2 pix_size = blur.pixel_size;
+ vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.38774;
+ color += texture(source_color, uv_interp + vec2(0.0, 1.0) * pix_size) * 0.24477;
+ color += texture(source_color, uv_interp + vec2(0.0, 2.0) * pix_size) * 0.06136;
+ color += texture(source_color, uv_interp + vec2(0.0, -1.0) * pix_size) * 0.24477;
+ color += texture(source_color, uv_interp + vec2(0.0, -2.0) * pix_size) * 0.06136;
+ frag_color = color;
+ }
+#endif
+
+#ifdef MODE_GAUSSIAN_GLOW
+
+ // Glow uses a larger sigma for a more rounded blur effect.
+
+#define GLOW_ADD(m_ofs, m_mult) \
+ { \
+ vec2 ofs = uv_interp + m_ofs * pix_size; \
+ vec4 c = texture(source_color, ofs) * m_mult; \
+ if (any(lessThan(ofs, vec2(0.0))) || any(greaterThan(ofs, vec2(1.0)))) { \
+ c *= 0.0; \
+ } \
+ color += c; \
+ }
+
+ if (bool(blur.flags & FLAG_HORIZONTAL)) {
+ vec2 pix_size = blur.pixel_size;
+ pix_size *= 0.5; //reading from larger buffer, so use more samples
+
+ vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.174938;
+ GLOW_ADD(vec2(1.0, 0.0), 0.165569);
+ GLOW_ADD(vec2(2.0, 0.0), 0.140367);
+ GLOW_ADD(vec2(3.0, 0.0), 0.106595);
+ GLOW_ADD(vec2(-1.0, 0.0), 0.165569);
+ GLOW_ADD(vec2(-2.0, 0.0), 0.140367);
+ GLOW_ADD(vec2(-3.0, 0.0), 0.106595);
+
+ // Only do this in the horizontal pass; doing it in the vertical pass as well would double it up.
+ color *= blur.glow_strength;
+
+ frag_color = color;
+ } else {
+ vec2 pix_size = blur.pixel_size;
+ vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.288713;
+ GLOW_ADD(vec2(0.0, 1.0), 0.233062);
+ GLOW_ADD(vec2(0.0, 2.0), 0.122581);
+ GLOW_ADD(vec2(0.0, -1.0), 0.233062);
+ GLOW_ADD(vec2(0.0, -2.0), 0.122581);
+
+ frag_color = color;
+ }
+
+#undef GLOW_ADD
+
+ if (bool(blur.flags & FLAG_GLOW_FIRST_PASS)) {
+ // In the first pass, bring the color back to the correct range, otherwise we apply the wrong threshold;
+ // in subsequent passes we can use it as-is, since we would just be undoing it right after.
+ frag_color *= blur.luminance_multiplier;
+
+#ifdef GLOW_USE_AUTO_EXPOSURE
+
+ frag_color /= texelFetch(source_auto_exposure, ivec2(0, 0), 0).r / blur.glow_auto_exposure_grey;
+#endif
+ frag_color *= blur.glow_exposure;
+
+ float luminance = max(frag_color.r, max(frag_color.g, frag_color.b));
+ float feedback = max(smoothstep(blur.glow_hdr_threshold, blur.glow_hdr_threshold + blur.glow_hdr_scale, luminance), blur.glow_bloom);
+
+ frag_color = min(frag_color * feedback, vec4(blur.glow_luminance_cap)) / blur.luminance_multiplier;
+ }
+
+#endif // MODE_GAUSSIAN_GLOW
+
+#ifdef MODE_COPY
+ vec4 color = textureLod(source_color, uv_interp, 0.0);
+ frag_color = color;
+#endif
+}
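
The hard-coded kernel weights above are pre-normalized so that each full symmetric kernel sums to 1.0. As a minimal sketch (not part of the commit; the sigma value is an assumption inferred from the weights), such weights can be derived from a normalized 1D Gaussian:

    import math

    def gaussian_weights(taps, sigma):
        # One-sided weights for offsets 0..taps, normalized so the full symmetric
        # kernel (center + 2x each side tap) sums to 1.0.
        raw = [math.exp(-(i * i) / (2.0 * sigma * sigma)) for i in range(taps + 1)]
        total = raw[0] + 2.0 * sum(raw[1:])
        return [w / total for w in raw]

    # Approximately reproduces the 7-tap blur weights above (0.2146, 0.1899, 0.1315, 0.0713).
    print(gaussian_weights(3, 2.0))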
diff --git a/servers/rendering/renderer_rd/shaders/effects/blur_raster_inc.glsl b/servers/rendering/renderer_rd/shaders/effects/blur_raster_inc.glsl
new file mode 100644
index 0000000000..730504571a
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/blur_raster_inc.glsl
@@ -0,0 +1,26 @@
+#define FLAG_HORIZONTAL (1 << 0)
+#define FLAG_USE_ORTHOGONAL_PROJECTION (1 << 1)
+#define FLAG_GLOW_FIRST_PASS (1 << 2)
+
+layout(push_constant, std430) uniform Blur {
+ vec2 pixel_size; // 08 - 08
+ uint flags; // 04 - 12
+ uint pad; // 04 - 16
+
+ // Glow.
+ float glow_strength; // 04 - 20
+ float glow_bloom; // 04 - 24
+ float glow_hdr_threshold; // 04 - 28
+ float glow_hdr_scale; // 04 - 32
+
+ float glow_exposure; // 04 - 36
+ float glow_white; // 04 - 40
+ float glow_luminance_cap; // 04 - 44
+ float glow_auto_exposure_grey; // 04 - 48
+
+ float luminance_multiplier; // 04 - 52
+ float res1; // 04 - 56
+ float res2; // 04 - 60
+ float res3; // 04 - 64
+}
+blur;
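
The trailing comments document each member's size and running byte offset; the whole std430 push constant block is 64 bytes. A minimal Python sketch of a matching CPU-side packing, purely illustrative (the helper name and field values are hypothetical; the layout is simply read off the comments above):

    import struct

    def pack_blur_push_constant(pixel_size, flags, glow=(0.0,) * 8, luminance_multiplier=1.0):
        # 2 floats + 2 uints + 8 glow floats + luminance multiplier + 3 reserved floats = 64 bytes.
        return struct.pack("<2f2I8f4f", *pixel_size, flags, 0,
                           *glow, luminance_multiplier, 0.0, 0.0, 0.0)

    data = pack_blur_push_constant((1.0 / 1920.0, 1.0 / 1080.0), flags=1)
    assert len(data) == 64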
diff --git a/servers/rendering/renderer_rd/shaders/effects/bokeh_dof.glsl b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof.glsl
new file mode 100644
index 0000000000..0438671dd2
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof.glsl
@@ -0,0 +1,215 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define BLOCK_SIZE 8
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = BLOCK_SIZE, local_size_z = 1) in;
+
+#ifdef MODE_GEN_BLUR_SIZE
+layout(rgba16f, set = 0, binding = 0) uniform restrict image2D color_image;
+layout(set = 1, binding = 0) uniform sampler2D source_depth;
+#endif
+
+#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL) || defined(MODE_BOKEH_CIRCULAR)
+layout(set = 1, binding = 0) uniform sampler2D color_texture;
+layout(rgba16f, set = 0, binding = 0) uniform restrict writeonly image2D bokeh_image;
+#endif
+
+#ifdef MODE_COMPOSITE_BOKEH
+layout(rgba16f, set = 0, binding = 0) uniform restrict image2D color_image;
+layout(set = 1, binding = 0) uniform sampler2D source_bokeh;
+#endif
+
+// based on https://www.shadertoy.com/view/Xd3GDl
+
+#include "bokeh_dof_inc.glsl"
+
+#ifdef MODE_GEN_BLUR_SIZE
+
+float get_depth_at_pos(vec2 uv) {
+ float depth = textureLod(source_depth, uv, 0.0).x;
+ if (params.orthogonal) {
+ depth = ((depth + (params.z_far + params.z_near) / (params.z_far - params.z_near)) * (params.z_far - params.z_near)) / 2.0;
+ } else {
+ depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near - depth * (params.z_far - params.z_near));
+ }
+ return depth;
+}
+
+float get_blur_size(float depth) {
+ if (params.blur_near_active && depth < params.blur_near_begin) {
+ return -(1.0 - smoothstep(params.blur_near_end, params.blur_near_begin, depth)) * params.blur_size - DEPTH_GAP; //near blur is negative
+ }
+
+ if (params.blur_far_active && depth > params.blur_far_begin) {
+ return smoothstep(params.blur_far_begin, params.blur_far_end, depth) * params.blur_size + DEPTH_GAP;
+ }
+
+ return 0.0;
+}
+
+#endif
+
+#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL)
+
+vec4 weighted_filter_dir(vec2 dir, vec2 uv, vec2 pixel_size) {
+ dir *= pixel_size;
+ vec4 color = texture(color_texture, uv);
+
+ vec4 accum = color;
+ float total = 1.0;
+
+ float blur_scale = params.blur_size / float(params.blur_steps);
+
+ if (params.use_jitter) {
+ uv += dir * (hash12n(uv + params.jitter_seed) - 0.5);
+ }
+
+ for (int i = -params.blur_steps; i <= params.blur_steps; i++) {
+ if (i == 0) {
+ continue;
+ }
+ float radius = float(i) * blur_scale;
+ vec2 suv = uv + dir * radius;
+ radius = abs(radius);
+
+ vec4 sample_color = texture(color_texture, suv);
+ float limit;
+
+ if (sample_color.a < color.a) {
+ limit = abs(sample_color.a);
+ } else {
+ limit = abs(color.a);
+ }
+
+ limit -= DEPTH_GAP;
+
+ float m = smoothstep(radius - 0.5, radius + 0.5, limit);
+
+ accum += mix(color, sample_color, m);
+
+ total += 1.0;
+ }
+
+ return accum / total;
+}
+
+#endif
+
+void main() {
+ ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
+
+ if (any(greaterThanEqual(pos, params.size))) { //too large, do nothing
+ return;
+ }
+
+ vec2 pixel_size = 1.0 / vec2(params.size);
+ vec2 uv = vec2(pos) / vec2(params.size);
+
+#ifdef MODE_GEN_BLUR_SIZE
+ uv += pixel_size * 0.5;
+ //precompute size in alpha channel
+ float depth = get_depth_at_pos(uv);
+ float size = get_blur_size(depth);
+
+ vec4 color = imageLoad(color_image, pos);
+ color.a = size;
+ imageStore(color_image, pos, color);
+#endif
+
+#ifdef MODE_BOKEH_BOX
+
+ //pixel_size*=0.5; //resolution is doubled
+ if (params.second_pass || !params.half_size) {
+ uv += pixel_size * 0.5; //half pixel to read centers
+ } else {
+ uv += pixel_size * 0.25; //half pixel to read centers from full res
+ }
+
+ vec2 dir = (params.second_pass ? vec2(0.0, 1.0) : vec2(1.0, 0.0));
+
+ vec4 color = weighted_filter_dir(dir, uv, pixel_size);
+
+ imageStore(bokeh_image, pos, color);
+
+#endif
+
+#ifdef MODE_BOKEH_HEXAGONAL
+
+ //pixel_size*=0.5; //resolution is doubled
+ if (params.second_pass || !params.half_size) {
+ uv += pixel_size * 0.5; //half pixel to read centers
+ } else {
+ uv += pixel_size * 0.25; //half pixel to read centers from full res
+ }
+
+ vec2 dir = (params.second_pass ? normalize(vec2(1.0, 0.577350269189626)) : vec2(0.0, 1.0));
+
+ vec4 color = weighted_filter_dir(dir, uv, pixel_size);
+
+ if (params.second_pass) {
+ dir = normalize(vec2(-1.0, 0.577350269189626));
+
+ vec4 color2 = weighted_filter_dir(dir, uv, pixel_size);
+
+ color.rgb = min(color.rgb, color2.rgb);
+ color.a = (color.a + color2.a) * 0.5;
+ }
+
+ imageStore(bokeh_image, pos, color);
+
+#endif
+
+#ifdef MODE_BOKEH_CIRCULAR
+
+ if (params.half_size) {
+ pixel_size *= 0.5; //resolution is doubled
+ }
+
+ uv += pixel_size * 0.5; //half pixel to read centers
+
+ vec4 color = texture(color_texture, uv);
+ float accum = 1.0;
+ float radius = params.blur_scale;
+
+ for (float ang = 0.0; radius < params.blur_size; ang += GOLDEN_ANGLE) {
+ vec2 suv = uv + vec2(cos(ang), sin(ang)) * pixel_size * radius;
+ vec4 sample_color = texture(color_texture, suv);
+ float sample_size = abs(sample_color.a);
+ if (sample_color.a > color.a) {
+ sample_size = clamp(sample_size, 0.0, abs(color.a) * 2.0);
+ }
+
+ float m = smoothstep(radius - 0.5, radius + 0.5, sample_size);
+ color += mix(color / accum, sample_color, m);
+ accum += 1.0;
+ radius += params.blur_scale / radius;
+ }
+
+ color /= accum;
+
+ imageStore(bokeh_image, pos, color);
+#endif
+
+#ifdef MODE_COMPOSITE_BOKEH
+
+ uv += pixel_size * 0.5;
+ vec4 color = imageLoad(color_image, pos);
+ vec4 bokeh = texture(source_bokeh, uv);
+
+ float mix_amount;
+ if (bokeh.a < color.a) {
+ mix_amount = min(1.0, max(0.0, max(abs(color.a), abs(bokeh.a)) - DEPTH_GAP));
+ } else {
+ mix_amount = min(1.0, max(0.0, abs(color.a) - DEPTH_GAP));
+ }
+
+ color.rgb = mix(color.rgb, bokeh.rgb, mix_amount); //blend between hires and lowres
+
+ color.a = 0; //reset alpha
+ imageStore(color_image, pos, color);
+#endif
+}
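
get_depth_at_pos() converts the raw depth-buffer value back to a view-space distance before the blur size is computed. A small Python reference of the perspective branch (the near/far values below are arbitrary examples, not taken from the engine):

    def linearize_depth(depth, z_near, z_far):
        # Same expression as the non-orthogonal branch of get_depth_at_pos() above;
        # the shader feeds the raw [0, 1] depth-buffer value straight into it.
        return 2.0 * z_near * z_far / (z_far + z_near - depth * (z_far - z_near))

    for d in (0.1, 0.5, 0.9, 1.0):
        print(d, linearize_depth(d, 0.05, 4000.0))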
diff --git a/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_inc.glsl b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_inc.glsl
new file mode 100644
index 0000000000..b90a527554
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_inc.glsl
@@ -0,0 +1,37 @@
+layout(push_constant, std430) uniform Params {
+ ivec2 size;
+ float z_far;
+ float z_near;
+
+ bool orthogonal;
+ float blur_size;
+ float blur_scale;
+ int blur_steps;
+
+ bool blur_near_active;
+ float blur_near_begin;
+ float blur_near_end;
+ bool blur_far_active;
+
+ float blur_far_begin;
+ float blur_far_end;
+ bool second_pass;
+ bool half_size;
+
+ bool use_jitter;
+ float jitter_seed;
+ uint pad[2];
+}
+params;
+
+//used to work around downsampling filter
+#define DEPTH_GAP 0.0
+
+const float GOLDEN_ANGLE = 2.39996323;
+
+//note: uniform pdf rand [0;1[
+float hash12n(vec2 p) {
+ p = fract(p * vec2(5.3987, 5.4421));
+ p += dot(p.yx, p.xy + vec2(21.5351, 14.3137));
+ return fract(p.x * p.y * 95.4307);
+}
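
MODE_BOKEH_CIRCULAR walks a spiral of taps: the angle advances by GOLDEN_ANGLE each step while the radius grows by blur_scale / radius, which spreads samples roughly evenly over the disc area. A minimal Python sketch of the offsets that loop generates (blur_scale and blur_size are arbitrary example values):

    import math

    GOLDEN_ANGLE = 2.39996323  # same constant as defined above

    def circular_bokeh_offsets(blur_scale=0.5, blur_size=8.0):
        # Mirrors the loop in MODE_BOKEH_CIRCULAR: the angle steps by the golden angle
        # and the radius grows by blur_scale / radius, tracing an even spiral.
        offsets = []
        ang, radius = 0.0, blur_scale
        while radius < blur_size:
            offsets.append((math.cos(ang) * radius, math.sin(ang) * radius))
            ang += GOLDEN_ANGLE
            radius += blur_scale / radius
        return offsets

    samples = circular_bokeh_offsets()
    print(len(samples), samples[:3])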
diff --git a/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_raster.glsl b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_raster.glsl
new file mode 100644
index 0000000000..a3b3938ee9
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/bokeh_dof_raster.glsl
@@ -0,0 +1,253 @@
+/* clang-format off */
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "bokeh_dof_inc.glsl"
+
+layout(location = 0) out vec2 uv_interp;
+/* clang-format on */
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+
+ gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
+}
+
+/* clang-format off */
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "bokeh_dof_inc.glsl"
+
+layout(location = 0) in vec2 uv_interp;
+/* clang-format on */
+
+#ifdef MODE_GEN_BLUR_SIZE
+layout(location = 0) out float weight;
+
+layout(set = 0, binding = 0) uniform sampler2D source_depth;
+#else
+layout(location = 0) out vec4 frag_color;
+#ifdef OUTPUT_WEIGHT
+layout(location = 1) out float weight;
+#endif
+
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+layout(set = 1, binding = 0) uniform sampler2D source_weight;
+#ifdef MODE_COMPOSITE_BOKEH
+layout(set = 2, binding = 0) uniform sampler2D original_weight;
+#endif
+#endif
+
+//DOF
+// Bokeh single pass implementation based on https://tuxedolabs.blogspot.com/2018/05/bokeh-depth-of-field-in-single-pass.html
+
+#ifdef MODE_GEN_BLUR_SIZE
+
+float get_depth_at_pos(vec2 uv) {
+ float depth = textureLod(source_depth, uv, 0.0).x;
+ if (params.orthogonal) {
+ depth = ((depth + (params.z_far + params.z_near) / (params.z_far - params.z_near)) * (params.z_far - params.z_near)) / 2.0;
+ } else {
+ depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near - depth * (params.z_far - params.z_near));
+ }
+ return depth;
+}
+
+float get_blur_size(float depth) {
+ if (params.blur_near_active && depth < params.blur_near_begin) {
+ return -(1.0 - smoothstep(params.blur_near_end, params.blur_near_begin, depth)) * params.blur_size - DEPTH_GAP; //near blur is negative
+ }
+
+ if (params.blur_far_active && depth > params.blur_far_begin) {
+ return smoothstep(params.blur_far_begin, params.blur_far_end, depth) * params.blur_size + DEPTH_GAP;
+ }
+
+ return 0.0;
+}
+
+#endif
+
+#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL)
+
+vec4 weighted_filter_dir(vec2 dir, vec2 uv, vec2 pixel_size) {
+ dir *= pixel_size;
+ vec4 color = texture(source_color, uv);
+ color.a = texture(source_weight, uv).r;
+
+ vec4 accum = color;
+ float total = 1.0;
+
+ float blur_scale = params.blur_size / float(params.blur_steps);
+
+ if (params.use_jitter) {
+ uv += dir * (hash12n(uv + params.jitter_seed) - 0.5);
+ }
+
+ for (int i = -params.blur_steps; i <= params.blur_steps; i++) {
+ if (i == 0) {
+ continue;
+ }
+ float radius = float(i) * blur_scale;
+ vec2 suv = uv + dir * radius;
+ radius = abs(radius);
+
+ vec4 sample_color = texture(source_color, suv);
+ sample_color.a = texture(source_weight, suv).r;
+ float limit;
+
+ if (sample_color.a < color.a) {
+ limit = abs(sample_color.a);
+ } else {
+ limit = abs(color.a);
+ }
+
+ limit -= DEPTH_GAP;
+
+ float m = smoothstep(radius - 0.5, radius + 0.5, limit);
+
+ accum += mix(color, sample_color, m);
+
+ total += 1.0;
+ }
+
+ return accum / total;
+}
+
+#endif
+
+void main() {
+ vec2 pixel_size = 1.0 / vec2(params.size);
+ vec2 uv = uv_interp;
+
+#ifdef MODE_GEN_BLUR_SIZE
+ uv += pixel_size * 0.5;
+ float center_depth = get_depth_at_pos(uv);
+ weight = get_blur_size(center_depth);
+#endif
+
+#ifdef MODE_BOKEH_BOX
+ //pixel_size*=0.5; //resolution is doubled
+ if (params.second_pass || !params.half_size) {
+ uv += pixel_size * 0.5; //half pixel to read centers
+ } else {
+ uv += pixel_size * 0.25; //half pixel to read centers from full res
+ }
+
+ float alpha = texture(source_color, uv).a; // retain this
+ vec2 dir = (params.second_pass ? vec2(0.0, 1.0) : vec2(1.0, 0.0));
+
+ vec4 color = weighted_filter_dir(dir, uv, pixel_size);
+
+ frag_color = color;
+ frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
+#ifdef OUTPUT_WEIGHT
+ weight = color.a;
+#endif
+
+#endif
+
+#ifdef MODE_BOKEH_HEXAGONAL
+
+ //pixel_size*=0.5; //resolution is doubled
+ if (params.second_pass || !params.half_size) {
+ uv += pixel_size * 0.5; //half pixel to read centers
+ } else {
+ uv += pixel_size * 0.25; //half pixel to read centers from full res
+ }
+
+ float alpha = texture(source_color, uv).a; // retain this
+
+ vec2 dir = (params.second_pass ? normalize(vec2(1.0, 0.577350269189626)) : vec2(0.0, 1.0));
+
+ vec4 color = weighted_filter_dir(dir, uv, pixel_size);
+
+ if (params.second_pass) {
+ dir = normalize(vec2(-1.0, 0.577350269189626));
+
+ vec4 color2 = weighted_filter_dir(dir, uv, pixel_size);
+
+ color.rgb = min(color.rgb, color2.rgb);
+ color.a = (color.a + color2.a) * 0.5;
+ }
+
+ frag_color = color;
+ frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
+#ifdef OUTPUT_WEIGHT
+ weight = color.a;
+#endif
+
+#endif
+
+#ifdef MODE_BOKEH_CIRCULAR
+ if (params.half_size) {
+ pixel_size *= 0.5; //resolution is doubled
+ }
+
+ uv += pixel_size * 0.5; //half pixel to read centers
+
+ vec4 color = texture(source_color, uv);
+ float alpha = color.a; // retain this
+ color.a = texture(source_weight, uv).r;
+
+ vec4 color_accum = color;
+ float accum = 1.0;
+
+ float radius = params.blur_scale;
+ for (float ang = 0.0; radius < params.blur_size; ang += GOLDEN_ANGLE) {
+ vec2 uv_adj = uv + vec2(cos(ang), sin(ang)) * pixel_size * radius;
+
+ vec4 sample_color = texture(source_color, uv_adj);
+ sample_color.a = texture(source_weight, uv_adj).r;
+
+ float limit;
+
+ if (sample_color.a < color.a) {
+ limit = abs(sample_color.a);
+ } else {
+ limit = abs(color.a);
+ }
+
+ limit -= DEPTH_GAP;
+
+ float m = smoothstep(radius - 0.5, radius + 0.5, limit);
+ color_accum += mix(color_accum / accum, sample_color, m);
+ accum += 1.0;
+
+ radius += params.blur_scale / radius;
+ }
+
+ color_accum = color_accum / accum;
+
+ frag_color.rgb = color_accum.rgb;
+ frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
+#ifdef OUTPUT_WEIGHT
+ weight = color_accum.a;
+#endif
+
+#endif
+
+#ifdef MODE_COMPOSITE_BOKEH
+ frag_color.rgb = texture(source_color, uv).rgb;
+
+ float center_weight = texture(source_weight, uv).r;
+ float sample_weight = texture(original_weight, uv).r;
+
+ float mix_amount;
+ if (sample_weight < center_weight) {
+ mix_amount = min(1.0, max(0.0, max(abs(center_weight), abs(sample_weight)) - DEPTH_GAP));
+ } else {
+ mix_amount = min(1.0, max(0.0, abs(center_weight) - DEPTH_GAP));
+ }
+
+ // let alpha blending take care of mixing
+ frag_color.a = mix_amount;
+#endif
+}
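
In MODE_COMPOSITE_BOKEH the raster path writes mix_amount into frag_color.a and leaves the mixing to the blend stage, where the compute variant calls mix() explicitly. A tiny Python check of that equivalence, assuming the pipeline uses standard source-alpha blending (an assumption; the blend state is configured on the C++ side, not in this file):

    def alpha_blend(dst_rgb, src_rgb, src_a):
        # SRC_ALPHA / ONE_MINUS_SRC_ALPHA blending of the bokeh layer over the base color.
        return tuple(s * src_a + d * (1.0 - src_a) for s, d in zip(src_rgb, dst_rgb))

    def mix(a, b, t):
        # GLSL-style linear interpolation, as used by the compute variant.
        return tuple(x * (1.0 - t) + y * t for x, y in zip(a, b))

    base, bokeh, amount = (0.2, 0.3, 0.4), (0.8, 0.7, 0.6), 0.25
    assert alpha_blend(base, bokeh, amount) == mix(base, bokeh, amount)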
diff --git a/servers/rendering/renderer_rd/shaders/effects/copy.glsl b/servers/rendering/renderer_rd/shaders/effects/copy.glsl
new file mode 100644
index 0000000000..3a4ef86ef0
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/copy.glsl
@@ -0,0 +1,284 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+#define FLAG_HORIZONTAL (1 << 0)
+#define FLAG_USE_BLUR_SECTION (1 << 1)
+#define FLAG_USE_ORTHOGONAL_PROJECTION (1 << 2)
+#define FLAG_DOF_NEAR_FIRST_TAP (1 << 3)
+#define FLAG_GLOW_FIRST_PASS (1 << 4)
+#define FLAG_FLIP_Y (1 << 5)
+#define FLAG_FORCE_LUMINANCE (1 << 6)
+#define FLAG_COPY_ALL_SOURCE (1 << 7)
+#define FLAG_HIGH_QUALITY_GLOW (1 << 8)
+#define FLAG_ALPHA_TO_ONE (1 << 9)
+
+layout(push_constant, std430) uniform Params {
+ ivec4 section;
+ ivec2 target;
+ uint flags;
+ uint pad;
+ // Glow.
+ float glow_strength;
+ float glow_bloom;
+ float glow_hdr_threshold;
+ float glow_hdr_scale;
+
+ float glow_exposure;
+ float glow_white;
+ float glow_luminance_cap;
+ float glow_auto_exposure_grey;
+ // DOF.
+ float camera_z_far;
+ float camera_z_near;
+ uint pad2[2];
+
+ vec4 set_color;
+}
+params;
+
+#ifdef MODE_CUBEMAP_ARRAY_TO_PANORAMA
+layout(set = 0, binding = 0) uniform samplerCubeArray source_color;
+#elif defined(MODE_CUBEMAP_TO_PANORAMA)
+layout(set = 0, binding = 0) uniform samplerCube source_color;
+#elif !defined(MODE_SET_COLOR)
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+#endif
+
+#ifdef GLOW_USE_AUTO_EXPOSURE
+layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
+#endif
+
+#if defined(MODE_LINEARIZE_DEPTH_COPY) || defined(MODE_SIMPLE_COPY_DEPTH)
+layout(r32f, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
+#elif defined(DST_IMAGE_8BIT)
+layout(rgba8, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
+#else
+layout(rgba32f, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
+#endif
+
+#ifdef MODE_GAUSSIAN_BLUR
+shared vec4 local_cache[256];
+shared vec4 temp_cache[128];
+#endif
+
+void main() {
+ // Pixel being shaded
+ ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
+
+#ifndef MODE_GAUSSIAN_BLUR // Gaussian blur needs the extra threads
+ if (any(greaterThanEqual(pos, params.section.zw))) { //too large, do nothing
+ return;
+ }
+#endif
+
+#ifdef MODE_MIPMAP
+
+ ivec2 base_pos = (pos + params.section.xy) << 1;
+ vec4 color = texelFetch(source_color, base_pos, 0);
+ color += texelFetch(source_color, base_pos + ivec2(0, 1), 0);
+ color += texelFetch(source_color, base_pos + ivec2(1, 0), 0);
+ color += texelFetch(source_color, base_pos + ivec2(1, 1), 0);
+ color /= 4.0;
+ color = mix(color, vec4(100.0, 100.0, 100.0, 1.0), isinf(color));
+ color = mix(color, vec4(100.0, 100.0, 100.0, 1.0), isnan(color));
+
+ imageStore(dest_buffer, pos + params.target, color);
+#endif
+
+#ifdef MODE_GAUSSIAN_BLUR
+
+ // First pass copy texture into 16x16 local memory for every 8x8 thread block
+ vec2 quad_center_uv = clamp(vec2(gl_GlobalInvocationID.xy + gl_LocalInvocationID.xy - 3.5) / params.section.zw, vec2(0.5 / params.section.zw), vec2(1.0 - 1.5 / params.section.zw));
+ uint dest_index = gl_LocalInvocationID.x * 2 + gl_LocalInvocationID.y * 2 * 16;
+
+#ifdef MODE_GLOW
+ if (bool(params.flags & FLAG_HIGH_QUALITY_GLOW)) {
+ vec2 quad_offset_uv = clamp((vec2(gl_GlobalInvocationID.xy + gl_LocalInvocationID.xy - 3.0)) / params.section.zw, vec2(0.5 / params.section.zw), vec2(1.0 - 1.5 / params.section.zw));
+
+ local_cache[dest_index] = (textureLod(source_color, quad_center_uv, 0) + textureLod(source_color, quad_offset_uv, 0)) * 0.5;
+ local_cache[dest_index + 1] = (textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.z, 0.0), 0) + textureLod(source_color, quad_offset_uv + vec2(1.0 / params.section.z, 0.0), 0)) * 0.5;
+ local_cache[dest_index + 16] = (textureLod(source_color, quad_center_uv + vec2(0.0, 1.0 / params.section.w), 0) + textureLod(source_color, quad_offset_uv + vec2(0.0, 1.0 / params.section.w), 0)) * 0.5;
+ local_cache[dest_index + 16 + 1] = (textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.zw), 0) + textureLod(source_color, quad_offset_uv + vec2(1.0 / params.section.zw), 0)) * 0.5;
+ } else
+#endif
+ {
+ local_cache[dest_index] = textureLod(source_color, quad_center_uv, 0);
+ local_cache[dest_index + 1] = textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.z, 0.0), 0);
+ local_cache[dest_index + 16] = textureLod(source_color, quad_center_uv + vec2(0.0, 1.0 / params.section.w), 0);
+ local_cache[dest_index + 16 + 1] = textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.zw), 0);
+ }
+#ifdef MODE_GLOW
+ if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
+ // Tonemap initial samples to reduce weight of fireflies: https://graphicrants.blogspot.com/2013/12/tone-mapping.html
+ local_cache[dest_index] /= 1.0 + dot(local_cache[dest_index].rgb, vec3(0.299, 0.587, 0.114));
+ local_cache[dest_index + 1] /= 1.0 + dot(local_cache[dest_index + 1].rgb, vec3(0.299, 0.587, 0.114));
+ local_cache[dest_index + 16] /= 1.0 + dot(local_cache[dest_index + 16].rgb, vec3(0.299, 0.587, 0.114));
+ local_cache[dest_index + 16 + 1] /= 1.0 + dot(local_cache[dest_index + 16 + 1].rgb, vec3(0.299, 0.587, 0.114));
+ }
+ const float kernel[4] = { 0.174938, 0.165569, 0.140367, 0.106595 };
+#else
+ // Simpler blur uses SIGMA2 for the gaussian kernel for a stronger effect.
+ const float kernel[4] = { 0.214607, 0.189879, 0.131514, 0.071303 };
+#endif
+ memoryBarrierShared();
+ barrier();
+
+ // Horizontal pass. Needs to copy into 8x16 chunk of local memory so vertical pass has full resolution
+ uint read_index = gl_LocalInvocationID.x + gl_LocalInvocationID.y * 32 + 4;
+ vec4 color_top = vec4(0.0);
+ color_top += local_cache[read_index] * kernel[0];
+ color_top += local_cache[read_index + 1] * kernel[1];
+ color_top += local_cache[read_index + 2] * kernel[2];
+ color_top += local_cache[read_index + 3] * kernel[3];
+ color_top += local_cache[read_index - 1] * kernel[1];
+ color_top += local_cache[read_index - 2] * kernel[2];
+ color_top += local_cache[read_index - 3] * kernel[3];
+
+ vec4 color_bottom = vec4(0.0);
+ color_bottom += local_cache[read_index + 16] * kernel[0];
+ color_bottom += local_cache[read_index + 1 + 16] * kernel[1];
+ color_bottom += local_cache[read_index + 2 + 16] * kernel[2];
+ color_bottom += local_cache[read_index + 3 + 16] * kernel[3];
+ color_bottom += local_cache[read_index - 1 + 16] * kernel[1];
+ color_bottom += local_cache[read_index - 2 + 16] * kernel[2];
+ color_bottom += local_cache[read_index - 3 + 16] * kernel[3];
+
+ // rotate samples to take advantage of cache coherency
+ uint write_index = gl_LocalInvocationID.y * 2 + gl_LocalInvocationID.x * 16;
+
+ temp_cache[write_index] = color_top;
+ temp_cache[write_index + 1] = color_bottom;
+
+ memoryBarrierShared();
+ barrier();
+
+ // If destination outside of texture, can stop doing work now
+ if (any(greaterThanEqual(pos, params.section.zw))) {
+ return;
+ }
+
+ // Vertical pass
+ uint index = gl_LocalInvocationID.y + gl_LocalInvocationID.x * 16 + 4;
+ vec4 color = vec4(0.0);
+
+ color += temp_cache[index] * kernel[0];
+ color += temp_cache[index + 1] * kernel[1];
+ color += temp_cache[index + 2] * kernel[2];
+ color += temp_cache[index + 3] * kernel[3];
+ color += temp_cache[index - 1] * kernel[1];
+ color += temp_cache[index - 2] * kernel[2];
+ color += temp_cache[index - 3] * kernel[3];
+
+#ifdef MODE_GLOW
+ if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
+ // Undo tonemap to restore range: https://graphicrants.blogspot.com/2013/12/tone-mapping.html
+ color /= 1.0 - dot(color.rgb, vec3(0.299, 0.587, 0.114));
+ }
+
+ color *= params.glow_strength;
+
+ if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
+#ifdef GLOW_USE_AUTO_EXPOSURE
+
+ color /= texelFetch(source_auto_exposure, ivec2(0, 0), 0).r / params.glow_auto_exposure_grey;
+#endif
+ color *= params.glow_exposure;
+
+ float luminance = max(color.r, max(color.g, color.b));
+ float feedback = max(smoothstep(params.glow_hdr_threshold, params.glow_hdr_threshold + params.glow_hdr_scale, luminance), params.glow_bloom);
+
+ color = min(color * feedback, vec4(params.glow_luminance_cap));
+ }
+#endif
+ imageStore(dest_buffer, pos + params.target, color);
+
+#endif
+
+#ifdef MODE_SIMPLE_COPY
+
+ vec4 color;
+ if (bool(params.flags & FLAG_COPY_ALL_SOURCE)) {
+ vec2 uv = vec2(pos) / vec2(params.section.zw);
+ if (bool(params.flags & FLAG_FLIP_Y)) {
+ uv.y = 1.0 - uv.y;
+ }
+ color = textureLod(source_color, uv, 0.0);
+
+ } else {
+ color = texelFetch(source_color, pos + params.section.xy, 0);
+
+ if (bool(params.flags & FLAG_FLIP_Y)) {
+ pos.y = params.section.w - pos.y - 1;
+ }
+ }
+
+ if (bool(params.flags & FLAG_FORCE_LUMINANCE)) {
+ color.rgb = vec3(max(max(color.r, color.g), color.b));
+ }
+
+ if (bool(params.flags & FLAG_ALPHA_TO_ONE)) {
+ color.a = 1.0;
+ }
+
+ imageStore(dest_buffer, pos + params.target, color);
+
+#endif
+
+#ifdef MODE_SIMPLE_COPY_DEPTH
+
+ vec4 color = texelFetch(source_color, pos + params.section.xy, 0);
+
+ if (bool(params.flags & FLAG_FLIP_Y)) {
+ pos.y = params.section.w - pos.y - 1;
+ }
+
+ imageStore(dest_buffer, pos + params.target, vec4(color.r));
+
+#endif
+
+#ifdef MODE_LINEARIZE_DEPTH_COPY
+
+ float depth = texelFetch(source_color, pos + params.section.xy, 0).r;
+ depth = depth * 2.0 - 1.0;
+ depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
+ vec4 color = vec4(depth / params.camera_z_far);
+
+ if (bool(params.flags & FLAG_FLIP_Y)) {
+ pos.y = params.section.w - pos.y - 1;
+ }
+
+ imageStore(dest_buffer, pos + params.target, color);
+#endif
+
+#if defined(MODE_CUBEMAP_TO_PANORAMA) || defined(MODE_CUBEMAP_ARRAY_TO_PANORAMA)
+
+ const float PI = 3.14159265359;
+ vec2 uv = vec2(pos) / vec2(params.section.zw);
+ if (bool(params.flags & FLAG_FLIP_Y)) {
+ uv.y = 1.0 - uv.y;
+ }
+ float phi = uv.x * 2.0 * PI;
+ float theta = uv.y * PI;
+
+ vec3 normal;
+ normal.x = sin(phi) * sin(theta) * -1.0;
+ normal.y = cos(theta);
+ normal.z = cos(phi) * sin(theta) * -1.0;
+
+#ifdef MODE_CUBEMAP_TO_PANORAMA
+ vec4 color = textureLod(source_color, normal, params.camera_z_far); // the higher the LOD, the less acne
+#else
+ vec4 color = textureLod(source_color, vec4(normal, params.camera_z_far), 0.0); // the higher the LOD, the less acne
+#endif
+ imageStore(dest_buffer, pos + params.target, color);
+#endif
+
+#ifdef MODE_SET_COLOR
+ imageStore(dest_buffer, pos + params.target, params.set_color);
+#endif
+}
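
In the glow path above, the first pass weights each initial sample by 1 / (1 + luma) and the vertical pass inverts that weighting after averaging, which suppresses isolated very bright pixels (fireflies). A small Python sketch of the forward/inverse pair, using the same vec3(0.299, 0.587, 0.114) luma coefficients as the shader:

    def luma(rgb):
        return 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]

    def tonemap(rgb):
        # Weighting applied to the initial samples in the glow first pass.
        w = 1.0 / (1.0 + luma(rgb))
        return tuple(c * w for c in rgb)

    def untonemap(rgb):
        # Inverse applied after the blur; note it uses the luma of the weighted color.
        w = 1.0 / (1.0 - luma(rgb))
        return tuple(c * w for c in rgb)

    c = (4.0, 2.0, 1.0)
    print(c, untonemap(tonemap(c)))  # round-trips a single sample up to float error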
diff --git a/servers/rendering/renderer_rd/shaders/effects/copy_to_fb.glsl b/servers/rendering/renderer_rd/shaders/effects/copy_to_fb.glsl
new file mode 100644
index 0000000000..1c17eabb56
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/copy_to_fb.glsl
@@ -0,0 +1,169 @@
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#endif //MULTIVIEW
+
+#ifdef MULTIVIEW
+layout(location = 0) out vec3 uv_interp;
+#else
+layout(location = 0) out vec2 uv_interp;
+#endif
+
+layout(push_constant, std430) uniform Params {
+ vec4 section;
+ vec2 pixel_size;
+ bool flip_y;
+ bool use_section;
+
+ bool force_luminance;
+ uint pad[3];
+}
+params;
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp.xy = base_arr[gl_VertexIndex];
+#ifdef MULTIVIEW
+ uv_interp.z = ViewIndex;
+#endif
+ vec2 vpos = uv_interp.xy;
+ if (params.use_section) {
+ vpos = params.section.xy + vpos * params.section.zw;
+ }
+
+ gl_Position = vec4(vpos * 2.0 - 1.0, 0.0, 1.0);
+
+ if (params.flip_y) {
+ uv_interp.y = 1.0 - uv_interp.y;
+ }
+}
+
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#endif //MULTIVIEW
+
+layout(push_constant, std430) uniform Params {
+ vec4 section;
+ vec2 pixel_size;
+ bool flip_y;
+ bool use_section;
+
+ bool force_luminance;
+ bool alpha_to_zero;
+ bool srgb;
+ uint pad;
+}
+params;
+
+#ifdef MULTIVIEW
+layout(location = 0) in vec3 uv_interp;
+#else
+layout(location = 0) in vec2 uv_interp;
+#endif
+
+#ifdef MULTIVIEW
+layout(set = 0, binding = 0) uniform sampler2DArray source_color;
+#ifdef MODE_TWO_SOURCES
+layout(set = 1, binding = 0) uniform sampler2DArray source_depth;
+layout(location = 1) out float depth;
+#endif /* MODE_TWO_SOURCES */
+#else /* MULTIVIEW */
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+#ifdef MODE_TWO_SOURCES
+layout(set = 1, binding = 0) uniform sampler2D source_color2;
+#endif /* MODE_TWO_SOURCES */
+#endif /* MULTIVIEW */
+
+layout(location = 0) out vec4 frag_color;
+
+vec3 linear_to_srgb(vec3 color) {
+ //if going to srgb, clamp from 0 to 1.
+ color = clamp(color, vec3(0.0), vec3(1.0));
+ const vec3 a = vec3(0.055f);
+ return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
+}
+
+void main() {
+#ifdef MULTIVIEW
+ vec3 uv = uv_interp;
+#else
+ vec2 uv = uv_interp;
+#endif
+
+#ifdef MODE_PANORAMA_TO_DP
+ // Note, multiview and panorama should not be mixed at this time
+
+ //obtain normal from dual paraboloid uv
+#define M_PI 3.14159265359
+
+ float side;
+ uv.y = modf(uv.y * 2.0, side);
+ side = side * 2.0 - 1.0;
+ vec3 normal = vec3(uv * 2.0 - 1.0, 0.0);
+ normal.z = 0.5 - 0.5 * ((normal.x * normal.x) + (normal.y * normal.y));
+ normal *= -side;
+ normal = normalize(normal);
+
+ //now convert normal to panorama uv
+
+ vec2 st = vec2(atan(normal.x, normal.z), acos(normal.y));
+
+ if (st.x < 0.0) {
+ st.x += M_PI * 2.0;
+ }
+
+ uv = st / vec2(M_PI * 2.0, M_PI);
+
+ if (side < 0.0) {
+ //uv.y = 1.0 - uv.y;
+ uv = 1.0 - uv;
+ }
+#endif /* MODE_PANORAMA_TO_DP */
+
+#ifdef MULTIVIEW
+ vec4 color = textureLod(source_color, uv, 0.0);
+#ifdef MODE_TWO_SOURCES
+ // In multiview our 2nd input will be our depth map
+ depth = textureLod(source_depth, uv, 0.0).r;
+#endif /* MODE_TWO_SOURCES */
+
+#else /* MULTIVIEW */
+ vec4 color = textureLod(source_color, uv, 0.0);
+#ifdef MODE_TWO_SOURCES
+ color += textureLod(source_color2, uv, 0.0);
+#endif /* MODE_TWO_SOURCES */
+#endif /* MULTIVIEW */
+
+ if (params.force_luminance) {
+ color.rgb = vec3(max(max(color.r, color.g), color.b));
+ }
+ if (params.alpha_to_zero) {
+ color.rgb *= color.a;
+ }
+ if (params.srgb) {
+ color.rgb = linear_to_srgb(color.rgb);
+ }
+
+ frag_color = color;
+}
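
linear_to_srgb() above is the standard piecewise sRGB transfer function. A minimal Python reference of the same per-channel mapping:

    def linear_to_srgb(c):
        # Same piecewise curve as the GLSL helper above, applied per channel.
        c = min(max(c, 0.0), 1.0)
        if c < 0.0031308:
            return 12.92 * c
        return 1.055 * c ** (1.0 / 2.4) - 0.055

    print(linear_to_srgb(0.5))  # ~0.7354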
diff --git a/servers/rendering/renderer_rd/shaders/effects/cube_to_dp.glsl b/servers/rendering/renderer_rd/shaders/effects/cube_to_dp.glsl
new file mode 100644
index 0000000000..e77d0de719
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cube_to_dp.glsl
@@ -0,0 +1,84 @@
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(push_constant, std430) uniform Params {
+ float z_far;
+ float z_near;
+ vec2 texel_size;
+ vec4 screen_rect;
+}
+params;
+
+layout(location = 0) out vec2 uv_interp;
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+ vec2 screen_pos = uv_interp * params.screen_rect.zw + params.screen_rect.xy;
+ gl_Position = vec4(screen_pos * 2.0 - 1.0, 0.0, 1.0);
+}
+
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(location = 0) in vec2 uv_interp;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cube;
+
+layout(push_constant, std430) uniform Params {
+ float z_far;
+ float z_near;
+ vec2 texel_size;
+ vec4 screen_rect;
+}
+params;
+
+void main() {
+ vec2 uv = uv_interp;
+ vec2 texel_size = abs(params.texel_size);
+
+ uv = clamp(uv * (1.0 + 2.0 * texel_size) - texel_size, vec2(0.0), vec2(1.0));
+
+ vec3 normal = vec3(uv * 2.0 - 1.0, 0.0);
+ normal.z = 0.5 * (1.0 - dot(normal.xy, normal.xy)); // z = 1/2 - 1/2 * (x^2 + y^2)
+ normal = normalize(normal);
+
+ normal.y = -normal.y; //needs to be flipped to match projection matrix
+ if (params.texel_size.x >= 0.0) { // Sign is used to encode Z flip
+ normal.z = -normal.z;
+ }
+
+ float depth = texture(source_cube, normal).r;
+
+ // absolute values for direction cosines, bigger value equals closer to basis axis
+ vec3 unorm = abs(normal);
+
+ if ((unorm.x >= unorm.y) && (unorm.x >= unorm.z)) {
+ // x code
+ unorm = normal.x > 0.0 ? vec3(1.0, 0.0, 0.0) : vec3(-1.0, 0.0, 0.0);
+ } else if ((unorm.y > unorm.x) && (unorm.y >= unorm.z)) {
+ // y code
+ unorm = normal.y > 0.0 ? vec3(0.0, 1.0, 0.0) : vec3(0.0, -1.0, 0.0);
+ } else if ((unorm.z > unorm.x) && (unorm.z > unorm.y)) {
+ // z code
+ unorm = normal.z > 0.0 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 0.0, -1.0);
+ } else {
+ // Should not be reachable; fall back to the +X axis.
+ unorm = vec3(1.0, 0.0, 0.0);
+ }
+
+ float depth_fix = 1.0 / dot(normal, unorm);
+
+ depth = 2.0 * depth - 1.0;
+ float linear_depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near - depth * (params.z_far - params.z_near));
+ depth = (linear_depth * depth_fix) / params.z_far;
+
+ gl_FragDepth = depth;
+}
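
The fragment shader above reconstructs a direction from the dual-paraboloid UV via z = 0.5 * (1 - (x^2 + y^2)), samples the cube depth along it, and rescales the linearized depth by 1 / dot(normal, axis) toward the dominant axis. A small Python sketch of just the direction reconstruction (the UV below is an arbitrary example; the Y and Z sign flips handled by the shader are omitted):

    import math

    def dp_direction(u, v):
        # Remap UV to [-1, 1], reconstruct the paraboloid z, then normalize,
        # matching the first few lines of the fragment shader above.
        x, y = u * 2.0 - 1.0, v * 2.0 - 1.0
        z = 0.5 * (1.0 - (x * x + y * y))
        length = math.sqrt(x * x + y * y + z * z)
        return (x / length, y / length, z / length)

    print(dp_direction(0.75, 0.5))  # -> (0.8, 0.0, 0.6)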
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler.glsl
new file mode 100644
index 0000000000..63f0ce690e
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler.glsl
@@ -0,0 +1,145 @@
+// Copyright 2016 Activision Publishing, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define BLOCK_SIZE 8
+
+layout(local_size_x = BLOCK_SIZE, local_size_y = BLOCK_SIZE, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
+
+layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly imageCube dest_cubemap;
+
+#include "cubemap_downsampler_inc.glsl"
+
+void main() {
+ uvec3 id = gl_GlobalInvocationID;
+ uint face_size = params.face_size;
+
+ if (id.x < face_size && id.y < face_size) {
+ float inv_face_size = 1.0 / float(face_size);
+
+ float u0 = (float(id.x) * 2.0 + 1.0 - 0.75) * inv_face_size - 1.0;
+ float u1 = (float(id.x) * 2.0 + 1.0 + 0.75) * inv_face_size - 1.0;
+
+ float v0 = (float(id.y) * 2.0 + 1.0 - 0.75) * -inv_face_size + 1.0;
+ float v1 = (float(id.y) * 2.0 + 1.0 + 0.75) * -inv_face_size + 1.0;
+
+ float weights[4];
+ weights[0] = calcWeight(u0, v0);
+ weights[1] = calcWeight(u1, v0);
+ weights[2] = calcWeight(u0, v1);
+ weights[3] = calcWeight(u1, v1);
+
+ const float wsum = 0.5 / (weights[0] + weights[1] + weights[2] + weights[3]);
+ for (int i = 0; i < 4; i++) {
+ weights[i] = weights[i] * wsum + .125;
+ }
+
+ vec3 dir;
+ vec4 color;
+ switch (id.z) {
+ case 0:
+ get_dir_0(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_0(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_0(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_0(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 1:
+ get_dir_1(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_1(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_1(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_1(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 2:
+ get_dir_2(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_2(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_2(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_2(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 3:
+ get_dir_3(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_3(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_3(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_3(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 4:
+ get_dir_4(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_4(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_4(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_4(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ default:
+ get_dir_5(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_5(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_5(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_5(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ }
+ imageStore(dest_cubemap, ivec3(id), color);
+ }
+}
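
The four bilinear taps are weighted by calcWeight() and then remapped so that half of the total weight is uniform (0.125 per tap) and half proportional to calcWeight(), which guarantees the four weights always sum to 1.0. A quick Python check of that normalization (the UV offsets are arbitrary example values):

    def calc_weight(u, v):
        # Same expression as calcWeight() in cubemap_downsampler_inc.glsl: (u^2 + v^2 + 1)^(3/2).
        val = u * u + v * v + 1.0
        return val * val ** 0.5

    raw = [calc_weight(u, v) for u, v in ((-0.3, -0.3), (0.3, -0.3), (-0.3, 0.3), (0.3, 0.3))]
    wsum = 0.5 / sum(raw)
    weights = [w * wsum + 0.125 for w in raw]
    print(weights, sum(weights))  # always sums to 1.0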
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_inc.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_inc.glsl
new file mode 100644
index 0000000000..641e0906f5
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_inc.glsl
@@ -0,0 +1,48 @@
+layout(push_constant, std430) uniform Params {
+ uint face_size;
+ uint face_id; // only used in raster shader
+}
+params;
+
+#define M_PI 3.14159265359
+
+void get_dir_0(out vec3 dir, in float u, in float v) {
+ dir[0] = 1.0;
+ dir[1] = v;
+ dir[2] = -u;
+}
+
+void get_dir_1(out vec3 dir, in float u, in float v) {
+ dir[0] = -1.0;
+ dir[1] = v;
+ dir[2] = u;
+}
+
+void get_dir_2(out vec3 dir, in float u, in float v) {
+ dir[0] = u;
+ dir[1] = 1.0;
+ dir[2] = -v;
+}
+
+void get_dir_3(out vec3 dir, in float u, in float v) {
+ dir[0] = u;
+ dir[1] = -1.0;
+ dir[2] = v;
+}
+
+void get_dir_4(out vec3 dir, in float u, in float v) {
+ dir[0] = u;
+ dir[1] = v;
+ dir[2] = 1.0;
+}
+
+void get_dir_5(out vec3 dir, in float u, in float v) {
+ dir[0] = -u;
+ dir[1] = v;
+ dir[2] = -1.0;
+}
+
+float calcWeight(float u, float v) {
+ float val = u * u + v * v + 1.0;
+ return val * sqrt(val);
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_raster.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_raster.glsl
new file mode 100644
index 0000000000..0828ffd921
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_downsampler_raster.glsl
@@ -0,0 +1,163 @@
+// Copyright 2016 Activision Publishing, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+/* clang-format off */
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "cubemap_downsampler_inc.glsl"
+
+layout(location = 0) out vec2 uv_interp;
+/* clang-format on */
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex] * float(params.face_size);
+ gl_Position = vec4(base_arr[gl_VertexIndex] * 2.0 - 1.0, 0.0, 1.0);
+}
+
+/* clang-format off */
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "cubemap_downsampler_inc.glsl"
+
+layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
+
+layout(location = 0) in vec2 uv_interp;
+layout(location = 0) out vec4 frag_color;
+/* clang-format on */
+
+void main() {
+ // Converted from compute shader which uses absolute coordinates.
+ // Could possibly simplify this
+ float face_size = float(params.face_size);
+
+ if (uv_interp.x < face_size && uv_interp.y < face_size) {
+ float inv_face_size = 1.0 / face_size;
+
+ float u0 = (uv_interp.x * 2.0 + 1.0 - 0.75) * inv_face_size - 1.0;
+ float u1 = (uv_interp.x * 2.0 + 1.0 + 0.75) * inv_face_size - 1.0;
+
+ float v0 = (uv_interp.y * 2.0 + 1.0 - 0.75) * -inv_face_size + 1.0;
+ float v1 = (uv_interp.y * 2.0 + 1.0 + 0.75) * -inv_face_size + 1.0;
+
+ float weights[4];
+ weights[0] = calcWeight(u0, v0);
+ weights[1] = calcWeight(u1, v0);
+ weights[2] = calcWeight(u0, v1);
+ weights[3] = calcWeight(u1, v1);
+
+ const float wsum = 0.5 / (weights[0] + weights[1] + weights[2] + weights[3]);
+ for (int i = 0; i < 4; i++) {
+ weights[i] = weights[i] * wsum + .125;
+ }
+
+ vec3 dir;
+ vec4 color;
+ switch (params.face_id) {
+ case 0:
+ get_dir_0(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_0(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_0(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_0(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 1:
+ get_dir_1(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_1(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_1(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_1(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 2:
+ get_dir_2(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_2(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_2(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_2(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 3:
+ get_dir_3(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_3(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_3(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_3(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ case 4:
+ get_dir_4(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_4(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_4(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_4(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ default:
+ get_dir_5(dir, u0, v0);
+ color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
+
+ get_dir_5(dir, u1, v0);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
+
+ get_dir_5(dir, u0, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
+
+ get_dir_5(dir, u1, v1);
+ color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
+ break;
+ }
+ frag_color = color;
+ }
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_filter.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_filter.glsl
new file mode 100644
index 0000000000..2a774b0eb4
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_filter.glsl
@@ -0,0 +1,326 @@
+// Copyright 2016 Activision Publishing, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define GROUP_SIZE 64
+
+layout(local_size_x = GROUP_SIZE, local_size_y = 1, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
+layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly imageCube dest_cubemap0;
+layout(rgba16f, set = 2, binding = 1) uniform restrict writeonly imageCube dest_cubemap1;
+layout(rgba16f, set = 2, binding = 2) uniform restrict writeonly imageCube dest_cubemap2;
+layout(rgba16f, set = 2, binding = 3) uniform restrict writeonly imageCube dest_cubemap3;
+layout(rgba16f, set = 2, binding = 4) uniform restrict writeonly imageCube dest_cubemap4;
+layout(rgba16f, set = 2, binding = 5) uniform restrict writeonly imageCube dest_cubemap5;
+layout(rgba16f, set = 2, binding = 6) uniform restrict writeonly imageCube dest_cubemap6;
+
+#ifdef USE_HIGH_QUALITY
+#define NUM_TAPS 32
+#else
+#define NUM_TAPS 8
+#endif
+
+#define BASE_RESOLUTION 128
+
+#ifdef USE_HIGH_QUALITY
+layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
+ vec4[7][5][3][24] coeffs;
+}
+data;
+#else
+layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
+ vec4[7][5][6] coeffs;
+}
+data;
+#endif
+
+void get_dir(out vec3 dir, in vec2 uv, in uint face) {
+ switch (face) {
+ case 0:
+ dir = vec3(1.0, uv[1], -uv[0]);
+ break;
+ case 1:
+ dir = vec3(-1.0, uv[1], uv[0]);
+ break;
+ case 2:
+ dir = vec3(uv[0], 1.0, -uv[1]);
+ break;
+ case 3:
+ dir = vec3(uv[0], -1.0, uv[1]);
+ break;
+ case 4:
+ dir = vec3(uv[0], uv[1], 1.0);
+ break;
+ default:
+ dir = vec3(-uv[0], uv[1], -1.0);
+ break;
+ }
+}
+
+void main() {
+ // INPUT:
+ // id.x = the linear address of the texel (ignoring face)
+ // id.y = the face
+ // -> use to index output texture
+ // id.x = texel x
+ // id.y = texel y
+ // id.z = face
+ uvec3 id = gl_GlobalInvocationID;
+
+ // determine which texel this is
+#ifndef USE_TEXTURE_ARRAY
+ // NOTE (macOS/MoltenVK): Do not rename, "level" variable name conflicts with the Metal "level(float lod)" mipmap sampling function name.
+ int mip_level = 0;
+ if (id.x < (128 * 128)) {
+ mip_level = 0;
+ } else if (id.x < (128 * 128 + 64 * 64)) {
+ mip_level = 1;
+ id.x -= (128 * 128);
+ } else if (id.x < (128 * 128 + 64 * 64 + 32 * 32)) {
+ mip_level = 2;
+ id.x -= (128 * 128 + 64 * 64);
+ } else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16)) {
+ mip_level = 3;
+ id.x -= (128 * 128 + 64 * 64 + 32 * 32);
+ } else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8)) {
+ mip_level = 4;
+ id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16);
+ } else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4)) {
+ mip_level = 5;
+ id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8);
+ } else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4 + 2 * 2)) {
+ mip_level = 6;
+ id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4);
+ } else {
+ return;
+ }
+ int res = BASE_RESOLUTION >> mip_level;
+#else // Using Texture Arrays so all levels are the same resolution
+ int res = BASE_RESOLUTION;
+ int mip_level = int(id.x / (BASE_RESOLUTION * BASE_RESOLUTION));
+ id.x -= mip_level * BASE_RESOLUTION * BASE_RESOLUTION;
+#endif
+
+ // determine dir / pos for the texel
+ vec3 dir, adir, frameZ;
+ {
+ id.z = id.y;
+ id.y = id.x / res;
+ id.x -= id.y * res;
+
+ vec2 uv;
+ uv.x = (float(id.x) * 2.0 + 1.0) / float(res) - 1.0;
+ uv.y = -(float(id.y) * 2.0 + 1.0) / float(res) + 1.0;
+
+ get_dir(dir, uv, id.z);
+ frameZ = normalize(dir);
+
+ adir = abs(dir);
+ }
+
+ // GGX gather colors
+ vec4 color = vec4(0.0);
+ for (int axis = 0; axis < 3; axis++) {
+ const int otherAxis0 = 1 - (axis & 1) - (axis >> 1);
+ const int otherAxis1 = 2 - (axis >> 1);
+
+ float frameweight = (max(adir[otherAxis0], adir[otherAxis1]) - .75) / .25;
+ if (frameweight > 0.0) {
+ // determine frame
+ vec3 UpVector;
+ switch (axis) {
+ case 0:
+ UpVector = vec3(1, 0, 0);
+ break;
+ case 1:
+ UpVector = vec3(0, 1, 0);
+ break;
+ default:
+ UpVector = vec3(0, 0, 1);
+ break;
+ }
+
+ vec3 frameX = normalize(cross(UpVector, frameZ));
+ vec3 frameY = cross(frameZ, frameX);
+
+ // calculate parametrization for polynomial
+ float Nx = dir[otherAxis0];
+ float Ny = dir[otherAxis1];
+ float Nz = adir[axis];
+
+ float NmaxXY = max(abs(Ny), abs(Nx));
+ Nx /= NmaxXY;
+ Ny /= NmaxXY;
+
+ float theta;
+ if (Ny < Nx) {
+ if (Ny <= -0.999)
+ theta = Nx;
+ else
+ theta = Ny;
+ } else {
+ if (Ny >= 0.999)
+ theta = -Nx;
+ else
+ theta = -Ny;
+ }
+
+ float phi;
+ if (Nz <= -0.999)
+ phi = -NmaxXY;
+ else if (Nz >= 0.999)
+ phi = NmaxXY;
+ else
+ phi = Nz;
+
+ float theta2 = theta * theta;
+ float phi2 = phi * phi;
+
+ // sample
+ for (int iSuperTap = 0; iSuperTap < NUM_TAPS / 4; iSuperTap++) {
+ const int index = (NUM_TAPS / 4) * axis + iSuperTap;
+
+#ifdef USE_HIGH_QUALITY
+ vec4 coeffsDir0[3];
+ vec4 coeffsDir1[3];
+ vec4 coeffsDir2[3];
+ vec4 coeffsLevel[3];
+ vec4 coeffsWeight[3];
+
+ for (int iCoeff = 0; iCoeff < 3; iCoeff++) {
+ coeffsDir0[iCoeff] = data.coeffs[mip_level][0][iCoeff][index];
+ coeffsDir1[iCoeff] = data.coeffs[mip_level][1][iCoeff][index];
+ coeffsDir2[iCoeff] = data.coeffs[mip_level][2][iCoeff][index];
+ coeffsLevel[iCoeff] = data.coeffs[mip_level][3][iCoeff][index];
+ coeffsWeight[iCoeff] = data.coeffs[mip_level][4][iCoeff][index];
+ }
+
+ for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
+ // determine sample attributes (dir, weight, mip_level)
+ vec3 sample_dir = frameX * (coeffsDir0[0][iSubTap] + coeffsDir0[1][iSubTap] * theta2 + coeffsDir0[2][iSubTap] * phi2) + frameY * (coeffsDir1[0][iSubTap] + coeffsDir1[1][iSubTap] * theta2 + coeffsDir1[2][iSubTap] * phi2) + frameZ * (coeffsDir2[0][iSubTap] + coeffsDir2[1][iSubTap] * theta2 + coeffsDir2[2][iSubTap] * phi2);
+
+ float sample_level = coeffsLevel[0][iSubTap] + coeffsLevel[1][iSubTap] * theta2 + coeffsLevel[2][iSubTap] * phi2;
+
+ float sample_weight = coeffsWeight[0][iSubTap] + coeffsWeight[1][iSubTap] * theta2 + coeffsWeight[2][iSubTap] * phi2;
+#else
+ vec4 coeffsDir0 = data.coeffs[mip_level][0][index];
+ vec4 coeffsDir1 = data.coeffs[mip_level][1][index];
+ vec4 coeffsDir2 = data.coeffs[mip_level][2][index];
+ vec4 coeffsLevel = data.coeffs[mip_level][3][index];
+ vec4 coeffsWeight = data.coeffs[mip_level][4][index];
+
+ for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
+ // determine sample attributes (dir, weight, mip_level)
+ vec3 sample_dir = frameX * coeffsDir0[iSubTap] + frameY * coeffsDir1[iSubTap] + frameZ * coeffsDir2[iSubTap];
+
+ float sample_level = coeffsLevel[iSubTap];
+
+ float sample_weight = coeffsWeight[iSubTap];
+#endif
+
+ sample_weight *= frameweight;
+
+ // adjust for jacobian
+ sample_dir /= max(abs(sample_dir[0]), max(abs(sample_dir[1]), abs(sample_dir[2])));
+ sample_level += 0.75 * log2(dot(sample_dir, sample_dir));
+#ifndef USE_TEXTURE_ARRAY
+ sample_level += float(mip_level) / 6.0; // Hack to increase the perceived roughness and reduce upscaling artifacts
+#endif
+ // sample cubemap
+ color.xyz += textureLod(source_cubemap, normalize(sample_dir), sample_level).xyz * sample_weight;
+ color.w += sample_weight;
+ }
+ }
+ }
+ }
+ color /= color.w;
+
+ // write color
+ color.xyz = max(vec3(0.0), color.xyz);
+ color.w = 1.0;
+#ifdef USE_TEXTURE_ARRAY
+ id.xy *= uvec2(2, 2);
+#endif
+
+ switch (mip_level) {
+ case 0:
+ imageStore(dest_cubemap0, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap0, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap0, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap0, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ case 1:
+ imageStore(dest_cubemap1, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap1, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap1, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap1, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ case 2:
+ imageStore(dest_cubemap2, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap2, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap2, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap2, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ case 3:
+ imageStore(dest_cubemap3, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap3, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap3, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap3, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ case 4:
+ imageStore(dest_cubemap4, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap4, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap4, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap4, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ case 5:
+ imageStore(dest_cubemap5, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap5, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap5, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap5, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ default:
+ imageStore(dest_cubemap6, ivec3(id), color);
+#ifdef USE_TEXTURE_ARRAY
+ imageStore(dest_cubemap6, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
+ imageStore(dest_cubemap6, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
+ imageStore(dest_cubemap6, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
+#endif
+ break;
+ }
+}
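Note on the address decode in the non-texture-array path above: the branch chain peels the linear thread index into a mip level by subtracting the per-face texel counts of the preceding mips. With the 128x128 base resolution used here, the per-face offset at which mip m starts is
  \text{offset}(m) = \sum_{k=0}^{m-1} \left(\frac{128}{2^k}\right)^2 = 0,\ 16384,\ 20480,\ 21504,\ 21760,\ 21824,\ 21840
for mips 0 through 6, and any invocation with id.x >= 21844 simply returns.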
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_filter_raster.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_filter_raster.glsl
new file mode 100644
index 0000000000..0990dc7c2f
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_filter_raster.glsl
@@ -0,0 +1,256 @@
+// Copyright 2016 Activision Publishing, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+/* clang-format off */
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(push_constant, std430) uniform Params {
+ int mip_level;
+ uint face_id;
+}
+params;
+
+layout(location = 0) out vec2 uv_interp;
+/* clang-format on */
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+ gl_Position = vec4(base_arr[gl_VertexIndex] * 2.0 - 1.0, 0.0, 1.0);
+}
+
+/* clang-format off */
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(push_constant, std430) uniform Params {
+ int mip_level;
+ uint face_id;
+}
+params;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
+
+layout(location = 0) in vec2 uv_interp;
+layout(location = 0) out vec4 frag_color;
+
+/* clang-format on */
+
+#ifdef USE_HIGH_QUALITY
+#define NUM_TAPS 32
+#else
+#define NUM_TAPS 8
+#endif
+
+#define BASE_RESOLUTION 128
+
+#ifdef USE_HIGH_QUALITY
+layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
+ vec4[7][5][3][24] coeffs;
+}
+data;
+#else
+layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
+ vec4[7][5][6] coeffs;
+}
+data;
+#endif
+
+void get_dir(out vec3 dir, in vec2 uv, in uint face) {
+ switch (face) {
+ case 0:
+ dir = vec3(1.0, uv[1], -uv[0]);
+ break;
+ case 1:
+ dir = vec3(-1.0, uv[1], uv[0]);
+ break;
+ case 2:
+ dir = vec3(uv[0], 1.0, -uv[1]);
+ break;
+ case 3:
+ dir = vec3(uv[0], -1.0, uv[1]);
+ break;
+ case 4:
+ dir = vec3(uv[0], uv[1], 1.0);
+ break;
+ default:
+ dir = vec3(-uv[0], uv[1], -1.0);
+ break;
+ }
+}
+
+void main() {
+ // determine dir / pos for the texel
+ vec3 dir, adir, frameZ;
+ {
+ vec2 uv;
+ uv.x = uv_interp.x;
+ uv.y = 1.0 - uv_interp.y;
+ uv = uv * 2.0 - 1.0;
+
+ get_dir(dir, uv, params.face_id);
+ frameZ = normalize(dir);
+
+ adir = abs(dir);
+ }
+
+ // determine which texel this is
+ // NOTE (macOS/MoltenVK): Do not rename, "level" variable name conflicts with the Metal "level(float lod)" mipmap sampling function name.
+ int mip_level = 0;
+
+ if (params.mip_level < 0) {
+ // return as is
+ frag_color.rgb = textureLod(source_cubemap, frameZ, 0.0).rgb;
+ frag_color.a = 1.0;
+ return;
+ } else if (params.mip_level > 6) {
+ // maximum level
+ mip_level = 6;
+ } else {
+ mip_level = params.mip_level;
+ }
+
+ // GGX gather colors
+ vec4 color = vec4(0.0);
+ for (int axis = 0; axis < 3; axis++) {
+ const int otherAxis0 = 1 - (axis & 1) - (axis >> 1);
+ const int otherAxis1 = 2 - (axis >> 1);
+
+ float frameweight = (max(adir[otherAxis0], adir[otherAxis1]) - .75) / .25;
+ if (frameweight > 0.0) {
+ // determine frame
+ vec3 UpVector;
+ switch (axis) {
+ case 0:
+ UpVector = vec3(1, 0, 0);
+ break;
+ case 1:
+ UpVector = vec3(0, 1, 0);
+ break;
+ default:
+ UpVector = vec3(0, 0, 1);
+ break;
+ }
+
+ vec3 frameX = normalize(cross(UpVector, frameZ));
+ vec3 frameY = cross(frameZ, frameX);
+
+ // calculate parametrization for polynomial
+ float Nx = dir[otherAxis0];
+ float Ny = dir[otherAxis1];
+ float Nz = adir[axis];
+
+ float NmaxXY = max(abs(Ny), abs(Nx));
+ Nx /= NmaxXY;
+ Ny /= NmaxXY;
+
+ float theta;
+ if (Ny < Nx) {
+ if (Ny <= -0.999)
+ theta = Nx;
+ else
+ theta = Ny;
+ } else {
+ if (Ny >= 0.999)
+ theta = -Nx;
+ else
+ theta = -Ny;
+ }
+
+ float phi;
+ if (Nz <= -0.999)
+ phi = -NmaxXY;
+ else if (Nz >= 0.999)
+ phi = NmaxXY;
+ else
+ phi = Nz;
+
+ float theta2 = theta * theta;
+ float phi2 = phi * phi;
+
+ // sample
+ for (int iSuperTap = 0; iSuperTap < NUM_TAPS / 4; iSuperTap++) {
+ const int index = (NUM_TAPS / 4) * axis + iSuperTap;
+
+#ifdef USE_HIGH_QUALITY
+ vec4 coeffsDir0[3];
+ vec4 coeffsDir1[3];
+ vec4 coeffsDir2[3];
+ vec4 coeffsLevel[3];
+ vec4 coeffsWeight[3];
+
+ for (int iCoeff = 0; iCoeff < 3; iCoeff++) {
+ coeffsDir0[iCoeff] = data.coeffs[mip_level][0][iCoeff][index];
+ coeffsDir1[iCoeff] = data.coeffs[mip_level][1][iCoeff][index];
+ coeffsDir2[iCoeff] = data.coeffs[mip_level][2][iCoeff][index];
+ coeffsLevel[iCoeff] = data.coeffs[mip_level][3][iCoeff][index];
+ coeffsWeight[iCoeff] = data.coeffs[mip_level][4][iCoeff][index];
+ }
+
+ for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
+ // determine sample attributes (dir, weight, mip_level)
+ vec3 sample_dir = frameX * (coeffsDir0[0][iSubTap] + coeffsDir0[1][iSubTap] * theta2 + coeffsDir0[2][iSubTap] * phi2) + frameY * (coeffsDir1[0][iSubTap] + coeffsDir1[1][iSubTap] * theta2 + coeffsDir1[2][iSubTap] * phi2) + frameZ * (coeffsDir2[0][iSubTap] + coeffsDir2[1][iSubTap] * theta2 + coeffsDir2[2][iSubTap] * phi2);
+
+ float sample_level = coeffsLevel[0][iSubTap] + coeffsLevel[1][iSubTap] * theta2 + coeffsLevel[2][iSubTap] * phi2;
+
+ float sample_weight = coeffsWeight[0][iSubTap] + coeffsWeight[1][iSubTap] * theta2 + coeffsWeight[2][iSubTap] * phi2;
+#else
+ vec4 coeffsDir0 = data.coeffs[mip_level][0][index];
+ vec4 coeffsDir1 = data.coeffs[mip_level][1][index];
+ vec4 coeffsDir2 = data.coeffs[mip_level][2][index];
+ vec4 coeffsLevel = data.coeffs[mip_level][3][index];
+ vec4 coeffsWeight = data.coeffs[mip_level][4][index];
+
+ for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
+ // determine sample attributes (dir, weight, mip_level)
+ vec3 sample_dir = frameX * coeffsDir0[iSubTap] + frameY * coeffsDir1[iSubTap] + frameZ * coeffsDir2[iSubTap];
+
+ float sample_level = coeffsLevel[iSubTap];
+
+ float sample_weight = coeffsWeight[iSubTap];
+#endif
+
+ sample_weight *= frameweight;
+
+ // adjust for jacobian
+ sample_dir /= max(abs(sample_dir[0]), max(abs(sample_dir[1]), abs(sample_dir[2])));
+ sample_level += 0.75 * log2(dot(sample_dir, sample_dir));
+ // sample cubemap
+ color.xyz += textureLod(source_cubemap, normalize(sample_dir), sample_level).xyz * sample_weight;
+ color.w += sample_weight;
+ }
+ }
+ }
+ }
+ color /= color.w;
+
+ // write color
+ color.xyz = max(vec3(0.0), color.xyz);
+ color.w = 1.0;
+
+ frag_color = color;
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness.glsl
new file mode 100644
index 0000000000..1d46f59408
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness.glsl
@@ -0,0 +1,63 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define GROUP_SIZE 8
+
+layout(local_size_x = GROUP_SIZE, local_size_y = GROUP_SIZE, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cube;
+
+layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly imageCube dest_cubemap;
+
+#include "cubemap_roughness_inc.glsl"
+
+void main() {
+ uvec3 id = gl_GlobalInvocationID;
+ id.z += params.face_id;
+
+ vec2 uv = ((vec2(id.xy) * 2.0 + 1.0) / (params.face_size) - 1.0);
+ vec3 N = texelCoordToVec(uv, id.z);
+
+ if (params.use_direct_write) {
+ imageStore(dest_cubemap, ivec3(id), vec4(texture(source_cube, N).rgb, 1.0));
+ } else {
+ vec4 sum = vec4(0.0, 0.0, 0.0, 0.0);
+
+ float solid_angle_texel = 4.0 * M_PI / (6.0 * params.face_size * params.face_size);
+ float roughness2 = params.roughness * params.roughness;
+ float roughness4 = roughness2 * roughness2;
+ vec3 UpVector = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
+ mat3 T;
+ T[0] = normalize(cross(UpVector, N));
+ T[1] = cross(N, T[0]);
+ T[2] = N;
+
+ for (uint sampleNum = 0u; sampleNum < params.sample_count; sampleNum++) {
+ vec2 xi = Hammersley(sampleNum, params.sample_count);
+
+ vec3 H = T * ImportanceSampleGGX(xi, roughness4);
+ float NdotH = dot(N, H);
+ vec3 L = (2.0 * NdotH * H - N);
+
+ float ndotl = clamp(dot(N, L), 0.0, 1.0);
+
+ if (ndotl > 0.0) {
+ float D = DistributionGGX(NdotH, roughness4);
+ float pdf = D * NdotH / (4.0 * NdotH) + 0.0001;
+
+ float solid_angle_sample = 1.0 / (float(params.sample_count) * pdf + 0.0001);
+
+ float mipLevel = params.roughness == 0.0 ? 0.0 : 0.5 * log2(solid_angle_sample / solid_angle_texel);
+
+ sum.rgb += textureLod(source_cube, L, mipLevel).rgb * ndotl;
+ sum.a += ndotl;
+ }
+ }
+ sum /= sum.a;
+
+ imageStore(dest_cubemap, ivec3(id), vec4(sum.rgb, 1.0));
+ }
+}
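The per-sample mip selection in the loop above amounts to a solid-angle comparison. Using the push-constant names from cubemap_roughness_inc.glsl, one source texel covers
  \Omega_{\text{texel}} = \frac{4\pi}{6\,\text{face\_size}^2},
one GGX sample covers roughly
  \Omega_{\text{sample}} = \frac{1}{\text{sample\_count}\cdot pdf}, \qquad pdf = \frac{D\cos\theta_h}{4\cos\theta_h} \approx \frac{D}{4}
(the view direction is taken equal to the normal here), and the fetch LOD is
  \text{mipLevel} = \tfrac{1}{2}\log_2\!\left(\Omega_{\text{sample}} / \Omega_{\text{texel}}\right),
forced to 0 when roughness is 0.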
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_inc.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_inc.glsl
new file mode 100644
index 0000000000..1bee428a6f
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_inc.glsl
@@ -0,0 +1,95 @@
+#define M_PI 3.14159265359
+
+layout(push_constant, std430) uniform Params {
+ uint face_id;
+ uint sample_count;
+ float roughness;
+ bool use_direct_write;
+ float face_size;
+}
+params;
+
+vec3 texelCoordToVec(vec2 uv, uint faceID) {
+ mat3 faceUvVectors[6];
+
+ // -x
+ faceUvVectors[1][0] = vec3(0.0, 0.0, 1.0); // u -> +z
+ faceUvVectors[1][1] = vec3(0.0, -1.0, 0.0); // v -> -y
+ faceUvVectors[1][2] = vec3(-1.0, 0.0, 0.0); // -x face
+
+ // +x
+ faceUvVectors[0][0] = vec3(0.0, 0.0, -1.0); // u -> -z
+ faceUvVectors[0][1] = vec3(0.0, -1.0, 0.0); // v -> -y
+ faceUvVectors[0][2] = vec3(1.0, 0.0, 0.0); // +x face
+
+ // -y
+ faceUvVectors[3][0] = vec3(1.0, 0.0, 0.0); // u -> +x
+ faceUvVectors[3][1] = vec3(0.0, 0.0, -1.0); // v -> -z
+ faceUvVectors[3][2] = vec3(0.0, -1.0, 0.0); // -y face
+
+ // +y
+ faceUvVectors[2][0] = vec3(1.0, 0.0, 0.0); // u -> +x
+ faceUvVectors[2][1] = vec3(0.0, 0.0, 1.0); // v -> +z
+ faceUvVectors[2][2] = vec3(0.0, 1.0, 0.0); // +y face
+
+ // -z
+ faceUvVectors[5][0] = vec3(-1.0, 0.0, 0.0); // u -> -x
+ faceUvVectors[5][1] = vec3(0.0, -1.0, 0.0); // v -> -y
+ faceUvVectors[5][2] = vec3(0.0, 0.0, -1.0); // -z face
+
+ // +z
+ faceUvVectors[4][0] = vec3(1.0, 0.0, 0.0); // u -> +x
+ faceUvVectors[4][1] = vec3(0.0, -1.0, 0.0); // v -> -y
+ faceUvVectors[4][2] = vec3(0.0, 0.0, 1.0); // +z face
+
+	// out = u * faceUvVectors[faceID][0] + v * faceUvVectors[faceID][1] + faceUvVectors[faceID][2].
+ vec3 result = (faceUvVectors[faceID][0] * uv.x) + (faceUvVectors[faceID][1] * uv.y) + faceUvVectors[faceID][2];
+ return normalize(result);
+}
+
+vec3 ImportanceSampleGGX(vec2 xi, float roughness4) {
+ // Compute distribution direction
+ float Phi = 2.0 * M_PI * xi.x;
+ float CosTheta = sqrt((1.0 - xi.y) / (1.0 + (roughness4 - 1.0) * xi.y));
+ float SinTheta = sqrt(1.0 - CosTheta * CosTheta);
+
+ // Convert to spherical direction
+ vec3 H;
+ H.x = SinTheta * cos(Phi);
+ H.y = SinTheta * sin(Phi);
+ H.z = CosTheta;
+
+ return H;
+}
+
+float DistributionGGX(float NdotH, float roughness4) {
+ float NdotH2 = NdotH * NdotH;
+ float denom = (NdotH2 * (roughness4 - 1.0) + 1.0);
+ denom = M_PI * denom * denom;
+
+ return roughness4 / denom;
+}
+
+// https://graphicrants.blogspot.com.au/2013/08/specular-brdf-reference.html
+float GGX(float NdotV, float a) {
+ float k = a / 2.0;
+ return NdotV / (NdotV * (1.0 - k) + k);
+}
+
+// https://graphicrants.blogspot.com.au/2013/08/specular-brdf-reference.html
+float G_Smith(float a, float nDotV, float nDotL) {
+ return GGX(nDotL, a * a) * GGX(nDotV, a * a);
+}
+
+float radicalInverse_VdC(uint bits) {
+ bits = (bits << 16u) | (bits >> 16u);
+ bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
+ bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
+ bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
+ bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
+ return float(bits) * 2.3283064365386963e-10; // / 0x100000000
+}
+
+vec2 Hammersley(uint i, uint N) {
+ return vec2(float(i) / float(N), radicalInverse_VdC(i));
+}
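For reference, Hammersley() pairs a regular 1-D sequence with the base-2 radical inverse that radicalInverse_VdC computes by bit reversal: the i-th of N points is
  x_i = \left(\tfrac{i}{N},\ \Phi_2(i)\right), \qquad \Phi_2(i) = \operatorname{bitreverse}_{32}(i)\cdot 2^{-32},
and ImportanceSampleGGX maps such a point \xi onto a half-vector around +Z with
  \phi = 2\pi\,\xi_x, \qquad \cos\theta = \sqrt{\frac{1-\xi_y}{1+(\text{roughness4}-1)\,\xi_y}}.
(\Phi_2 and \xi are only notation for the return values of the two functions above.)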
diff --git a/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_raster.glsl b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_raster.glsl
new file mode 100644
index 0000000000..c29accd8a7
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/cubemap_roughness_raster.glsl
@@ -0,0 +1,79 @@
+/* clang-format off */
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "cubemap_roughness_inc.glsl"
+
+layout(location = 0) out vec2 uv_interp;
+/* clang-format on */
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+ gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
+}
+
+/* clang-format off */
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#include "cubemap_roughness_inc.glsl"
+
+layout(location = 0) in vec2 uv_interp;
+
+layout(set = 0, binding = 0) uniform samplerCube source_cube;
+
+layout(location = 0) out vec4 frag_color;
+/* clang-format on */
+
+void main() {
+ vec3 N = texelCoordToVec(uv_interp * 2.0 - 1.0, params.face_id);
+
+ //vec4 color = color_interp;
+
+ if (params.use_direct_write) {
+ frag_color = vec4(texture(source_cube, N).rgb, 1.0);
+ } else {
+ vec4 sum = vec4(0.0, 0.0, 0.0, 0.0);
+
+ float solid_angle_texel = 4.0 * M_PI / (6.0 * params.face_size * params.face_size);
+ float roughness2 = params.roughness * params.roughness;
+ float roughness4 = roughness2 * roughness2;
+ vec3 UpVector = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
+ mat3 T;
+ T[0] = normalize(cross(UpVector, N));
+ T[1] = cross(N, T[0]);
+ T[2] = N;
+
+ for (uint sampleNum = 0u; sampleNum < params.sample_count; sampleNum++) {
+ vec2 xi = Hammersley(sampleNum, params.sample_count);
+
+ vec3 H = T * ImportanceSampleGGX(xi, roughness4);
+ float NdotH = dot(N, H);
+ vec3 L = (2.0 * NdotH * H - N);
+
+ float ndotl = clamp(dot(N, L), 0.0, 1.0);
+
+ if (ndotl > 0.0) {
+ float D = DistributionGGX(NdotH, roughness4);
+ float pdf = D * NdotH / (4.0 * NdotH) + 0.0001;
+
+ float solid_angle_sample = 1.0 / (float(params.sample_count) * pdf + 0.0001);
+
+ float mipLevel = params.roughness == 0.0 ? 0.0 : 0.5 * log2(solid_angle_sample / solid_angle_texel);
+
+ sum.rgb += textureLod(source_cube, L, mipLevel).rgb * ndotl;
+ sum.a += ndotl;
+ }
+ }
+ sum /= sum.a;
+
+ frag_color = vec4(sum.rgb, 1.0);
+ }
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/resolve.glsl b/servers/rendering/renderer_rd/shaders/effects/resolve.glsl
new file mode 100644
index 0000000000..0e086331c0
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/resolve.glsl
@@ -0,0 +1,236 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+#ifdef MODE_RESOLVE_DEPTH
+layout(set = 0, binding = 0) uniform sampler2DMS source_depth;
+layout(r32f, set = 1, binding = 0) uniform restrict writeonly image2D dest_depth;
+#endif
+
+#ifdef MODE_RESOLVE_GI
+layout(set = 0, binding = 0) uniform sampler2DMS source_depth;
+layout(set = 0, binding = 1) uniform sampler2DMS source_normal_roughness;
+
+layout(r32f, set = 1, binding = 0) uniform restrict writeonly image2D dest_depth;
+layout(rgba8, set = 1, binding = 1) uniform restrict writeonly image2D dest_normal_roughness;
+
+#ifdef VOXEL_GI_RESOLVE
+layout(set = 2, binding = 0) uniform usampler2DMS source_voxel_gi;
+layout(rg8ui, set = 3, binding = 0) uniform restrict writeonly uimage2D dest_voxel_gi;
+#endif
+
+#endif
+
+layout(push_constant, std430) uniform Params {
+ ivec2 screen_size;
+ int sample_count;
+ uint pad;
+}
+params;
+
+void main() {
+ // Pixel being shaded
+ ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
+ if (any(greaterThanEqual(pos, params.screen_size))) { //too large, do nothing
+ return;
+ }
+
+#ifdef MODE_RESOLVE_DEPTH
+
+ float depth_avg = 0.0;
+ for (int i = 0; i < params.sample_count; i++) {
+ depth_avg += texelFetch(source_depth, pos, i).r;
+ }
+ depth_avg /= float(params.sample_count);
+ imageStore(dest_depth, pos, vec4(depth_avg));
+
+#endif
+
+#ifdef MODE_RESOLVE_GI
+
+ float best_depth = 1e20;
+ vec4 best_normal_roughness = vec4(0.0);
+#ifdef VOXEL_GI_RESOLVE
+ uvec2 best_voxel_gi;
+#endif
+
+#if 0
+
+ for(int i=0;i<params.sample_count;i++) {
+ float depth = texelFetch(source_depth,pos,i).r;
+ if (depth < best_depth) { //use the depth closest to camera
+ best_depth = depth;
+ best_normal_roughness = texelFetch(source_normal_roughness,pos,i);
+
+#ifdef VOXEL_GI_RESOLVE
+ best_voxel_gi = texelFetch(source_voxel_gi,pos,i).rg;
+#endif
+ }
+ }
+
+#else
+
+#if 1
+
+ vec4 group1;
+ vec4 group2;
+ vec4 group3;
+ vec4 group4;
+ int best_index = 0;
+
+ //2X
+ group1.x = texelFetch(source_depth, pos, 0).r;
+ group1.y = texelFetch(source_depth, pos, 1).r;
+
+ //4X
+ if (params.sample_count >= 4) {
+ group1.z = texelFetch(source_depth, pos, 2).r;
+ group1.w = texelFetch(source_depth, pos, 3).r;
+ }
+ //8X
+ if (params.sample_count >= 8) {
+ group2.x = texelFetch(source_depth, pos, 4).r;
+ group2.y = texelFetch(source_depth, pos, 5).r;
+ group2.z = texelFetch(source_depth, pos, 6).r;
+ group2.w = texelFetch(source_depth, pos, 7).r;
+ }
+ //16X
+ if (params.sample_count >= 16) {
+ group3.x = texelFetch(source_depth, pos, 8).r;
+ group3.y = texelFetch(source_depth, pos, 9).r;
+ group3.z = texelFetch(source_depth, pos, 10).r;
+ group3.w = texelFetch(source_depth, pos, 11).r;
+
+ group4.x = texelFetch(source_depth, pos, 12).r;
+ group4.y = texelFetch(source_depth, pos, 13).r;
+ group4.z = texelFetch(source_depth, pos, 14).r;
+ group4.w = texelFetch(source_depth, pos, 15).r;
+ }
+
+ if (params.sample_count == 2) {
+ best_index = (pos.x & 1) ^ ((pos.y >> 1) & 1); //not much can be done here
+ } else if (params.sample_count == 4) {
+ vec4 freq = vec4(equal(group1, vec4(group1.x)));
+ freq += vec4(equal(group1, vec4(group1.y)));
+ freq += vec4(equal(group1, vec4(group1.z)));
+ freq += vec4(equal(group1, vec4(group1.w)));
+
+ float min_f = freq.x;
+ best_index = 0;
+ if (freq.y < min_f) {
+ best_index = 1;
+ min_f = freq.y;
+ }
+ if (freq.z < min_f) {
+ best_index = 2;
+ min_f = freq.z;
+ }
+ if (freq.w < min_f) {
+ best_index = 3;
+ }
+ } else if (params.sample_count == 8) {
+ vec4 freq0 = vec4(equal(group1, vec4(group1.x)));
+ vec4 freq1 = vec4(equal(group2, vec4(group1.x)));
+ freq0 += vec4(equal(group1, vec4(group1.y)));
+ freq1 += vec4(equal(group2, vec4(group1.y)));
+ freq0 += vec4(equal(group1, vec4(group1.z)));
+ freq1 += vec4(equal(group2, vec4(group1.z)));
+ freq0 += vec4(equal(group1, vec4(group1.w)));
+ freq1 += vec4(equal(group2, vec4(group1.w)));
+ freq0 += vec4(equal(group1, vec4(group2.x)));
+ freq1 += vec4(equal(group2, vec4(group2.x)));
+ freq0 += vec4(equal(group1, vec4(group2.y)));
+ freq1 += vec4(equal(group2, vec4(group2.y)));
+ freq0 += vec4(equal(group1, vec4(group2.z)));
+ freq1 += vec4(equal(group2, vec4(group2.z)));
+ freq0 += vec4(equal(group1, vec4(group2.w)));
+ freq1 += vec4(equal(group2, vec4(group2.w)));
+
+ float min_f0 = freq0.x;
+ int best_index0 = 0;
+ if (freq0.y < min_f0) {
+ best_index0 = 1;
+ min_f0 = freq0.y;
+ }
+ if (freq0.z < min_f0) {
+ best_index0 = 2;
+ min_f0 = freq0.z;
+ }
+ if (freq0.w < min_f0) {
+ best_index0 = 3;
+ min_f0 = freq0.w;
+ }
+
+ float min_f1 = freq1.x;
+ int best_index1 = 4;
+ if (freq1.y < min_f1) {
+ best_index1 = 5;
+ min_f1 = freq1.y;
+ }
+ if (freq1.z < min_f1) {
+ best_index1 = 6;
+ min_f1 = freq1.z;
+ }
+ if (freq1.w < min_f1) {
+ best_index1 = 7;
+ min_f1 = freq1.w;
+ }
+
+		best_index = mix(best_index0, best_index1, min_f1 < min_f0); // pick the candidate from whichever group holds the rarer depth
+ }
+
+#else
+ float depths[16];
+ int depth_indices[16];
+ int depth_amount[16];
+ int depth_count = 0;
+
+ for (int i = 0; i < params.sample_count; i++) {
+ float depth = texelFetch(source_depth, pos, i).r;
+ int depth_index = -1;
+ for (int j = 0; j < depth_count; j++) {
+ if (abs(depths[j] - depth) < 0.000001) {
+ depth_index = j;
+ break;
+ }
+ }
+
+ if (depth_index == -1) {
+ depths[depth_count] = depth;
+ depth_indices[depth_count] = i;
+ depth_amount[depth_count] = 1;
+ depth_count += 1;
+ } else {
+ depth_amount[depth_index] += 1;
+ }
+ }
+
+ int depth_least = 0xFFFF;
+ int best_index = 0;
+ for (int j = 0; j < depth_count; j++) {
+ if (depth_amount[j] < depth_least) {
+ best_index = depth_indices[j];
+ depth_least = depth_amount[j];
+ }
+ }
+#endif
+ best_depth = texelFetch(source_depth, pos, best_index).r;
+ best_normal_roughness = texelFetch(source_normal_roughness, pos, best_index);
+#ifdef VOXEL_GI_RESOLVE
+ best_voxel_gi = texelFetch(source_voxel_gi, pos, best_index).rg;
+#endif
+
+#endif
+
+ imageStore(dest_depth, pos, vec4(best_depth));
+ imageStore(dest_normal_roughness, pos, vec4(best_normal_roughness));
+#ifdef VOXEL_GI_RESOLVE
+ imageStore(dest_voxel_gi, pos, uvec4(best_voxel_gi, 0, 0));
+#endif
+
+#endif
+}
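In the GI path above, freq[i] counts how many MSAA samples share sample i's depth, and the sample with the smallest count wins, i.e. the resolve keeps the minority (edge) sample. A small worked 4x case: depths (0.5, 0.5, 0.5, 0.9) give freq = (3, 3, 3, 1), so best_index = 3 and that sample's depth, normal/roughness (and voxel GI, when enabled) are the values written out.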
diff --git a/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection.glsl b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection.glsl
new file mode 100644
index 0000000000..246ef81cb2
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection.glsl
@@ -0,0 +1,255 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D source_diffuse;
+layout(r32f, set = 0, binding = 1) uniform restrict readonly image2D source_depth;
+layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D ssr_image;
+#ifdef MODE_ROUGH
+layout(r8, set = 1, binding = 1) uniform restrict writeonly image2D blur_radius_image;
+#endif
+layout(rgba8, set = 2, binding = 0) uniform restrict readonly image2D source_normal_roughness;
+layout(set = 3, binding = 0) uniform sampler2D source_metallic;
+
+layout(push_constant, std430) uniform Params {
+ vec4 proj_info;
+
+ ivec2 screen_size;
+ float camera_z_near;
+ float camera_z_far;
+
+ int num_steps;
+ float depth_tolerance;
+ float distance_fade;
+ float curve_fade_in;
+
+ bool orthogonal;
+ float filter_mipmap_levels;
+ bool use_half_res;
+ uint metallic_mask;
+
+ uint view_index;
+ uint pad1;
+ uint pad2;
+ uint pad3;
+}
+params;
+
+#include "screen_space_reflection_inc.glsl"
+
+vec2 view_to_screen(vec3 view_pos, out float w) {
+ vec4 projected = scene_data.projection[params.view_index] * vec4(view_pos, 1.0);
+ projected.xyz /= projected.w;
+ projected.xy = projected.xy * 0.5 + 0.5;
+ w = projected.w;
+ return projected.xy;
+}
+
+#define M_PI 3.14159265359
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+ if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
+ return;
+ }
+
+ vec2 pixel_size = 1.0 / vec2(params.screen_size);
+ vec2 uv = vec2(ssC.xy) * pixel_size;
+
+ uv += pixel_size * 0.5;
+
+ float base_depth = imageLoad(source_depth, ssC).r;
+
+ // World space point being shaded
+ vec3 vertex = reconstructCSPosition(uv * vec2(params.screen_size), base_depth);
+
+ vec4 normal_roughness = imageLoad(source_normal_roughness, ssC);
+ vec3 normal = normal_roughness.xyz * 2.0 - 1.0;
+ normal = normalize(normal);
+ normal.y = -normal.y; //because this code reads flipped
+
+ vec3 view_dir;
+ if (sc_multiview) {
+ view_dir = normalize(vertex + scene_data.eye_offset[params.view_index].xyz);
+ } else {
+ view_dir = normalize(vertex);
+ }
+ vec3 ray_dir = normalize(reflect(view_dir, normal));
+
+ if (dot(ray_dir, normal) < 0.001) {
+ imageStore(ssr_image, ssC, vec4(0.0));
+ return;
+ }
+ //ray_dir = normalize(view_dir - normal * dot(normal,view_dir) * 2.0);
+ //ray_dir = normalize(vec3(1.0, 1.0, -1.0));
+
+ ////////////////
+
+	// compute the ray length and clip it against the near plane (we don't want to trace beyond the visible range)
+ float ray_len = (vertex.z + ray_dir.z * params.camera_z_far) > -params.camera_z_near ? (-params.camera_z_near - vertex.z) / ray_dir.z : params.camera_z_far;
+ vec3 ray_end = vertex + ray_dir * ray_len;
+
+ float w_begin;
+ vec2 vp_line_begin = view_to_screen(vertex, w_begin);
+ float w_end;
+ vec2 vp_line_end = view_to_screen(ray_end, w_end);
+ vec2 vp_line_dir = vp_line_end - vp_line_begin;
+
+ // we need to interpolate w along the ray, to generate perspective correct reflections
+ w_begin = 1.0 / w_begin;
+ w_end = 1.0 / w_end;
+
+ float z_begin = vertex.z * w_begin;
+ float z_end = ray_end.z * w_end;
+
+ vec2 line_begin = vp_line_begin / pixel_size;
+ vec2 line_dir = vp_line_dir / pixel_size;
+ float z_dir = z_end - z_begin;
+ float w_dir = w_end - w_begin;
+
+ // clip the line to the viewport edges
+
+ float scale_max_x = min(1.0, 0.99 * (1.0 - vp_line_begin.x) / max(1e-5, vp_line_dir.x));
+ float scale_max_y = min(1.0, 0.99 * (1.0 - vp_line_begin.y) / max(1e-5, vp_line_dir.y));
+ float scale_min_x = min(1.0, 0.99 * vp_line_begin.x / max(1e-5, -vp_line_dir.x));
+ float scale_min_y = min(1.0, 0.99 * vp_line_begin.y / max(1e-5, -vp_line_dir.y));
+ float line_clip = min(scale_max_x, scale_max_y) * min(scale_min_x, scale_min_y);
+ line_dir *= line_clip;
+ z_dir *= line_clip;
+ w_dir *= line_clip;
+
+ // clip z and w advance to line advance
+ vec2 line_advance = normalize(line_dir); // down to pixel
+ float step_size = length(line_advance) / length(line_dir);
+ float z_advance = z_dir * step_size; // adapt z advance to line advance
+ float w_advance = w_dir * step_size; // adapt w advance to line advance
+
+ // make line advance faster if direction is closer to pixel edges (this avoids sampling the same pixel twice)
+ float advance_angle_adj = 1.0 / max(abs(line_advance.x), abs(line_advance.y));
+	line_advance *= advance_angle_adj; // scale so the dominant axis steps one full pixel per iteration
+ z_advance *= advance_angle_adj;
+ w_advance *= advance_angle_adj;
+
+ vec2 pos = line_begin;
+ float z = z_begin;
+ float w = w_begin;
+ float z_from = z / w;
+ float z_to = z_from;
+ float depth;
+ vec2 prev_pos = pos;
+
+ bool found = false;
+
+ float steps_taken = 0.0;
+
+ for (int i = 0; i < params.num_steps; i++) {
+ pos += line_advance;
+ z += z_advance;
+ w += w_advance;
+
+ // convert to linear depth
+
+ depth = imageLoad(source_depth, ivec2(pos - 0.5)).r;
+ if (sc_multiview) {
+ depth = depth * 2.0 - 1.0;
+ depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
+ depth = -depth;
+ }
+
+ z_from = z_to;
+ z_to = z / w;
+
+ if (depth > z_to) {
+ // if depth was surpassed
+ if (depth <= max(z_to, z_from) + params.depth_tolerance && -depth < params.camera_z_far) {
+ // check the depth tolerance and far clip
+ // check that normal is valid
+ found = true;
+ }
+ break;
+ }
+
+ steps_taken += 1.0;
+ prev_pos = pos;
+ }
+
+ if (found) {
+ float margin_blend = 1.0;
+
+ vec2 margin = vec2((params.screen_size.x + params.screen_size.y) * 0.05); // make a uniform margin
+ if (any(bvec4(lessThan(pos, vec2(0.0, 0.0)), greaterThan(pos, params.screen_size)))) {
+ // clip at the screen edges
+ imageStore(ssr_image, ssC, vec4(0.0));
+ return;
+ }
+
+ {
+ //blend fading out towards inner margin
+ // 0.5 = midpoint of reflection
+ vec2 margin_grad = mix(params.screen_size - pos, pos, lessThan(pos, params.screen_size * 0.5));
+ margin_blend = smoothstep(0.0, margin.x * margin.y, margin_grad.x * margin_grad.y);
+ //margin_blend = 1.0;
+ }
+
+ vec2 final_pos;
+ float grad = (steps_taken + 1.0) / float(params.num_steps);
+ float initial_fade = params.curve_fade_in == 0.0 ? 1.0 : pow(clamp(grad, 0.0, 1.0), params.curve_fade_in);
+ float fade = pow(clamp(1.0 - grad, 0.0, 1.0), params.distance_fade) * initial_fade;
+ final_pos = pos;
+
+ vec4 final_color;
+
+#ifdef MODE_ROUGH
+
+ // if roughness is enabled, do screen space cone tracing
+ float blur_radius = 0.0;
+ float roughness = normal_roughness.w;
+
+ if (roughness > 0.001) {
+ float cone_angle = min(roughness, 0.999) * M_PI * 0.5;
+ float cone_len = length(final_pos - line_begin);
+ float op_len = 2.0 * tan(cone_angle) * cone_len; // opposite side of iso triangle
+ {
+ // fit to sphere inside cone (sphere ends at end of cone), something like this:
+ // ___
+ // \O/
+ // V
+ //
+ // as it avoids bleeding from beyond the reflection as much as possible. As a plus
+ // it also makes the rough reflection more elongated.
+ float a = op_len;
+ float h = cone_len;
+ float a2 = a * a;
+ float fh2 = 4.0f * h * h;
+ blur_radius = (a * (sqrt(a2 + fh2) - a)) / (4.0f * h);
+ }
+ }
+
+ // Isn't this going to be overwritten after our endif?
+ final_color = imageLoad(source_diffuse, ivec2((final_pos - 0.5) * pixel_size));
+
+ imageStore(blur_radius_image, ssC, vec4(blur_radius / 255.0)); //stored in r8
+
+#endif // MODE_ROUGH
+
+ final_color = vec4(imageLoad(source_diffuse, ivec2(final_pos - 0.5)).rgb, fade * margin_blend);
+
+ //change blend by metallic
+ vec4 metallic_mask = unpackUnorm4x8(params.metallic_mask);
+ final_color.a *= dot(metallic_mask, texelFetch(source_metallic, ssC << 1, 0));
+
+ imageStore(ssr_image, ssC, final_color);
+
+ } else {
+#ifdef MODE_ROUGH
+ imageStore(blur_radius_image, ssC, vec4(0.0));
+#endif
+ imageStore(ssr_image, ssC, vec4(0.0));
+ }
+}
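Two relations the tracer above relies on, written out with its own variable names: the quantities it steps linearly per pixel are w = 1/w_clip and z = z_view/w_clip, so the perspective-correct view depth at any step is recovered as z_to = z / w. In the rough path, the blur radius is the radius of the sphere inscribed at the end of the cone,
  r = \frac{a\left(\sqrt{a^2 + 4h^2} - a\right)}{4h}, \qquad a = 2\tan(\theta_{\text{cone}})\,L,\quad h = L,
with L the traced screen-space length and \theta_{\text{cone}} = \min(\text{roughness}, 0.999)\,\pi/2.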
diff --git a/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_filter.glsl b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_filter.glsl
new file mode 100644
index 0000000000..a63d60e0b2
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_filter.glsl
@@ -0,0 +1,148 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D source_ssr;
+layout(r8, set = 0, binding = 1) uniform restrict readonly image2D source_radius;
+layout(rgba8, set = 1, binding = 0) uniform restrict readonly image2D source_normal;
+
+layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly image2D dest_ssr;
+#ifndef VERTICAL_PASS
+layout(r8, set = 2, binding = 1) uniform restrict writeonly image2D dest_radius;
+#endif
+layout(r32f, set = 3, binding = 0) uniform restrict readonly image2D source_depth;
+
+layout(push_constant, std430) uniform Params {
+ vec4 proj_info;
+
+ bool orthogonal;
+ float edge_tolerance;
+ int increment;
+ uint view_index;
+
+ ivec2 screen_size;
+ bool vertical;
+ uint steps;
+}
+params;
+
+#include "screen_space_reflection_inc.glsl"
+
+#define GAUSS_TABLE_SIZE 15
+
+const float gauss_table[GAUSS_TABLE_SIZE + 1] = float[](
+ 0.1847392078702266,
+ 0.16595854345772326,
+ 0.12031364177766891,
+ 0.07038755277896766,
+ 0.03322925565155569,
+ 0.012657819729901945,
+ 0.0038903040680094217,
+ 0.0009646503390864025,
+ 0.00019297087402915717,
+ 0.000031139936308099136,
+ 0.000004053309048174758,
+ 4.255228059965837e-7,
+ 3.602517634249573e-8,
+ 2.4592560765896795e-9,
+ 1.3534945386863618e-10,
+ 0.0 //one more for interpolation
+);
+
+float gauss_weight(float p_val) {
+ float idxf;
+ float c = modf(max(0.0, p_val * float(GAUSS_TABLE_SIZE)), idxf);
+ int idx = int(idxf);
+ if (idx >= GAUSS_TABLE_SIZE + 1) {
+ return 0.0;
+ }
+
+ return mix(gauss_table[idx], gauss_table[idx + 1], c);
+}
+
+#define M_PI 3.14159265359
+
+void do_filter(inout vec4 accum, inout float accum_radius, inout float divisor, ivec2 texcoord, ivec2 increment, vec3 p_pos, vec3 normal, float p_limit_radius) {
+ for (int i = 1; i < params.steps; i++) {
+ float d = float(i * params.increment);
+ ivec2 tc = texcoord + increment * i;
+ float depth = imageLoad(source_depth, tc).r;
+ vec3 view_pos = reconstructCSPosition(vec2(tc) + 0.5, depth);
+ vec3 view_normal = normalize(imageLoad(source_normal, tc).rgb * 2.0 - 1.0);
+ view_normal.y = -view_normal.y;
+
+ float r = imageLoad(source_radius, tc).r;
+ float radius = round(r * 255.0);
+
+ float angle_n = 1.0 - abs(dot(normal, view_normal));
+ if (angle_n > params.edge_tolerance) {
+ break;
+ }
+
+ float angle = abs(dot(normal, normalize(view_pos - p_pos)));
+
+ if (angle > params.edge_tolerance) {
+ break;
+ }
+
+ if (d < radius) {
+ float w = gauss_weight(d / radius);
+ accum += imageLoad(source_ssr, tc) * w;
+#ifndef VERTICAL_PASS
+ accum_radius += r * w;
+#endif
+ divisor += w;
+ }
+ }
+}
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+ if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
+ return;
+ }
+
+ float base_contrib = gauss_table[0];
+
+ vec4 accum = imageLoad(source_ssr, ssC);
+
+ float accum_radius = imageLoad(source_radius, ssC).r;
+ float radius = accum_radius * 255.0;
+
+ float divisor = gauss_table[0];
+ accum *= divisor;
+ accum_radius *= divisor;
+#ifdef VERTICAL_PASS
+ ivec2 direction = ivec2(0, params.increment);
+#else
+ ivec2 direction = ivec2(params.increment, 0);
+#endif
+ float depth = imageLoad(source_depth, ssC).r;
+ vec3 pos = reconstructCSPosition(vec2(ssC.xy) + 0.5, depth);
+ vec3 normal = imageLoad(source_normal, ssC).xyz * 2.0 - 1.0;
+ normal = normalize(normal);
+ normal.y = -normal.y;
+
+ do_filter(accum, accum_radius, divisor, ssC.xy, direction, pos, normal, radius);
+ do_filter(accum, accum_radius, divisor, ssC.xy, -direction, pos, normal, radius);
+
+ if (divisor > 0.0) {
+ accum /= divisor;
+ accum_radius /= divisor;
+ } else {
+ accum = vec4(0.0);
+ accum_radius = 0.0;
+ }
+
+ imageStore(dest_ssr, ssC, accum);
+
+#ifndef VERTICAL_PASS
+ imageStore(dest_radius, ssC, vec4(accum_radius));
+#endif
+}
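gauss_weight() above is a table lookup with linear interpolation: with x = d / radius in [0, 1),
  w(x) = \operatorname{mix}\!\big(T[\lfloor 15x \rfloor],\ T[\lfloor 15x \rfloor + 1],\ \operatorname{frac}(15x)\big),
where T is gauss_table (15 real entries plus a trailing 0 so the last interval interpolates to zero). The centre tap is weighted by T[0] before do_filter() accumulates the two directions.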
diff --git a/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_inc.glsl b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_inc.glsl
new file mode 100644
index 0000000000..26405ab040
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_inc.glsl
@@ -0,0 +1,28 @@
+layout(constant_id = 0) const bool sc_multiview = false;
+
+layout(set = 4, binding = 0, std140) uniform SceneData {
+ mat4x4 projection[2];
+ mat4x4 inv_projection[2];
+ vec4 eye_offset[2];
+}
+scene_data;
+
+vec3 reconstructCSPosition(vec2 screen_pos, float z) {
+ if (sc_multiview) {
+ vec4 pos;
+ pos.xy = (2.0 * vec2(screen_pos) / vec2(params.screen_size)) - 1.0;
+ pos.z = z * 2.0 - 1.0;
+ pos.w = 1.0;
+
+ pos = scene_data.inv_projection[params.view_index] * pos;
+ pos.xyz /= pos.w;
+
+ return pos.xyz;
+ } else {
+ if (params.orthogonal) {
+ return vec3((screen_pos.xy * params.proj_info.xy + params.proj_info.zw), z);
+ } else {
+ return vec3((screen_pos.xy * params.proj_info.xy + params.proj_info.zw) * z, z);
+ }
+ }
+}
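reconstructCSPosition() recovers a view-space position from a screen-space pixel position and a depth value. In the single-view path, proj_info packs the inverse-projection scale and offset, so
  xy_{\text{view}} = (xy_{\text{screen}}\cdot \text{proj\_info}.xy + \text{proj\_info}.zw)\cdot\begin{cases}1 & \text{orthogonal}\\ z & \text{perspective}\end{cases}, \qquad z_{\text{view}} = z,
with z already expected in view space. The multiview path instead rebuilds the full NDC point and unprojects it with inv_projection[view_index], dividing by w.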
diff --git a/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_scale.glsl b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_scale.glsl
new file mode 100644
index 0000000000..a7da0812df
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/screen_space_reflection_scale.glsl
@@ -0,0 +1,106 @@
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+/* Specialization Constants (Toggles) */
+
+layout(constant_id = 0) const bool sc_multiview = false;
+
+/* inputs */
+layout(set = 0, binding = 0) uniform sampler2D source_ssr;
+layout(set = 1, binding = 0) uniform sampler2D source_depth;
+layout(set = 1, binding = 1) uniform sampler2D source_normal;
+layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly image2D dest_ssr;
+layout(r32f, set = 3, binding = 0) uniform restrict writeonly image2D dest_depth;
+layout(rgba8, set = 3, binding = 1) uniform restrict writeonly image2D dest_normal;
+
+layout(push_constant, std430) uniform Params {
+ ivec2 screen_size;
+ float camera_z_near;
+ float camera_z_far;
+
+ bool orthogonal;
+ bool filtered;
+ uint pad[2];
+}
+params;
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+ if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
+ return;
+ }
+	//do not filter, SSR will generate artifacts if this is done
+
+ float divisor = 0.0;
+ vec4 color;
+ float depth;
+ vec4 normal;
+
+ if (params.filtered) {
+ color = vec4(0.0);
+ depth = 0.0;
+ normal = vec4(0.0);
+
+ for (int i = 0; i < 4; i++) {
+ ivec2 ofs = ssC << 1;
+ if (bool(i & 1)) {
+ ofs.x += 1;
+ }
+ if (bool(i & 2)) {
+ ofs.y += 1;
+ }
+ color += texelFetch(source_ssr, ofs, 0);
+ float d = texelFetch(source_depth, ofs, 0).r;
+ vec4 nr = texelFetch(source_normal, ofs, 0);
+ normal.xyz += nr.xyz * 2.0 - 1.0;
+ normal.w += nr.w;
+
+ if (sc_multiview) {
+ // we're doing a full unproject so we need the value as is.
+ depth += d;
+ } else {
+ // unproject our Z value so we can use it directly.
+ d = d * 2.0 - 1.0;
+ if (params.orthogonal) {
+ d = ((d + (params.camera_z_far + params.camera_z_near) / (params.camera_z_far - params.camera_z_near)) * (params.camera_z_far - params.camera_z_near)) / 2.0;
+ } else {
+ d = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - d * (params.camera_z_far - params.camera_z_near));
+ }
+ depth += -d;
+ }
+ }
+
+ color /= 4.0;
+ depth /= 4.0;
+ normal.xyz = normalize(normal.xyz / 4.0) * 0.5 + 0.5;
+ normal.w /= 4.0;
+ } else {
+ ivec2 ofs = ssC << 1;
+
+ color = texelFetch(source_ssr, ofs, 0);
+ depth = texelFetch(source_depth, ofs, 0).r;
+ normal = texelFetch(source_normal, ofs, 0);
+
+ if (!sc_multiview) {
+ // unproject our Z value so we can use it directly.
+ depth = depth * 2.0 - 1.0;
+ if (params.orthogonal) {
+ depth = ((depth + (params.camera_z_far + params.camera_z_near) / (params.camera_z_far - params.camera_z_near)) * (params.camera_z_far - params.camera_z_near)) / 2.0;
+ } else {
+ depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
+ }
+ depth = -depth;
+ }
+ }
+
+ imageStore(dest_ssr, ssC, color);
+ imageStore(dest_depth, ssC, vec4(depth));
+ imageStore(dest_normal, ssC, normal);
+}
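The "unproject our Z value" branches above linearize the sampled depth: with d the raw depth-buffer value, n = camera_z_near and f = camera_z_far, the perspective case computes
  d_{\text{ndc}} = 2d - 1, \qquad z_{\text{lin}} = \frac{2\,n\,f}{f + n - d_{\text{ndc}}(f - n)},
and the result is negated so it matches the negative-Z view-space depths the SSR pass works with; in the multiview case the raw value is kept because a full unproject happens later.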
diff --git a/servers/rendering/renderer_rd/shaders/effects/specular_merge.glsl b/servers/rendering/renderer_rd/shaders/effects/specular_merge.glsl
new file mode 100644
index 0000000000..c62144fdaf
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/specular_merge.glsl
@@ -0,0 +1,112 @@
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#if defined(USE_MULTIVIEW) && defined(has_VK_KHR_multiview)
+#extension GL_EXT_multiview : enable
+#endif
+
+#ifdef USE_MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+// !BAS! This needs to become an input once we implement our fallback!
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#else // USE_MULTIVIEW
+// Set to zero, not supported in non stereo
+#define ViewIndex 0
+#endif //USE_MULTIVIEW
+
+#ifdef USE_MULTIVIEW
+layout(location = 0) out vec3 uv_interp;
+#else // USE_MULTIVIEW
+layout(location = 0) out vec2 uv_interp;
+#endif //USE_MULTIVIEW
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+
+#ifdef USE_MULTIVIEW
+ uv_interp = vec3(base_arr[gl_VertexIndex], ViewIndex);
+
+ gl_Position = vec4(uv_interp.xy * 2.0 - 1.0, 0.0, 1.0);
+#else
+ uv_interp = base_arr[gl_VertexIndex];
+
+ gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
+#endif
+}
+
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#if defined(USE_MULTIVIEW) && defined(has_VK_KHR_multiview)
+#extension GL_EXT_multiview : enable
+#endif
+
+#ifdef USE_MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+// !BAS! This needs to become an input once we implement our fallback!
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#else // USE_MULTIVIEW
+// Set to zero, not supported in non stereo
+#define ViewIndex 0
+#endif //USE_MULTIVIEW
+
+#ifdef USE_MULTIVIEW
+layout(location = 0) in vec3 uv_interp;
+#else // USE_MULTIVIEW
+layout(location = 0) in vec2 uv_interp;
+#endif //USE_MULTIVIEW
+
+#ifdef USE_MULTIVIEW
+layout(set = 0, binding = 0) uniform sampler2DArray specular;
+#else // USE_MULTIVIEW
+layout(set = 0, binding = 0) uniform sampler2D specular;
+#endif //USE_MULTIVIEW
+
+#ifdef MODE_SSR
+
+#ifdef USE_MULTIVIEW
+layout(set = 1, binding = 0) uniform sampler2DArray ssr;
+#else // USE_MULTIVIEW
+layout(set = 1, binding = 0) uniform sampler2D ssr;
+#endif //USE_MULTIVIEW
+
+#endif
+
+#ifdef MODE_MERGE
+
+#ifdef USE_MULTIVIEW
+layout(set = 2, binding = 0) uniform sampler2DArray diffuse;
+#else // USE_MULTIVIEW
+layout(set = 2, binding = 0) uniform sampler2D diffuse;
+#endif //USE_MULTIVIEW
+
+#endif
+
+layout(location = 0) out vec4 frag_color;
+
+void main() {
+ frag_color.rgb = texture(specular, uv_interp).rgb;
+ frag_color.a = 0.0;
+#ifdef MODE_SSR
+
+ vec4 ssr_color = texture(ssr, uv_interp);
+ frag_color.rgb = mix(frag_color.rgb, ssr_color.rgb, ssr_color.a);
+#endif
+
+#ifdef MODE_MERGE
+ frag_color += texture(diffuse, uv_interp);
+#endif
+ //added using additive blend
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/ss_effects_downsample.glsl b/servers/rendering/renderer_rd/shaders/effects/ss_effects_downsample.glsl
new file mode 100644
index 0000000000..134aae5ce7
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ss_effects_downsample.glsl
@@ -0,0 +1,229 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(push_constant, std430) uniform Params {
+ vec2 pixel_size;
+ float z_far;
+ float z_near;
+ bool orthogonal;
+ float radius_sq;
+ uvec2 pad;
+}
+params;
+
+layout(set = 0, binding = 0) uniform sampler2D source_depth;
+
+layout(r16f, set = 1, binding = 0) uniform restrict writeonly image2DArray dest_image0; //rename
+#ifdef GENERATE_MIPS
+layout(r16f, set = 2, binding = 0) uniform restrict writeonly image2DArray dest_image1;
+layout(r16f, set = 2, binding = 1) uniform restrict writeonly image2DArray dest_image2;
+layout(r16f, set = 2, binding = 2) uniform restrict writeonly image2DArray dest_image3;
+#ifdef GENERATE_FULL_MIPS
+layout(r16f, set = 2, binding = 3) uniform restrict writeonly image2DArray dest_image4;
+#endif
+#endif
+
+vec4 screen_space_to_view_space_depth(vec4 p_depth) {
+ if (params.orthogonal) {
+ vec4 depth = p_depth * 2.0 - 1.0;
+ return ((depth + (params.z_far + params.z_near) / (params.z_far - params.z_near)) * (params.z_far - params.z_near)) / 2.0;
+ }
+
+ float depth_linearize_mul = params.z_near;
+ float depth_linearize_add = params.z_far;
+
+ // Optimised version of "-cameraClipNear / (cameraClipFar - projDepth * (cameraClipFar - cameraClipNear)) * cameraClipFar"
+
+ // Set your depth_linearize_mul and depth_linearize_add to:
+ // depth_linearize_mul = ( cameraClipFar * cameraClipNear) / ( cameraClipFar - cameraClipNear );
+ // depth_linearize_add = cameraClipFar / ( cameraClipFar - cameraClipNear );
+
+ return depth_linearize_mul / (depth_linearize_add - p_depth);
+}
+
+float screen_space_to_view_space_depth(float p_depth) {
+ if (params.orthogonal) {
+ float depth = p_depth * 2.0 - 1.0;
+ return ((depth + (params.z_far + params.z_near) / (params.z_far - params.z_near)) * (params.z_far - params.z_near)) / (2.0 * params.z_far);
+ }
+
+ float depth_linearize_mul = params.z_near;
+ float depth_linearize_add = params.z_far;
+
+ return depth_linearize_mul / (depth_linearize_add - p_depth);
+}
+
+#ifdef GENERATE_MIPS
+
+shared float depth_buffer[4][8][8];
+
+float mip_smart_average(vec4 p_depths) {
+ float closest = min(min(p_depths.x, p_depths.y), min(p_depths.z, p_depths.w));
+ float fallof_sq = -1.0f / params.radius_sq;
+ vec4 dists = p_depths - closest.xxxx;
+ vec4 weights = clamp(dists * dists * fallof_sq + 1.0, 0.0, 1.0);
+ return dot(weights, p_depths) / dot(weights, vec4(1.0, 1.0, 1.0, 1.0));
+}
+
+void prepare_depths_and_mips(vec4 p_samples, uvec2 p_output_coord, uvec2 p_gtid) {
+ p_samples = screen_space_to_view_space_depth(p_samples);
+
+ depth_buffer[0][p_gtid.x][p_gtid.y] = p_samples.w;
+ depth_buffer[1][p_gtid.x][p_gtid.y] = p_samples.z;
+ depth_buffer[2][p_gtid.x][p_gtid.y] = p_samples.x;
+ depth_buffer[3][p_gtid.x][p_gtid.y] = p_samples.y;
+
+ imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 0), vec4(p_samples.w));
+ imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 1), vec4(p_samples.z));
+ imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 2), vec4(p_samples.x));
+ imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 3), vec4(p_samples.y));
+
+ uint depth_array_index = 2 * (p_gtid.y % 2) + (p_gtid.x % 2);
+ uvec2 depth_array_offset = ivec2(p_gtid.x % 2, p_gtid.y % 2);
+ ivec2 buffer_coord = ivec2(p_gtid) - ivec2(depth_array_offset);
+
+ p_output_coord /= 2;
+ groupMemoryBarrier();
+ barrier();
+
+ // if (still_alive) <-- all threads alive here
+ {
+ float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
+ float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 1];
+ float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 0];
+ float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 1];
+
+ float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
+ imageStore(dest_image1, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
+ depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
+ }
+
+ bool still_alive = p_gtid.x % 4 == depth_array_offset.x && p_gtid.y % 4 == depth_array_offset.y;
+
+ p_output_coord /= 2;
+ groupMemoryBarrier();
+ barrier();
+
+ if (still_alive) {
+ float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
+ float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 2];
+ float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 0];
+ float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 2];
+
+ float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
+ imageStore(dest_image2, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
+ depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
+ }
+
+	still_alive = p_gtid.x % 8 == depth_array_offset.x && p_gtid.y % 8 == depth_array_offset.y;
+
+ p_output_coord /= 2;
+ groupMemoryBarrier();
+ barrier();
+
+ if (still_alive) {
+ float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
+ float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 4];
+ float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 0];
+ float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 4];
+
+ float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
+ imageStore(dest_image3, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
+#ifndef GENERATE_FULL_MIPS
+ }
+#else
+ depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
+ }
+	still_alive = p_gtid.x % 16 == depth_array_offset.x && p_gtid.y % 16 == depth_array_offset.y;
+
+ p_output_coord /= 2;
+ groupMemoryBarrier();
+ barrier();
+
+ if (still_alive) {
+ float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
+ float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 8];
+ float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 8][buffer_coord.y + 0];
+ float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 8][buffer_coord.y + 8];
+
+ float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
+ imageStore(dest_image4, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
+ }
+#endif
+}
+#else
+#ifndef USE_HALF_BUFFERS
+void prepare_depths(vec4 p_samples, uvec2 p_tid) {
+ p_samples = screen_space_to_view_space_depth(p_samples);
+
+ imageStore(dest_image0, ivec3(p_tid, 0), vec4(p_samples.w));
+ imageStore(dest_image0, ivec3(p_tid, 1), vec4(p_samples.z));
+ imageStore(dest_image0, ivec3(p_tid, 2), vec4(p_samples.x));
+ imageStore(dest_image0, ivec3(p_tid, 3), vec4(p_samples.y));
+}
+#endif
+#endif
+
+void main() {
+#ifdef USE_HALF_BUFFERS
+#ifdef USE_HALF_SIZE
+ float sample_00 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 0, 4 * gl_GlobalInvocationID.y + 0), 0).x;
+ float sample_11 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 2, 4 * gl_GlobalInvocationID.y + 2), 0).x;
+#else
+ float sample_00 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 0, 2 * gl_GlobalInvocationID.y + 0), 0).x;
+ float sample_11 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 1, 2 * gl_GlobalInvocationID.y + 1), 0).x;
+#endif
+ sample_00 = screen_space_to_view_space_depth(sample_00);
+ sample_11 = screen_space_to_view_space_depth(sample_11);
+
+ imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 0), vec4(sample_00));
+ imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 3), vec4(sample_11));
+#else //!USE_HALF_BUFFERS
+#ifdef USE_HALF_SIZE
+ ivec2 depth_buffer_coord = 4 * ivec2(gl_GlobalInvocationID.xy);
+ ivec2 output_coord = ivec2(gl_GlobalInvocationID);
+
+ vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
+ vec4 samples;
+ samples.x = textureLodOffset(source_depth, uv, 0, ivec2(0, 2)).x;
+ samples.y = textureLodOffset(source_depth, uv, 0, ivec2(2, 2)).x;
+ samples.z = textureLodOffset(source_depth, uv, 0, ivec2(2, 0)).x;
+ samples.w = textureLodOffset(source_depth, uv, 0, ivec2(0, 0)).x;
+#else
+ ivec2 depth_buffer_coord = 2 * ivec2(gl_GlobalInvocationID.xy);
+ ivec2 output_coord = ivec2(gl_GlobalInvocationID);
+
+ vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
+ vec4 samples = textureGather(source_depth, uv);
+#endif
+#ifdef GENERATE_MIPS
+ prepare_depths_and_mips(samples, output_coord, gl_LocalInvocationID.xy);
+#else
+ prepare_depths(samples, gl_GlobalInvocationID.xy);
+#endif
+#endif
+}
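The shader above writes depth into four deinterleaved half-resolution slices, one per 2x2 sub-pixel position, so each SSAO pass later works on a coherent quarter of the screen. A minimal Python sketch of the pixel-to-slice mapping implied by depth_array_index = 2 * (y % 2) + (x % 2) and the explicit offsets used in the half-size path (illustrative only):

    # Hedged sketch: where a full-resolution pixel lands in the deinterleaved array.
    def deinterleave(x, y):
        return (x // 2, y // 2), 2 * (y % 2) + (x % 2)  # (half-res coord, array slice)

    assert deinterleave(0, 0) == ((0, 0), 0)
    assert deinterleave(1, 0) == ((0, 0), 1)
    assert deinterleave(0, 1) == ((0, 0), 2)
    assert deinterleave(1, 1) == ((0, 0), 3)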
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssao.glsl b/servers/rendering/renderer_rd/shaders/effects/ssao.glsl
new file mode 100644
index 0000000000..2a87e273bc
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssao.glsl
@@ -0,0 +1,483 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define INTELSSAO_MAIN_DISK_SAMPLE_COUNT (32)
+const vec4 sample_pattern[INTELSSAO_MAIN_DISK_SAMPLE_COUNT] = {
+ vec4(0.78488064, 0.56661671, 1.500000, -0.126083), vec4(0.26022232, -0.29575172, 1.500000, -1.064030), vec4(0.10459357, 0.08372527, 1.110000, -2.730563), vec4(-0.68286800, 0.04963045, 1.090000, -0.498827),
+ vec4(-0.13570161, -0.64190155, 1.250000, -0.532765), vec4(-0.26193795, -0.08205118, 0.670000, -1.783245), vec4(-0.61177456, 0.66664219, 0.710000, -0.044234), vec4(0.43675563, 0.25119025, 0.610000, -1.167283),
+ vec4(0.07884444, 0.86618668, 0.640000, -0.459002), vec4(-0.12790935, -0.29869005, 0.600000, -1.729424), vec4(-0.04031125, 0.02413622, 0.600000, -4.792042), vec4(0.16201244, -0.52851415, 0.790000, -1.067055),
+ vec4(-0.70991218, 0.47301072, 0.640000, -0.335236), vec4(0.03277707, -0.22349690, 0.600000, -1.982384), vec4(0.68921727, 0.36800742, 0.630000, -0.266718), vec4(0.29251814, 0.37775412, 0.610000, -1.422520),
+ vec4(-0.12224089, 0.96582592, 0.600000, -0.426142), vec4(0.11071457, -0.16131058, 0.600000, -2.165947), vec4(0.46562141, -0.59747696, 0.600000, -0.189760), vec4(-0.51548797, 0.11804193, 0.600000, -1.246800),
+ vec4(0.89141309, -0.42090443, 0.600000, 0.028192), vec4(-0.32402530, -0.01591529, 0.600000, -1.543018), vec4(0.60771245, 0.41635221, 0.600000, -0.605411), vec4(0.02379565, -0.08239821, 0.600000, -3.809046),
+ vec4(0.48951152, -0.23657045, 0.600000, -1.189011), vec4(-0.17611565, -0.81696892, 0.600000, -0.513724), vec4(-0.33930185, -0.20732205, 0.600000, -1.698047), vec4(-0.91974425, 0.05403209, 0.600000, 0.062246),
+ vec4(-0.15064627, -0.14949332, 0.600000, -1.896062), vec4(0.53180975, -0.35210401, 0.600000, -0.758838), vec4(0.41487166, 0.81442589, 0.600000, -0.505648), vec4(-0.24106961, -0.32721516, 0.600000, -1.665244)
+};
+
+// these values can be changed (up to SSAO_MAX_TAPS) with no changes required elsewhere; values for the 4th and 5th presets are ignored, but the array entries are needed to avoid compilation errors
+// the actual number of texture samples is two times this value (each "tap" has two symmetrical depth texture samples)
+const int num_taps[5] = { 3, 5, 12, 0, 0 };
+
+#define SSAO_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET (99) // to disable simply set to 99 or similar
+#define SSAO_TILT_SAMPLES_AMOUNT (0.4)
+//
+#define SSAO_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET (1) // to disable simply set to 99 or similar
+#define SSAO_HALOING_REDUCTION_AMOUNT (0.6) // values from 0.0 - 1.0, 1.0 means max weighting (will cause artifacts, 0.8 is more reasonable)
+//
+#define SSAO_NORMAL_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (2) // to disable simply set to 99 or similar
+#define SSAO_NORMAL_BASED_EDGES_DOT_THRESHOLD (0.5) // use 0-0.1 for super-sharp normal-based edges
+//
+#define SSAO_DETAIL_AO_ENABLE_AT_QUALITY_PRESET (1) // whether to use detail; to disable simply set to 99 or similar
+//
+#define SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET (2) // !!warning!! the MIP generation on the C++ side will be enabled on quality preset 2 regardless of this value, so if changing here, change the C++ side too
+#define SSAO_DEPTH_MIPS_GLOBAL_OFFSET (-4.3) // best noise/quality/performance tradeoff, found empirically
+//
+// !!warning!! the edge handling is hard-coded to 'disabled' on quality level 0, and enabled above, on the C++ side; while toggling it here will work for
+// testing purposes, it will not yield performance gains (or correct results)
+#define SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (1)
+//
+#define SSAO_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET (1)
+
+#define SSAO_MAX_TAPS 32
+#define SSAO_ADAPTIVE_TAP_BASE_COUNT 5
+#define SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT (SSAO_MAX_TAPS - SSAO_ADAPTIVE_TAP_BASE_COUNT)
+#define SSAO_DEPTH_MIP_LEVELS 4
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform sampler2DArray source_depth_mipmaps;
+layout(rgba8, set = 0, binding = 1) uniform restrict readonly image2D source_normal;
+layout(set = 0, binding = 2) uniform Constants { //get into a lower set
+ vec4 rotation_matrices[20];
+}
+constants;
+
+#ifdef ADAPTIVE
+layout(rg8, set = 1, binding = 0) uniform restrict readonly image2DArray source_ssao;
+layout(set = 1, binding = 1) uniform sampler2D source_importance;
+layout(set = 1, binding = 2, std430) buffer Counter {
+ uint sum;
+}
+counter;
+#endif
+
+layout(rg8, set = 2, binding = 0) uniform restrict writeonly image2D dest_image;
+
+// This push_constant is full - 128 bytes - if you need to add more data, consider adding to the uniform buffer instead
+layout(push_constant, std430) uniform Params {
+ ivec2 screen_size;
+ int pass;
+ int quality;
+
+ vec2 half_screen_pixel_size;
+ int size_multiplier;
+ float detail_intensity;
+
+ vec2 NDC_to_view_mul;
+ vec2 NDC_to_view_add;
+
+ vec2 pad2;
+ vec2 half_screen_pixel_size_x025;
+
+ float radius;
+ float intensity;
+ float shadow_power;
+ float shadow_clamp;
+
+ float fade_out_mul;
+ float fade_out_add;
+ float horizon_angle_threshold;
+ float inv_radius_near_limit;
+
+ bool is_orthogonal;
+ float neg_inv_radius;
+ float load_counter_avg_div;
+ float adaptive_sample_limit;
+
+ ivec2 pass_coord_offset;
+ vec2 pass_uv_offset;
+}
+params;
+
+// packing/unpacking for edges; 2 bits per edge mean 4 gradient values (0, 0.33, 0.66, 1) for smoother transitions!
+float pack_edges(vec4 p_edgesLRTB) {
+ p_edgesLRTB = round(clamp(p_edgesLRTB, 0.0, 1.0) * 3.05);
+ return dot(p_edgesLRTB, vec4(64.0 / 255.0, 16.0 / 255.0, 4.0 / 255.0, 1.0 / 255.0));
+}
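pack_edges() quantizes each of the four edge values to 2 bits and packs them into a single 8-bit channel; the blur and interleave shaders reverse this with unpack_edges() (which additionally adds a sharpness bias before clamping). A small Python round-trip sketch mirroring the arithmetic (illustrative, sharpness bias omitted):

    # Hedged sketch of the 2-bit-per-edge packing and its inverse.
    def pack_edges(lrtb):
        q = [round(max(0.0, min(1.0, e)) * 3.05) for e in lrtb]   # quantize to 0..3
        return (q[0] * 64 + q[1] * 16 + q[2] * 4 + q[3]) / 255.0  # one 8-bit channel

    def unpack_edges(packed):
        v = int(packed * 255.5)
        return [((v >> s) & 0x03) / 3.0 for s in (6, 4, 2, 0)]

    print(unpack_edges(pack_edges([1.0, 0.33, 0.66, 0.0])))  # ~[1.0, 0.333, 0.667, 0.0]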
+
+vec3 NDC_to_view_space(vec2 p_pos, float p_viewspace_depth) {
+ if (params.is_orthogonal) {
+ return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add), p_viewspace_depth);
+ } else {
+ return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add) * p_viewspace_depth, p_viewspace_depth);
+ }
+}
+
+// calculate effect radius and fit our screen sampling pattern inside it
+void calculate_radius_parameters(const float p_pix_center_length, const vec2 p_pixel_size_at_center, out float r_lookup_radius, out float r_radius, out float r_fallof_sq) {
+ r_radius = params.radius;
+
+ // when too close, on-screen sampling disk will grow beyond screen size; limit this to avoid closeup temporal artifacts
+ const float too_close_limit = clamp(p_pix_center_length * params.inv_radius_near_limit, 0.0, 1.0) * 0.8 + 0.2;
+
+ r_radius *= too_close_limit;
+
+ // 0.85 is to reduce the radius to allow for more samples on a slope to still stay within influence
+ r_lookup_radius = (0.85 * r_radius) / p_pixel_size_at_center.x;
+
+ // used to calculate falloff (both for AO samples and per-sample weights)
+ r_fallof_sq = -1.0 / (r_radius * r_radius);
+}
+
+vec4 calculate_edges(const float p_center_z, const float p_left_z, const float p_right_z, const float p_top_z, const float p_bottom_z) {
+ // slope-sensitive depth-based edge detection
+ vec4 edgesLRTB = vec4(p_left_z, p_right_z, p_top_z, p_bottom_z) - p_center_z;
+ vec4 edgesLRTB_slope_adjusted = edgesLRTB + edgesLRTB.yxwz;
+ edgesLRTB = min(abs(edgesLRTB), abs(edgesLRTB_slope_adjusted));
+ return clamp((1.3 - edgesLRTB / (p_center_z * 0.040)), 0.0, 1.0);
+}
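calculate_edges() flags a neighbour as lying across an edge only when its depth step is not explained by a planar slope: opposite deltas are summed, and if they roughly cancel, the surface is treated as continuous (weight 1 means no edge, 0 means edge). A small Python sketch of that test (illustrative):

    # Hedged sketch of the slope-aware depth edge test.
    def edge_weights(center, left, right, top, bottom):
        d = [left - center, right - center, top - center, bottom - center]
        slope_adjusted = [d[0] + d[1], d[1] + d[0], d[2] + d[3], d[3] + d[2]]
        e = [min(abs(a), abs(b)) for a, b in zip(d, slope_adjusted)]
        return [max(0.0, min(1.0, 1.3 - x / (center * 0.040))) for x in e]

    print(edge_weights(1.0, 0.9, 1.1, 1.0, 1.0))  # constant slope cancels: all 1.0, no edge
    print(edge_weights(1.0, 1.0, 1.5, 1.0, 1.0))  # one-sided step: right weight drops to 0.0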
+
+vec3 decode_normal(vec3 p_encoded_normal) {
+ vec3 normal = p_encoded_normal * 2.0 - 1.0;
+ return normal;
+}
+
+vec3 load_normal(ivec2 p_pos) {
+ vec3 encoded_normal = imageLoad(source_normal, p_pos).xyz;
+ encoded_normal.z = 1.0 - encoded_normal.z;
+ return decode_normal(encoded_normal);
+}
+
+vec3 load_normal(ivec2 p_pos, ivec2 p_offset) {
+ vec3 encoded_normal = imageLoad(source_normal, p_pos + p_offset).xyz;
+ encoded_normal.z = 1.0 - encoded_normal.z;
+ return decode_normal(encoded_normal);
+}
+
+// all vectors in viewspace
+float calculate_pixel_obscurance(vec3 p_pixel_normal, vec3 p_hit_delta, float p_fallof_sq) {
+ float length_sq = dot(p_hit_delta, p_hit_delta);
+ float NdotD = dot(p_pixel_normal, p_hit_delta) / sqrt(length_sq);
+
+ float falloff_mult = max(0.0, length_sq * p_fallof_sq + 1.0);
+
+ return max(0, NdotD - params.horizon_angle_threshold) * falloff_mult;
+}
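The per-sample term above is a horizon test: the cosine between the pixel normal and the direction to the hit point, minus the horizon-angle threshold, attenuated by a quadratic distance falloff. A tiny Python sketch (illustrative; the threshold value is an assumption, the shader reads params.horizon_angle_threshold):

    import math

    # Hedged sketch of calculate_pixel_obscurance(); falloff_sq is -1 / radius^2.
    def pixel_obscurance(normal, hit_delta, falloff_sq, horizon_threshold=0.06):
        length_sq = sum(c * c for c in hit_delta)
        n_dot_d = sum(n * d for n, d in zip(normal, hit_delta)) / math.sqrt(length_sq)
        falloff_mult = max(0.0, length_sq * falloff_sq + 1.0)
        return max(0.0, n_dot_d - horizon_threshold) * falloff_mult

    # A hit point straight along the normal, well inside the radius, occludes strongly.
    print(pixel_obscurance((0.0, 0.0, 1.0), (0.0, 0.0, 0.2), falloff_sq=-1.0))  # ~0.90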
+
+void SSAO_tap_inner(const int p_quality_level, inout float r_obscurance_sum, inout float r_weight_sum, const vec2 p_sampling_uv, const float p_mip_level, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const float p_fallof_sq, const float p_weight_mod) {
+ // get depth at sample
+ float viewspace_sample_z = textureLod(source_depth_mipmaps, vec3(p_sampling_uv, params.pass), p_mip_level).x;
+
+ // convert to viewspace
+ vec3 hit_pos = NDC_to_view_space(p_sampling_uv.xy, viewspace_sample_z).xyz;
+ vec3 hit_delta = hit_pos - p_pix_center_pos;
+
+ float obscurance = calculate_pixel_obscurance(p_pixel_normal, hit_delta, p_fallof_sq);
+ float weight = 1.0;
+
+ if (p_quality_level >= SSAO_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET) {
+ float reduct = max(0, -hit_delta.z);
+ reduct = clamp(reduct * params.neg_inv_radius + 2.0, 0.0, 1.0);
+ weight = SSAO_HALOING_REDUCTION_AMOUNT * reduct + (1.0 - SSAO_HALOING_REDUCTION_AMOUNT);
+ }
+ weight *= p_weight_mod;
+ r_obscurance_sum += obscurance * weight;
+ r_weight_sum += weight;
+}
+
+void SSAOTap(const int p_quality_level, inout float r_obscurance_sum, inout float r_weight_sum, const int p_tap_index, const mat2 p_rot_scale, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const vec2 p_normalized_screen_pos, const float p_mip_offset, const float p_fallof_sq, float p_weight_mod, vec2 p_norm_xy, float p_norm_xy_length) {
+ vec2 sample_offset;
+ float sample_pow_2_len;
+
+ // patterns
+ {
+ vec4 new_sample = sample_pattern[p_tap_index];
+ sample_offset = new_sample.xy * p_rot_scale;
+ sample_pow_2_len = new_sample.w; // precalculated, same as: sample_pow_2_len = log2( length( new_sample.xy ) );
+ p_weight_mod *= new_sample.z;
+ }
+
+ // snap to pixel center (more correct obscurance math, avoids artifacts)
+ sample_offset = round(sample_offset);
+
+	// calculate MIP based on the sample distance from the centre, similar to the approach described
+ // in http://graphics.cs.williams.edu/papers/SAOHPG12/.
+ float mip_level = (p_quality_level < SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (sample_pow_2_len + p_mip_offset);
+
+ vec2 sampling_uv = sample_offset * params.half_screen_pixel_size + p_normalized_screen_pos;
+
+ SSAO_tap_inner(p_quality_level, r_obscurance_sum, r_weight_sum, sampling_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
+
+ // for the second tap, just use the mirrored offset
+ vec2 sample_offset_mirrored_uv = -sample_offset;
+
+ // tilt the second set of samples so that the disk is effectively rotated by the normal
+ // effective at removing one set of artifacts, but too expensive for lower quality settings
+ if (p_quality_level >= SSAO_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET) {
+ float dot_norm = dot(sample_offset_mirrored_uv, p_norm_xy);
+ sample_offset_mirrored_uv -= dot_norm * p_norm_xy_length * p_norm_xy;
+ sample_offset_mirrored_uv = round(sample_offset_mirrored_uv);
+ }
+
+ // snap to pixel center (more correct obscurance math, avoids artifacts)
+ vec2 sampling_mirrored_uv = sample_offset_mirrored_uv * params.half_screen_pixel_size + p_normalized_screen_pos;
+
+ SSAO_tap_inner(p_quality_level, r_obscurance_sum, r_weight_sum, sampling_mirrored_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
+}
+
+void generate_SSAO_shadows_internal(out float r_shadow_term, out vec4 r_edges, out float r_weight, const vec2 p_pos, int p_quality_level, bool p_adaptive_base) {
+ vec2 pos_rounded = trunc(p_pos);
+ uvec2 upos = uvec2(pos_rounded);
+
+ const int number_of_taps = (p_adaptive_base) ? (SSAO_ADAPTIVE_TAP_BASE_COUNT) : (num_taps[p_quality_level]);
+ float pix_z, pix_left_z, pix_top_z, pix_right_z, pix_bottom_z;
+
+ vec4 valuesUL = textureGather(source_depth_mipmaps, vec3(pos_rounded * params.half_screen_pixel_size, params.pass));
+ vec4 valuesBR = textureGather(source_depth_mipmaps, vec3((pos_rounded + vec2(1.0)) * params.half_screen_pixel_size, params.pass));
+
+ // get this pixel's viewspace depth
+ pix_z = valuesUL.y;
+
+ // get left right top bottom neighbouring pixels for edge detection (gets compiled out on quality_level == 0)
+ pix_left_z = valuesUL.x;
+ pix_top_z = valuesUL.z;
+ pix_right_z = valuesBR.z;
+ pix_bottom_z = valuesBR.x;
+
+ vec2 normalized_screen_pos = pos_rounded * params.half_screen_pixel_size + params.half_screen_pixel_size_x025;
+ vec3 pix_center_pos = NDC_to_view_space(normalized_screen_pos, pix_z);
+
+ // Load this pixel's viewspace normal
+ uvec2 full_res_coord = upos * 2 * params.size_multiplier + params.pass_coord_offset.xy;
+ vec3 pixel_normal = load_normal(ivec2(full_res_coord));
+
+ const vec2 pixel_size_at_center = NDC_to_view_space(normalized_screen_pos.xy + params.half_screen_pixel_size, pix_center_pos.z).xy - pix_center_pos.xy;
+
+ float pixel_lookup_radius;
+ float fallof_sq;
+
+ // calculate effect radius and fit our screen sampling pattern inside it
+ float viewspace_radius;
+ calculate_radius_parameters(length(pix_center_pos), pixel_size_at_center, pixel_lookup_radius, viewspace_radius, fallof_sq);
+
+ // calculate samples rotation/scaling
+ mat2 rot_scale_matrix;
+ uint pseudo_random_index;
+
+ {
+ vec4 rotation_scale;
+ // reduce effect radius near the screen edges slightly; ideally, one would render a larger depth buffer (5% on each side) instead
+ if (!p_adaptive_base && (p_quality_level >= SSAO_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET)) {
+ float near_screen_border = min(min(normalized_screen_pos.x, 1.0 - normalized_screen_pos.x), min(normalized_screen_pos.y, 1.0 - normalized_screen_pos.y));
+ near_screen_border = clamp(10.0 * near_screen_border + 0.6, 0.0, 1.0);
+ pixel_lookup_radius *= near_screen_border;
+ }
+
+ // load & update pseudo-random rotation matrix
+ pseudo_random_index = uint(pos_rounded.y * 2 + pos_rounded.x) % 5;
+ rotation_scale = constants.rotation_matrices[params.pass * 5 + pseudo_random_index];
+ rot_scale_matrix = mat2(rotation_scale.x * pixel_lookup_radius, rotation_scale.y * pixel_lookup_radius, rotation_scale.z * pixel_lookup_radius, rotation_scale.w * pixel_lookup_radius);
+ }
+
+ // the main obscurance & sample weight storage
+ float obscurance_sum = 0.0;
+ float weight_sum = 0.0;
+
+ // edge mask for between this and left/right/top/bottom neighbour pixels - not used in quality level 0 so initialize to "no edge" (1 is no edge, 0 is edge)
+ vec4 edgesLRTB = vec4(1.0, 1.0, 1.0, 1.0);
+
+	// Move center pixel slightly towards camera to avoid imprecision artifacts due to the use of a 16-bit depth buffer; much smaller offsets are needed when using 32-bit floats
+ pix_center_pos *= 0.9992;
+
+ if (!p_adaptive_base && (p_quality_level >= SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
+ edgesLRTB = calculate_edges(pix_z, pix_left_z, pix_right_z, pix_top_z, pix_bottom_z);
+ }
+
+	// adds a sharper, higher-definition effect, which gets blurred out (reuses the left/right/top/bottom samples that we used for edge detection)
+ if (!p_adaptive_base && (p_quality_level >= SSAO_DETAIL_AO_ENABLE_AT_QUALITY_PRESET)) {
+ // disable in case of quality level 4 (reference)
+ if (p_quality_level != 4) {
+			// approximate neighbouring pixel positions (actually just deltas, i.e. "position - pix_center_pos")
+ vec3 normalized_viewspace_dir = vec3(pix_center_pos.xy / pix_center_pos.zz, 1.0);
+ vec3 pixel_left_delta = vec3(-pixel_size_at_center.x, 0.0, 0.0) + normalized_viewspace_dir * (pix_left_z - pix_center_pos.z);
+ vec3 pixel_right_delta = vec3(+pixel_size_at_center.x, 0.0, 0.0) + normalized_viewspace_dir * (pix_right_z - pix_center_pos.z);
+ vec3 pixel_top_delta = vec3(0.0, -pixel_size_at_center.y, 0.0) + normalized_viewspace_dir * (pix_top_z - pix_center_pos.z);
+ vec3 pixel_bottom_delta = vec3(0.0, +pixel_size_at_center.y, 0.0) + normalized_viewspace_dir * (pix_bottom_z - pix_center_pos.z);
+
+ const float range_reduction = 4.0f; // this is to avoid various artifacts
+ const float modified_fallof_sq = range_reduction * fallof_sq;
+
+ vec4 additional_obscurance;
+ additional_obscurance.x = calculate_pixel_obscurance(pixel_normal, pixel_left_delta, modified_fallof_sq);
+ additional_obscurance.y = calculate_pixel_obscurance(pixel_normal, pixel_right_delta, modified_fallof_sq);
+ additional_obscurance.z = calculate_pixel_obscurance(pixel_normal, pixel_top_delta, modified_fallof_sq);
+ additional_obscurance.w = calculate_pixel_obscurance(pixel_normal, pixel_bottom_delta, modified_fallof_sq);
+
+ obscurance_sum += params.detail_intensity * dot(additional_obscurance, edgesLRTB);
+ }
+ }
+
+ // Sharp normals also create edges - but this adds to the cost as well
+ if (!p_adaptive_base && (p_quality_level >= SSAO_NORMAL_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
+ vec3 neighbour_normal_left = load_normal(ivec2(full_res_coord), ivec2(-2, 0));
+ vec3 neighbour_normal_right = load_normal(ivec2(full_res_coord), ivec2(2, 0));
+ vec3 neighbour_normal_top = load_normal(ivec2(full_res_coord), ivec2(0, -2));
+ vec3 neighbour_normal_bottom = load_normal(ivec2(full_res_coord), ivec2(0, 2));
+
+ const float dot_threshold = SSAO_NORMAL_BASED_EDGES_DOT_THRESHOLD;
+
+ vec4 normal_edgesLRTB;
+ normal_edgesLRTB.x = clamp((dot(pixel_normal, neighbour_normal_left) + dot_threshold), 0.0, 1.0);
+ normal_edgesLRTB.y = clamp((dot(pixel_normal, neighbour_normal_right) + dot_threshold), 0.0, 1.0);
+ normal_edgesLRTB.z = clamp((dot(pixel_normal, neighbour_normal_top) + dot_threshold), 0.0, 1.0);
+ normal_edgesLRTB.w = clamp((dot(pixel_normal, neighbour_normal_bottom) + dot_threshold), 0.0, 1.0);
+
+ edgesLRTB *= normal_edgesLRTB;
+ }
+
+ const float global_mip_offset = SSAO_DEPTH_MIPS_GLOBAL_OFFSET;
+ float mip_offset = (p_quality_level < SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (log2(pixel_lookup_radius) + global_mip_offset);
+
+ // Used to tilt the second set of samples so that the disk is effectively rotated by the normal
+ // effective at removing one set of artifacts, but too expensive for lower quality settings
+ vec2 norm_xy = vec2(pixel_normal.x, pixel_normal.y);
+ float norm_xy_length = length(norm_xy);
+ norm_xy /= vec2(norm_xy_length, -norm_xy_length);
+ norm_xy_length *= SSAO_TILT_SAMPLES_AMOUNT;
+
+ // standard, non-adaptive approach
+ if ((p_quality_level != 3) || p_adaptive_base) {
+ for (int i = 0; i < number_of_taps; i++) {
+ SSAOTap(p_quality_level, obscurance_sum, weight_sum, i, rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, 1.0, norm_xy, norm_xy_length);
+ }
+ }
+#ifdef ADAPTIVE
+ else {
+ // add new ones if needed
+ vec2 full_res_uv = normalized_screen_pos + params.pass_uv_offset.xy;
+ float importance = textureLod(source_importance, full_res_uv, 0.0).x;
+
+		// this is to normalize SSAO_DETAIL_AO_AMOUNT across all pixels regardless of importance
+ obscurance_sum *= (SSAO_ADAPTIVE_TAP_BASE_COUNT / float(SSAO_MAX_TAPS)) + (importance * SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT / float(SSAO_MAX_TAPS));
+
+ // load existing base values
+ vec2 base_values = imageLoad(source_ssao, ivec3(upos, params.pass)).xy;
+ weight_sum += base_values.y * float(SSAO_ADAPTIVE_TAP_BASE_COUNT * 4.0);
+ obscurance_sum += (base_values.x) * weight_sum;
+
+ // increase importance around edges
+ float edge_count = dot(1.0 - edgesLRTB, vec4(1.0, 1.0, 1.0, 1.0));
+
+ float avg_total_importance = float(counter.sum) * params.load_counter_avg_div;
+
+ float importance_limiter = clamp(params.adaptive_sample_limit / avg_total_importance, 0.0, 1.0);
+ importance *= importance_limiter;
+
+ float additional_sample_count = SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT * importance;
+
+ const float blend_range = 3.0;
+ const float blend_range_inv = 1.0 / blend_range;
+
+ additional_sample_count += 0.5;
+ uint additional_samples = uint(additional_sample_count);
+ uint additional_samples_to = min(SSAO_MAX_TAPS, additional_samples + SSAO_ADAPTIVE_TAP_BASE_COUNT);
+
+ for (uint i = SSAO_ADAPTIVE_TAP_BASE_COUNT; i < additional_samples_to; i++) {
+ additional_sample_count -= 1.0f;
+ float weight_mod = clamp(additional_sample_count * blend_range_inv, 0.0, 1.0);
+ SSAOTap(p_quality_level, obscurance_sum, weight_sum, int(i), rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, weight_mod, norm_xy, norm_xy_length);
+ }
+ }
+#endif
+
+ // early out for adaptive base - just output weight (used for the next pass)
+ if (p_adaptive_base) {
+ float obscurance = obscurance_sum / weight_sum;
+
+ r_shadow_term = obscurance;
+ r_edges = vec4(0.0);
+ r_weight = weight_sum;
+ return;
+ }
+
+ // calculate weighted average
+ float obscurance = obscurance_sum / weight_sum;
+
+ // calculate fadeout (1 close, gradient, 0 far)
+ float fade_out = clamp(pix_center_pos.z * params.fade_out_mul + params.fade_out_add, 0.0, 1.0);
+
+	// Reduce the SSAO shadowing on edges to remove edge artifacts (not applied at the lowest quality level)
+ if (!p_adaptive_base && (p_quality_level >= SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
+		// when there are more than 2 opposing edges, start fading out the occlusion to reduce aliasing artifacts
+ float edge_fadeout_factor = clamp((1.0 - edgesLRTB.x - edgesLRTB.y) * 0.35, 0.0, 1.0) + clamp((1.0 - edgesLRTB.z - edgesLRTB.w) * 0.35, 0.0, 1.0);
+
+ fade_out *= clamp(1.0 - edge_fadeout_factor, 0.0, 1.0);
+ }
+
+ // strength
+ obscurance = params.intensity * obscurance;
+
+ // clamp
+ obscurance = min(obscurance, params.shadow_clamp);
+
+ // fadeout
+ obscurance *= fade_out;
+
+	// conceptually switch to an "occlusion" term that actually encodes visibility (it grows with visibility; occlusion == 1 implies full visibility),
+ // to be in line with what is more commonly used.
+ float occlusion = 1.0 - obscurance;
+
+ // modify the gradient
+ // note: this cannot be moved to a later pass because of loss of precision after storing in the render target
+ occlusion = pow(clamp(occlusion, 0.0, 1.0), params.shadow_power);
+
+ // outputs!
+ r_shadow_term = occlusion; // Our final 'occlusion' term (0 means fully occluded, 1 means fully lit)
+ r_edges = edgesLRTB; // These are used to prevent blurring across edges, 1 means no edge, 0 means edge, 0.5 means half way there, etc.
+ r_weight = weight_sum;
+}
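The tail of the function reduces to a short shaping chain on the averaged obscurance: scale by intensity, clamp, apply the distance/edge fade, invert into a visibility-style occlusion term and raise it to shadow_power. A minimal Python sketch of that chain (the parameter values here are assumptions, not the engine defaults):

    # Hedged sketch of the final shaping applied to the averaged obscurance.
    def shade(obscurance_avg, intensity=2.0, shadow_clamp=0.98, fade_out=1.0, shadow_power=1.5):
        obscurance = min(intensity * obscurance_avg, shadow_clamp) * fade_out
        occlusion = 1.0 - obscurance                  # 1 = fully lit, 0 = fully occluded
        return max(0.0, min(1.0, occlusion)) ** shadow_power

    print(shade(0.25))  # ~0.35 with the assumed parameters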
+
+void main() {
+ float out_shadow_term;
+ float out_weight;
+ vec4 out_edges;
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+ if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
+ return;
+ }
+
+ vec2 uv = vec2(gl_GlobalInvocationID) + vec2(0.5);
+#ifdef SSAO_BASE
+ generate_SSAO_shadows_internal(out_shadow_term, out_edges, out_weight, uv, params.quality, true);
+
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(out_shadow_term, out_weight / (float(SSAO_ADAPTIVE_TAP_BASE_COUNT) * 4.0), 0.0, 0.0));
+#else
+ generate_SSAO_shadows_internal(out_shadow_term, out_edges, out_weight, uv, params.quality, false); // pass in quality levels
+ if (params.quality == 0) {
+ out_edges = vec4(1.0);
+ }
+
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(out_shadow_term, pack_edges(out_edges), 0.0, 0.0));
+#endif
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssao_blur.glsl b/servers/rendering/renderer_rd/shaders/effects/ssao_blur.glsl
new file mode 100644
index 0000000000..f42734c46d
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssao_blur.glsl
@@ -0,0 +1,154 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform sampler2D source_ssao;
+
+layout(rg8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
+
+layout(push_constant, std430) uniform Params {
+ float edge_sharpness;
+ float pad;
+ vec2 half_screen_pixel_size;
+}
+params;
+
+vec4 unpack_edges(float p_packed_val) {
+ uint packed_val = uint(p_packed_val * 255.5);
+ vec4 edgesLRTB;
+ edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
+ edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
+ edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
+ edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
+
+ return clamp(edgesLRTB + params.edge_sharpness, 0.0, 1.0);
+}
+
+void add_sample(float p_ssao_value, float p_edge_value, inout float r_sum, inout float r_sum_weight) {
+ float weight = p_edge_value;
+
+ r_sum += (weight * p_ssao_value);
+ r_sum_weight += weight;
+}
+
+#ifdef MODE_WIDE
+vec2 sample_blurred_wide(vec2 p_coord) {
+ vec2 vC = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, 0)).xy;
+ vec2 vL = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(-2, 0)).xy;
+ vec2 vT = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, -2)).xy;
+ vec2 vR = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(2, 0)).xy;
+ vec2 vB = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, 2)).xy;
+
+ float packed_edges = vC.y;
+ vec4 edgesLRTB = unpack_edges(packed_edges);
+ edgesLRTB.x *= unpack_edges(vL.y).y;
+ edgesLRTB.z *= unpack_edges(vT.y).w;
+ edgesLRTB.y *= unpack_edges(vR.y).x;
+ edgesLRTB.w *= unpack_edges(vB.y).z;
+
+ float ssao_value = vC.x;
+ float ssao_valueL = vL.x;
+ float ssao_valueT = vT.x;
+ float ssao_valueR = vR.x;
+ float ssao_valueB = vB.x;
+
+ float sum_weight = 0.8f;
+ float sum = ssao_value * sum_weight;
+
+ add_sample(ssao_valueL, edgesLRTB.x, sum, sum_weight);
+ add_sample(ssao_valueR, edgesLRTB.y, sum, sum_weight);
+ add_sample(ssao_valueT, edgesLRTB.z, sum, sum_weight);
+ add_sample(ssao_valueB, edgesLRTB.w, sum, sum_weight);
+
+ float ssao_avg = sum / sum_weight;
+
+ ssao_value = ssao_avg;
+
+ return vec2(ssao_value, packed_edges);
+}
+#endif
+
+#ifdef MODE_SMART
+vec2 sample_blurred(vec3 p_pos, vec2 p_coord) {
+ float packed_edges = texelFetch(source_ssao, ivec2(p_pos.xy), 0).y;
+ vec4 edgesLRTB = unpack_edges(packed_edges);
+
+ vec4 valuesUL = textureGather(source_ssao, vec2(p_coord - params.half_screen_pixel_size * 0.5));
+ vec4 valuesBR = textureGather(source_ssao, vec2(p_coord + params.half_screen_pixel_size * 0.5));
+
+ float ssao_value = valuesUL.y;
+ float ssao_valueL = valuesUL.x;
+ float ssao_valueT = valuesUL.z;
+ float ssao_valueR = valuesBR.z;
+ float ssao_valueB = valuesBR.x;
+
+ float sum_weight = 0.5;
+ float sum = ssao_value * sum_weight;
+
+ add_sample(ssao_valueL, edgesLRTB.x, sum, sum_weight);
+ add_sample(ssao_valueR, edgesLRTB.y, sum, sum_weight);
+
+ add_sample(ssao_valueT, edgesLRTB.z, sum, sum_weight);
+ add_sample(ssao_valueB, edgesLRTB.w, sum, sum_weight);
+
+ float ssao_avg = sum / sum_weight;
+
+ ssao_value = ssao_avg;
+
+ return vec2(ssao_value, packed_edges);
+}
+#endif
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+#ifdef MODE_NON_SMART
+
+ vec2 half_pixel = params.half_screen_pixel_size * 0.5;
+
+ vec2 uv = (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size;
+
+ vec2 center = textureLod(source_ssao, vec2(uv), 0.0).xy;
+
+ vec4 vals;
+ vals.x = textureLod(source_ssao, vec2(uv + vec2(-half_pixel.x * 3, -half_pixel.y)), 0.0).x;
+ vals.y = textureLod(source_ssao, vec2(uv + vec2(+half_pixel.x, -half_pixel.y * 3)), 0.0).x;
+ vals.z = textureLod(source_ssao, vec2(uv + vec2(-half_pixel.x, +half_pixel.y * 3)), 0.0).x;
+ vals.w = textureLod(source_ssao, vec2(uv + vec2(+half_pixel.x * 3, +half_pixel.y)), 0.0).x;
+
+ vec2 sampled = vec2(dot(vals, vec4(0.2)) + center.x * 0.2, center.y);
+
+#else
+#ifdef MODE_SMART
+ vec2 sampled = sample_blurred(vec3(gl_GlobalInvocationID), (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
+#else // MODE_WIDE
+ vec2 sampled = sample_blurred_wide((vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
+#endif
+
+#endif
+ imageStore(dest_image, ivec2(ssC), vec4(sampled, 0.0, 0.0));
+}
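In MODE_SMART the blur is an edge-weighted average: the centre keeps a fixed base weight and each neighbour contributes in proportion to the unpacked edge value toward it, so samples across a hard edge are ignored. A small Python sketch of that weighting (illustrative):

    # Hedged sketch of the edge-aware weighting in sample_blurred().
    def blur(center, neighbours, edges, center_weight=0.5):
        total = center * center_weight
        weight = center_weight
        for value, edge in zip(neighbours, edges):
            total += value * edge
            weight += edge
        return total / weight

    # A neighbour across a hard edge (edge value 0) is excluded from the average.
    print(blur(0.8, [0.2, 0.7, 0.75, 0.85], edges=[0.0, 1.0, 1.0, 1.0]))  # ~0.77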
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssao_importance_map.glsl b/servers/rendering/renderer_rd/shaders/effects/ssao_importance_map.glsl
new file mode 100644
index 0000000000..04f98964e8
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssao_importance_map.glsl
@@ -0,0 +1,123 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+#ifdef GENERATE_MAP
+layout(set = 0, binding = 0) uniform sampler2DArray source_texture;
+#else
+layout(set = 0, binding = 0) uniform sampler2D source_importance;
+#endif
+layout(r8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
+
+#ifdef PROCESS_MAPB
+layout(set = 2, binding = 0, std430) buffer Counter {
+ uint sum;
+}
+counter;
+#endif
+
+layout(push_constant, std430) uniform Params {
+ vec2 half_screen_pixel_size;
+ float intensity;
+ float power;
+}
+params;
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+#ifdef GENERATE_MAP
+ // importance map stuff
+ uvec2 base_position = ssC * 2;
+
+ vec2 base_uv = (vec2(base_position) + vec2(0.5f, 0.5f)) * params.half_screen_pixel_size;
+
+ float minV = 1.0;
+ float maxV = 0.0;
+ for (int i = 0; i < 4; i++) {
+ vec4 vals = textureGather(source_texture, vec3(base_uv, i));
+
+ // apply the same modifications that would have been applied in the main shader
+ vals = params.intensity * vals;
+
+ vals = 1 - vals;
+
+ vals = pow(clamp(vals, 0.0, 1.0), vec4(params.power));
+
+ maxV = max(maxV, max(max(vals.x, vals.y), max(vals.z, vals.w)));
+ minV = min(minV, min(min(vals.x, vals.y), min(vals.z, vals.w)));
+ }
+
+ float min_max_diff = maxV - minV;
+
+ imageStore(dest_image, ssC, vec4(pow(clamp(min_max_diff * 2.0, 0.0, 1.0), 0.8)));
+#endif
+
+#ifdef PROCESS_MAPA
+ vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
+
+ float centre = textureLod(source_importance, uv, 0.0).x;
+
+ vec2 half_pixel = params.half_screen_pixel_size;
+
+ vec4 vals;
+ vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, -half_pixel.y), 0.0).x;
+ vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x, -half_pixel.y * 3), 0.0).x;
+ vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, +half_pixel.y), 0.0).x;
+ vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x, +half_pixel.y * 3), 0.0).x;
+
+ float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
+
+ imageStore(dest_image, ssC, vec4(avg));
+#endif
+
+#ifdef PROCESS_MAPB
+ vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
+
+ float centre = textureLod(source_importance, uv, 0.0).x;
+
+ vec2 half_pixel = params.half_screen_pixel_size;
+
+ vec4 vals;
+ vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x, -half_pixel.y * 3), 0.0).x;
+ vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, -half_pixel.y), 0.0).x;
+ vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x, +half_pixel.y * 3), 0.0).x;
+ vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, +half_pixel.y), 0.0).x;
+
+ float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
+
+ imageStore(dest_image, ssC, vec4(avg));
+
+ // sum the average; to avoid overflowing we assume max AO resolution is not bigger than 16384x16384; so quarter res (used here) will be 4096x4096, which leaves us with 8 bits per pixel
+ uint sum = uint(clamp(avg, 0.0, 1.0) * 255.0 + 0.5);
+
+	// save only every 9th pixel to avoid atomicAdd congestion - since we're blurring, this is good enough; compensated by multiplying load_counter_avg_div by 9
+ if (((ssC.x % 3) + (ssC.y % 3)) == 0) {
+ atomicAdd(counter.sum, sum);
+ }
+#endif
+}
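The PROCESS_MAPB pass needs the average importance over the whole map, but one atomicAdd per pixel would serialize heavily, so only one pixel per 3x3 block contributes and the readback is rescaled through load_counter_avg_div. A Python sketch of that bookkeeping (the exact compensation factor computed on the C++ side is an assumption here):

    # Hedged sketch of the sparse importance counter and its compensation.
    def average_importance(importance, width, height):
        counter = 0
        for y in range(height):
            for x in range(width):
                if (x % 3) + (y % 3) == 0:                     # only every 9th pixel
                    counter += int(min(max(importance[y][x], 0.0), 1.0) * 255.0 + 0.5)
        load_counter_avg_div = 9.0 / (width * height * 255.0)  # assumed C++-side factor
        return counter * load_counter_avg_div

    grid = [[0.5] * 12 for _ in range(12)]
    print(average_importance(grid, 12, 12))  # ~0.5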
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssao_interleave.glsl b/servers/rendering/renderer_rd/shaders/effects/ssao_interleave.glsl
new file mode 100644
index 0000000000..f6a9a92fac
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssao_interleave.glsl
@@ -0,0 +1,119 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(rgba8, set = 0, binding = 0) uniform restrict writeonly image2D dest_image;
+layout(set = 1, binding = 0) uniform sampler2DArray source_texture;
+
+layout(push_constant, std430) uniform Params {
+ float inv_sharpness;
+ uint size_modifier;
+ vec2 pixel_size;
+}
+params;
+
+vec4 unpack_edges(float p_packed_val) {
+ uint packed_val = uint(p_packed_val * 255.5);
+ vec4 edgesLRTB;
+ edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
+ edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
+ edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
+ edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
+
+ return clamp(edgesLRTB + params.inv_sharpness, 0.0, 1.0);
+}
+
+void main() {
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+ if (any(greaterThanEqual(ssC, ivec2(1.0 / params.pixel_size)))) { //too large, do nothing
+ return;
+ }
+
+#ifdef MODE_SMART
+ float ao;
+ uvec2 pix_pos = uvec2(gl_GlobalInvocationID.xy);
+ vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
+
+ // calculate index in the four deinterleaved source array texture
+ int mx = int(pix_pos.x % 2);
+ int my = int(pix_pos.y % 2);
+ int index_center = mx + my * 2; // center index
+ int index_horizontal = (1 - mx) + my * 2; // neighbouring, horizontal
+ int index_vertical = mx + (1 - my) * 2; // neighbouring, vertical
+ int index_diagonal = (1 - mx) + (1 - my) * 2; // diagonal
+
+ vec2 center_val = texelFetch(source_texture, ivec3(pix_pos / uvec2(params.size_modifier), index_center), 0).xy;
+
+ ao = center_val.x;
+
+ vec4 edgesLRTB = unpack_edges(center_val.y);
+
+ // convert index shifts to sampling offsets
+ float fmx = float(mx);
+ float fmy = float(my);
+
+ // in case of an edge, push sampling offsets away from the edge (towards pixel center)
+ float fmxe = (edgesLRTB.y - edgesLRTB.x);
+ float fmye = (edgesLRTB.w - edgesLRTB.z);
+
+ // calculate final sampling offsets and sample using bilinear filter
+ vec2 uv_horizontal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx + fmxe - 0.5, 0.5 - fmy)) * params.pixel_size;
+ float ao_horizontal = textureLod(source_texture, vec3(uv_horizontal, index_horizontal), 0.0).x;
+ vec2 uv_vertical = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(0.5 - fmx, fmy - 0.5 + fmye)) * params.pixel_size;
+ float ao_vertical = textureLod(source_texture, vec3(uv_vertical, index_vertical), 0.0).x;
+ vec2 uv_diagonal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx - 0.5 + fmxe, fmy - 0.5 + fmye)) * params.pixel_size;
+ float ao_diagonal = textureLod(source_texture, vec3(uv_diagonal, index_diagonal), 0.0).x;
+
+ // reduce weight for samples near edge - if the edge is on both sides, weight goes to 0
+ vec4 blendWeights;
+ blendWeights.x = 1.0;
+ blendWeights.y = (edgesLRTB.x + edgesLRTB.y) * 0.5;
+ blendWeights.z = (edgesLRTB.z + edgesLRTB.w) * 0.5;
+ blendWeights.w = (blendWeights.y + blendWeights.z) * 0.5;
+
+ // calculate weighted average
+ float blendWeightsSum = dot(blendWeights, vec4(1.0, 1.0, 1.0, 1.0));
+ ao = dot(vec4(ao, ao_horizontal, ao_vertical, ao_diagonal), blendWeights) / blendWeightsSum;
+
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(ao));
+#else // !MODE_SMART
+
+ vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
+#ifdef MODE_HALF
+ float a = textureLod(source_texture, vec3(uv, 0), 0.0).x;
+ float d = textureLod(source_texture, vec3(uv, 3), 0.0).x;
+ float avg = (a + d) * 0.5;
+
+#else
+ float a = textureLod(source_texture, vec3(uv, 0), 0.0).x;
+ float b = textureLod(source_texture, vec3(uv, 1), 0.0).x;
+ float c = textureLod(source_texture, vec3(uv, 2), 0.0).x;
+ float d = textureLod(source_texture, vec3(uv, 3), 0.0).x;
+ float avg = (a + b + c + d) * 0.25;
+
+#endif
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(avg));
+#endif
+}
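In MODE_SMART the interleave pass undoes the downsampler's deinterleaving: from a full-resolution pixel it derives which slice holds its own AO value and which slices hold the horizontal, vertical and diagonal neighbours. A minimal Python sketch of that index selection, mirroring the mx/my arithmetic above:

    # Hedged sketch of the slice-index selection used for the smart upsample.
    def slice_indices(x, y):
        mx, my = x % 2, y % 2
        return {
            "center": mx + my * 2,
            "horizontal": (1 - mx) + my * 2,
            "vertical": mx + (1 - my) * 2,
            "diagonal": (1 - mx) + (1 - my) * 2,
        }

    print(slice_indices(5, 8))  # center=1, horizontal=0, vertical=3, diagonal=2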
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssil.glsl b/servers/rendering/renderer_rd/shaders/effects/ssil.glsl
new file mode 100644
index 0000000000..513791dfbf
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssil.glsl
@@ -0,0 +1,444 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+// 2021-05-27: clayjohn: convert SSAO to SSIL
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+#define SSIL_MAIN_DISK_SAMPLE_COUNT (32)
+const vec4 sample_pattern[SSIL_MAIN_DISK_SAMPLE_COUNT] = {
+ vec4(0.78488064, 0.56661671, 1.500000, -0.126083), vec4(0.26022232, -0.29575172, 1.500000, -1.064030), vec4(0.10459357, 0.08372527, 1.110000, -2.730563), vec4(-0.68286800, 0.04963045, 1.090000, -0.498827),
+ vec4(-0.13570161, -0.64190155, 1.250000, -0.532765), vec4(-0.26193795, -0.08205118, 0.670000, -1.783245), vec4(-0.61177456, 0.66664219, 0.710000, -0.044234), vec4(0.43675563, 0.25119025, 0.610000, -1.167283),
+ vec4(0.07884444, 0.86618668, 0.640000, -0.459002), vec4(-0.12790935, -0.29869005, 0.600000, -1.729424), vec4(-0.04031125, 0.02413622, 0.600000, -4.792042), vec4(0.16201244, -0.52851415, 0.790000, -1.067055),
+ vec4(-0.70991218, 0.47301072, 0.640000, -0.335236), vec4(0.03277707, -0.22349690, 0.600000, -1.982384), vec4(0.68921727, 0.36800742, 0.630000, -0.266718), vec4(0.29251814, 0.37775412, 0.610000, -1.422520),
+ vec4(-0.12224089, 0.96582592, 0.600000, -0.426142), vec4(0.11071457, -0.16131058, 0.600000, -2.165947), vec4(0.46562141, -0.59747696, 0.600000, -0.189760), vec4(-0.51548797, 0.11804193, 0.600000, -1.246800),
+ vec4(0.89141309, -0.42090443, 0.600000, 0.028192), vec4(-0.32402530, -0.01591529, 0.600000, -1.543018), vec4(0.60771245, 0.41635221, 0.600000, -0.605411), vec4(0.02379565, -0.08239821, 0.600000, -3.809046),
+ vec4(0.48951152, -0.23657045, 0.600000, -1.189011), vec4(-0.17611565, -0.81696892, 0.600000, -0.513724), vec4(-0.33930185, -0.20732205, 0.600000, -1.698047), vec4(-0.91974425, 0.05403209, 0.600000, 0.062246),
+ vec4(-0.15064627, -0.14949332, 0.600000, -1.896062), vec4(0.53180975, -0.35210401, 0.600000, -0.758838), vec4(0.41487166, 0.81442589, 0.600000, -0.505648), vec4(-0.24106961, -0.32721516, 0.600000, -1.665244)
+};
+
+// these values can be changed (up to SSIL_MAX_TAPS) with no changes required elsewhere; values for the 4th and 5th presets are ignored, but the array entries are needed to avoid compilation errors
+// the actual number of texture samples is two times this value (each "tap" has two symmetrical depth texture samples)
+const int num_taps[5] = { 3, 5, 12, 0, 0 };
+
+#define SSIL_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET (99) // to disable simply set to 99 or similar
+#define SSIL_TILT_SAMPLES_AMOUNT (0.4)
+//
+#define SSIL_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET (1) // to disable simply set to 99 or similar
+#define SSIL_HALOING_REDUCTION_AMOUNT (0.8) // values from 0.0 - 1.0, 1.0 means max weighting (will cause artifacts, 0.8 is more reasonable)
+//
+#define SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET (2)
+#define SSIL_DEPTH_MIPS_GLOBAL_OFFSET (-4.3) // best noise/quality/performance tradeoff, found empirically
+//
+// !!warning!! the edge handling is hard-coded to 'disabled' on quality level 0, and enabled above, on the C++ side; while toggling it here will work for
+// testing purposes, it will not yield performance gains (or correct results)
+#define SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (1)
+//
+#define SSIL_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET (1)
+
+#define SSIL_MAX_TAPS 32
+#define SSIL_ADAPTIVE_TAP_BASE_COUNT 5
+#define SSIL_ADAPTIVE_TAP_FLEXIBLE_COUNT (SSIL_MAX_TAPS - SSIL_ADAPTIVE_TAP_BASE_COUNT)
+#define SSIL_DEPTH_MIP_LEVELS 4
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform sampler2DArray source_depth_mipmaps;
+layout(rgba8, set = 0, binding = 1) uniform restrict readonly image2D source_normal;
+layout(set = 0, binding = 2) uniform Constants { //get into a lower set
+ vec4 rotation_matrices[20];
+}
+constants;
+
+#ifdef ADAPTIVE
+layout(rgba16, set = 1, binding = 0) uniform restrict readonly image2DArray source_ssil;
+layout(set = 1, binding = 1) uniform sampler2D source_importance;
+layout(set = 1, binding = 2, std430) buffer Counter {
+ uint sum;
+}
+counter;
+#endif
+
+layout(rgba16, set = 2, binding = 0) uniform restrict writeonly image2D dest_image;
+layout(r8, set = 2, binding = 1) uniform image2D edges_weights_image;
+
+layout(set = 3, binding = 0) uniform sampler2D last_frame;
+layout(set = 3, binding = 1) uniform ProjectionConstants {
+ mat4 reprojection;
+}
+projection_constants;
+
+layout(push_constant, std430) uniform Params {
+ ivec2 screen_size;
+ int pass;
+ int quality;
+
+ vec2 half_screen_pixel_size;
+ vec2 half_screen_pixel_size_x025;
+
+ vec2 NDC_to_view_mul;
+ vec2 NDC_to_view_add;
+
+ vec2 pad2;
+ float z_near;
+ float z_far;
+
+ float radius;
+ float intensity;
+ int size_multiplier;
+ int pad;
+
+ float fade_out_mul;
+ float fade_out_add;
+ float normal_rejection_amount;
+ float inv_radius_near_limit;
+
+ bool is_orthogonal;
+ float neg_inv_radius;
+ float load_counter_avg_div;
+ float adaptive_sample_limit;
+
+ ivec2 pass_coord_offset;
+ vec2 pass_uv_offset;
+}
+params;
+
+float pack_edges(vec4 p_edgesLRTB) {
+ p_edgesLRTB = round(clamp(p_edgesLRTB, 0.0, 1.0) * 3.05);
+ return dot(p_edgesLRTB, vec4(64.0 / 255.0, 16.0 / 255.0, 4.0 / 255.0, 1.0 / 255.0));
+}
+
+vec3 NDC_to_view_space(vec2 p_pos, float p_viewspace_depth) {
+ if (params.is_orthogonal) {
+ return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add), p_viewspace_depth);
+ } else {
+ return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add) * p_viewspace_depth, p_viewspace_depth);
+ }
+}
+
+// calculate effect radius and fit our screen sampling pattern inside it
+void calculate_radius_parameters(const float p_pix_center_length, const vec2 p_pixel_size_at_center, out float r_lookup_radius, out float r_radius, out float r_fallof_sq) {
+ r_radius = params.radius;
+
+ // when too close, on-screen sampling disk will grow beyond screen size; limit this to avoid closeup temporal artifacts
+ const float too_close_limit = clamp(p_pix_center_length * params.inv_radius_near_limit, 0.0, 1.0) * 0.8 + 0.2;
+
+ r_radius *= too_close_limit;
+
+ // 0.85 is to reduce the radius to allow for more samples on a slope to still stay within influence
+ r_lookup_radius = (0.85 * r_radius) / p_pixel_size_at_center.x;
+
+ // used to calculate falloff (both for AO samples and per-sample weights)
+ r_fallof_sq = -1.0 / (r_radius * r_radius);
+}
+
+vec4 calculate_edges(const float p_center_z, const float p_left_z, const float p_right_z, const float p_top_z, const float p_bottom_z) {
+ // slope-sensitive depth-based edge detection
+ vec4 edgesLRTB = vec4(p_left_z, p_right_z, p_top_z, p_bottom_z) - p_center_z;
+ vec4 edgesLRTB_slope_adjusted = edgesLRTB + edgesLRTB.yxwz;
+ edgesLRTB = min(abs(edgesLRTB), abs(edgesLRTB_slope_adjusted));
+ return clamp((1.3 - edgesLRTB / (p_center_z * 0.040)), 0.0, 1.0);
+}
+
+vec3 decode_normal(vec3 p_encoded_normal) {
+ vec3 normal = p_encoded_normal * 2.0 - 1.0;
+ return normal;
+}
+
+vec3 load_normal(ivec2 p_pos) {
+ vec3 encoded_normal = imageLoad(source_normal, p_pos).xyz;
+ encoded_normal.z = 1.0 - encoded_normal.z;
+ return decode_normal(encoded_normal);
+}
+
+vec3 load_normal(ivec2 p_pos, ivec2 p_offset) {
+ vec3 encoded_normal = imageLoad(source_normal, p_pos + p_offset).xyz;
+ encoded_normal.z = 1.0 - encoded_normal.z;
+ return decode_normal(encoded_normal);
+}
+
+// all vectors in viewspace
+float calculate_pixel_obscurance(vec3 p_pixel_normal, vec3 p_hit_delta, float p_fallof_sq) {
+ float length_sq = dot(p_hit_delta, p_hit_delta);
+ float NdotD = dot(p_pixel_normal, p_hit_delta) / sqrt(length_sq);
+
+ float falloff_mult = max(0.0, length_sq * p_fallof_sq + 1.0);
+
+ return max(0, NdotD - 0.05) * falloff_mult;
+}
+
+void SSIL_tap_inner(const int p_quality_level, inout vec3 r_color_sum, inout float r_obscurance_sum, inout float r_weight_sum, const vec2 p_sampling_uv, const float p_mip_level, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const float p_fallof_sq, const float p_weight_mod) {
+ // get depth at sample
+ float viewspace_sample_z = textureLod(source_depth_mipmaps, vec3(p_sampling_uv, params.pass), p_mip_level).x;
+ vec3 sample_normal = load_normal(ivec2(p_sampling_uv * vec2(params.screen_size)));
+
+ // convert to viewspace
+ vec3 hit_pos = NDC_to_view_space(p_sampling_uv.xy, viewspace_sample_z);
+ vec3 hit_delta = hit_pos - p_pix_center_pos;
+
+ float obscurance = calculate_pixel_obscurance(p_pixel_normal, hit_delta, p_fallof_sq);
+ float weight = 1.0;
+
+ if (p_quality_level >= SSIL_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET) {
+ float reduct = max(0, -hit_delta.z);
+ reduct = clamp(reduct * params.neg_inv_radius + 2.0, 0.0, 1.0);
+ weight = SSIL_HALOING_REDUCTION_AMOUNT * reduct + (1.0 - SSIL_HALOING_REDUCTION_AMOUNT);
+ }
+
+ // Translate sampling_uv to last screen's coordinates
+ const vec4 sample_pos = projection_constants.reprojection * vec4(p_sampling_uv * 2.0 - 1.0, (viewspace_sample_z - params.z_near) / (params.z_far - params.z_near) * 2.0 - 1.0, 1.0);
+ vec2 reprojected_sampling_uv = (sample_pos.xy / sample_pos.w) * 0.5 + 0.5;
+
+ weight *= p_weight_mod;
+
+ r_obscurance_sum += obscurance * weight;
+
+ vec3 sample_color = textureLod(last_frame, reprojected_sampling_uv, 5.0).rgb;
+ // Reduce impact of fireflies by tonemapping before averaging: http://graphicrants.blogspot.com/2013/12/tone-mapping.html
+ sample_color /= (1.0 + dot(sample_color, vec3(0.299, 0.587, 0.114)));
+ r_color_sum += sample_color * obscurance * weight * mix(1.0, smoothstep(0.0, 0.1, -dot(sample_normal, normalize(hit_delta))), params.normal_rejection_amount);
+ r_weight_sum += weight;
+}
+
+void SSILTap(const int p_quality_level, inout vec3 r_color_sum, inout float r_obscurance_sum, inout float r_weight_sum, const int p_tap_index, const mat2 p_rot_scale, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const vec2 p_normalized_screen_pos, const float p_mip_offset, const float p_fallof_sq, float p_weight_mod, vec2 p_norm_xy, float p_norm_xy_length) {
+ vec2 sample_offset;
+ float sample_pow_2_len;
+
+ // patterns
+ {
+ vec4 new_sample = sample_pattern[p_tap_index];
+ sample_offset = new_sample.xy * p_rot_scale;
+ sample_pow_2_len = new_sample.w; // precalculated, same as: sample_pow_2_len = log2( length( new_sample.xy ) );
+ p_weight_mod *= new_sample.z;
+ }
+
+ // snap to pixel center (more correct obscurance math, avoids artifacts)
+ sample_offset = round(sample_offset);
+
+ // calculate the MIP level based on the sample's distance from the centre, similar to what is described
+ // in http://graphics.cs.williams.edu/papers/SAOHPG12/.
+ float mip_level = (p_quality_level < SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (sample_pow_2_len + p_mip_offset);
+
+ vec2 sampling_uv = sample_offset * params.half_screen_pixel_size + p_normalized_screen_pos;
+
+ SSIL_tap_inner(p_quality_level, r_color_sum, r_obscurance_sum, r_weight_sum, sampling_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
+
+ // for the second tap, just use the mirrored offset
+ vec2 sample_offset_mirrored_uv = -sample_offset;
+
+ // tilt the second set of samples so that the disk is effectively rotated by the normal
+ // effective at removing one set of artifacts, but too expensive for lower quality settings
+ if (p_quality_level >= SSIL_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET) {
+ float dot_norm = dot(sample_offset_mirrored_uv, p_norm_xy);
+ sample_offset_mirrored_uv -= dot_norm * p_norm_xy_length * p_norm_xy;
+ sample_offset_mirrored_uv = round(sample_offset_mirrored_uv);
+ }
+
+ // snap to pixel center (more correct obscurance math, avoids artifacts)
+ vec2 sampling_mirrored_uv = sample_offset_mirrored_uv * params.half_screen_pixel_size + p_normalized_screen_pos;
+
+ SSIL_tap_inner(p_quality_level, r_color_sum, r_obscurance_sum, r_weight_sum, sampling_mirrored_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
+}
+
+void generate_SSIL(out vec3 r_color, out vec4 r_edges, out float r_obscurance, out float r_weight, const vec2 p_pos, int p_quality_level, bool p_adaptive_base) {
+ vec2 pos_rounded = trunc(p_pos);
+ uvec2 upos = uvec2(pos_rounded);
+
+ const int number_of_taps = (p_adaptive_base) ? (SSIL_ADAPTIVE_TAP_BASE_COUNT) : (num_taps[p_quality_level]);
+ float pix_z, pix_left_z, pix_top_z, pix_right_z, pix_bottom_z;
+
+ vec4 valuesUL = textureGather(source_depth_mipmaps, vec3(pos_rounded * params.half_screen_pixel_size, params.pass));
+ vec4 valuesBR = textureGather(source_depth_mipmaps, vec3((pos_rounded + vec2(1.0)) * params.half_screen_pixel_size, params.pass));
+
+ // get this pixel's viewspace depth
+ pix_z = valuesUL.y;
+
+ // get left, right, top, bottom neighbouring pixels for edge detection (gets compiled out when quality_level == 0)
+ pix_left_z = valuesUL.x;
+ pix_top_z = valuesUL.z;
+ pix_right_z = valuesBR.z;
+ pix_bottom_z = valuesBR.x;
+
+ vec2 normalized_screen_pos = pos_rounded * params.half_screen_pixel_size + params.half_screen_pixel_size_x025;
+ vec3 pix_center_pos = NDC_to_view_space(normalized_screen_pos, pix_z);
+
+ // Load this pixel's viewspace normal
+ uvec2 full_res_coord = upos * 2 * params.size_multiplier + params.pass_coord_offset.xy;
+ vec3 pixel_normal = load_normal(ivec2(full_res_coord));
+
+ const vec2 pixel_size_at_center = NDC_to_view_space(normalized_screen_pos.xy + params.half_screen_pixel_size, pix_center_pos.z).xy - pix_center_pos.xy;
+
+ float pixel_lookup_radius;
+ float fallof_sq;
+
+ // calculate effect radius and fit our screen sampling pattern inside it
+ float viewspace_radius;
+ calculate_radius_parameters(length(pix_center_pos), pixel_size_at_center, pixel_lookup_radius, viewspace_radius, fallof_sq);
+
+ // calculate samples rotation/scaling
+ mat2 rot_scale_matrix;
+ uint pseudo_random_index;
+
+ {
+ vec4 rotation_scale;
+ // reduce effect radius near the screen edges slightly; ideally, one would render a larger depth buffer (5% on each side) instead
+ if (!p_adaptive_base && (p_quality_level >= SSIL_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET)) {
+ float near_screen_border = min(min(normalized_screen_pos.x, 1.0 - normalized_screen_pos.x), min(normalized_screen_pos.y, 1.0 - normalized_screen_pos.y));
+ near_screen_border = clamp(10.0 * near_screen_border + 0.6, 0.0, 1.0);
+ pixel_lookup_radius *= near_screen_border;
+ }
+
+ // load & update pseudo-random rotation matrix
+ pseudo_random_index = uint(pos_rounded.y * 2 + pos_rounded.x) % 5;
+ rotation_scale = constants.rotation_matrices[params.pass * 5 + pseudo_random_index];
+ rot_scale_matrix = mat2(rotation_scale.x * pixel_lookup_radius, rotation_scale.y * pixel_lookup_radius, rotation_scale.z * pixel_lookup_radius, rotation_scale.w * pixel_lookup_radius);
+ }
+
+ // the main obscurance & sample weight storage
+ vec3 color_sum = vec3(0.0);
+ float obscurance_sum = 0.0;
+ float weight_sum = 0.0;
+
+ // edge mask between this pixel and its left/right/top/bottom neighbours - not used at quality level 0, so initialize to "no edge" (1 is no edge, 0 is edge)
+ vec4 edgesLRTB = vec4(1.0, 1.0, 1.0, 1.0);
+
+ // Move the center pixel slightly towards the camera to avoid imprecision artifacts caused by the 16-bit depth buffer; much smaller offsets are needed when using 32-bit floats
+ pix_center_pos *= 0.9992;
+
+ if (!p_adaptive_base && (p_quality_level >= SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
+ edgesLRTB = calculate_edges(pix_z, pix_left_z, pix_right_z, pix_top_z, pix_bottom_z);
+ }
+
+ const float global_mip_offset = SSIL_DEPTH_MIPS_GLOBAL_OFFSET;
+ float mip_offset = (p_quality_level < SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (log2(pixel_lookup_radius) + global_mip_offset);
+
+ // Used to tilt the second set of samples so that the disk is effectively rotated by the normal
+ // effective at removing one set of artifacts, but too expensive for lower quality settings
+ vec2 norm_xy = vec2(pixel_normal.x, pixel_normal.y);
+ float norm_xy_length = length(norm_xy);
+ norm_xy /= vec2(norm_xy_length, -norm_xy_length);
+ norm_xy_length *= SSIL_TILT_SAMPLES_AMOUNT;
+
+ // standard, non-adaptive approach
+ if ((p_quality_level != 3) || p_adaptive_base) {
+ for (int i = 0; i < number_of_taps; i++) {
+ SSILTap(p_quality_level, color_sum, obscurance_sum, weight_sum, i, rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, 1.0, norm_xy, norm_xy_length);
+ }
+ }
+#ifdef ADAPTIVE
+ else {
+ // add additional adaptive samples if needed
+ vec2 full_res_uv = normalized_screen_pos + params.pass_uv_offset.xy;
+ float importance = textureLod(source_importance, full_res_uv, 0.0).x;
+
+ // Need to store the obscurance from the base pass
+ // load existing base values
+ vec4 base_values = imageLoad(source_ssil, ivec3(upos, params.pass));
+ weight_sum += imageLoad(edges_weights_image, ivec2(upos)).r * float(SSIL_ADAPTIVE_TAP_BASE_COUNT * 4.0);
+ color_sum += (base_values.rgb) * weight_sum;
+ obscurance_sum += (base_values.a) * weight_sum;
+
+ // increase importance around edges
+ float edge_count = dot(1.0 - edgesLRTB, vec4(1.0, 1.0, 1.0, 1.0));
+
+ float avg_total_importance = float(counter.sum) * params.load_counter_avg_div;
+
+ float importance_limiter = clamp(params.adaptive_sample_limit / avg_total_importance, 0.0, 1.0);
+ importance *= importance_limiter;
+
+ float additional_sample_count = SSIL_ADAPTIVE_TAP_FLEXIBLE_COUNT * importance;
+
+ const float blend_range = 3.0;
+ const float blend_range_inv = 1.0 / blend_range;
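+ // the last few adaptive taps are faded in over blend_range taps so the result changes smoothly as the sample count varies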
+
+ additional_sample_count += 0.5;
+ uint additional_samples = uint(additional_sample_count);
+ uint additional_samples_to = min(SSIL_MAX_TAPS, additional_samples + SSIL_ADAPTIVE_TAP_BASE_COUNT);
+
+ for (uint i = SSIL_ADAPTIVE_TAP_BASE_COUNT; i < additional_samples_to; i++) {
+ additional_sample_count -= 1.0f;
+ float weight_mod = clamp(additional_sample_count * blend_range_inv, 0.0, 1.0);
+ SSILTap(p_quality_level, color_sum, obscurance_sum, weight_sum, int(i), rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, weight_mod, norm_xy, norm_xy_length);
+ }
+ }
+#endif
+
+ // Early out for adaptive base
+ if (p_adaptive_base) {
+ vec3 color = color_sum / weight_sum;
+
+ r_color = color;
+ r_edges = vec4(0.0);
+ r_obscurance = obscurance_sum / weight_sum;
+ r_weight = weight_sum;
+ return;
+ }
+
+ // Calculate weighted average
+ vec3 color = color_sum / weight_sum;
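+ // undo the firefly-reduction tonemapping that was applied to each sample in SSIL_tap_inner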
+ color /= 1.0 - dot(color, vec3(0.299, 0.587, 0.114));
+
+ // Calculate fadeout (1 close, gradient, 0 far)
+ float fade_out = clamp(pix_center_pos.z * params.fade_out_mul + params.fade_out_add, 0.0, 1.0);
+
+ // Reduce the SSIL on edges to remove edge artifacts (not worth doing at the lower quality levels)
+ if (!p_adaptive_base && (p_quality_level >= SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
+ // when there are more than 2 opposing edges, start fading out the occlusion to reduce aliasing artifacts
+ float edge_fadeout_factor = clamp((1.0 - edgesLRTB.x - edgesLRTB.y) * 0.35, 0.0, 1.0) + clamp((1.0 - edgesLRTB.z - edgesLRTB.w) * 0.35, 0.0, 1.0);
+
+ fade_out *= clamp(1.0 - edge_fadeout_factor, 0.0, 1.0);
+ }
+
+ color = params.intensity * color;
+
+ color *= fade_out;
+
+ // outputs!
+ r_color = color;
+ r_edges = edgesLRTB; // These are used to prevent blurring across edges, 1 means no edge, 0 means edge, 0.5 means half way there, etc.
+ r_obscurance = clamp((obscurance_sum / weight_sum) * params.intensity, 0.0, 1.0);
+ r_weight = weight_sum;
+}
+
+void main() {
+ vec3 out_color;
+ float out_obscurance;
+ float out_weight;
+ vec4 out_edges;
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+ if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
+ return;
+ }
+
+ vec2 uv = vec2(gl_GlobalInvocationID) + vec2(0.5);
+#ifdef SSIL_BASE
+ generate_SSIL(out_color, out_edges, out_obscurance, out_weight, uv, params.quality, true);
+
+ imageStore(dest_image, ssC, vec4(out_color, out_obscurance));
+ imageStore(edges_weights_image, ssC, vec4(out_weight / (float(SSIL_ADAPTIVE_TAP_BASE_COUNT) * 4.0)));
+#else
+ generate_SSIL(out_color, out_edges, out_obscurance, out_weight, uv, params.quality, false); // pass in quality levels
+
+ imageStore(dest_image, ssC, vec4(out_color, out_obscurance));
+ imageStore(edges_weights_image, ssC, vec4(pack_edges(out_edges)));
+#endif
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssil_blur.glsl b/servers/rendering/renderer_rd/shaders/effects/ssil_blur.glsl
new file mode 100644
index 0000000000..47c56571f6
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssil_blur.glsl
@@ -0,0 +1,144 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+// 2021-05-27: clayjohn: convert SSAO to SSIL
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(set = 0, binding = 0) uniform sampler2D source_ssil;
+
+layout(rgba16, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
+
+layout(r8, set = 2, binding = 0) uniform restrict readonly image2D source_edges;
+
+layout(push_constant, std430) uniform Params {
+ float edge_sharpness;
+ float pad;
+ vec2 half_screen_pixel_size;
+}
+params;
+
+vec4 unpack_edges(float p_packed_val) {
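+ // the four LRTB edge values are packed 2 bits each into a single 8-bit channel (see pack_edges() in ssil.glsl)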
+ uint packed_val = uint(p_packed_val * 255.5);
+ vec4 edgesLRTB;
+ edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
+ edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
+ edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
+ edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
+
+ return clamp(edgesLRTB + params.edge_sharpness, 0.0, 1.0);
+}
+
+void add_sample(vec4 p_ssil_value, float p_edge_value, inout vec4 r_sum, inout float r_sum_weight) {
+ float weight = p_edge_value;
+
+ r_sum += (weight * p_ssil_value);
+ r_sum_weight += weight;
+}
+
+#ifdef MODE_WIDE
+vec4 sample_blurred_wide(ivec2 p_pos, vec2 p_coord) {
+ vec4 ssil_value = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 0));
+ vec4 ssil_valueL = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(-2, 0));
+ vec4 ssil_valueT = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, -2));
+ vec4 ssil_valueR = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(2, 0));
+ vec4 ssil_valueB = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 2));
+
+ vec4 edgesLRTB = unpack_edges(imageLoad(source_edges, p_pos).r);
+ edgesLRTB.x *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(-2, 0)).r).y;
+ edgesLRTB.z *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(0, -2)).r).w;
+ edgesLRTB.y *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(2, 0)).r).x;
+ edgesLRTB.w *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(0, 2)).r).z;
+
+ float sum_weight = 0.8;
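+ // the center sample starts with a fixed weight; neighbours are weighted by their edge values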
+ vec4 sum = ssil_value * sum_weight;
+
+ add_sample(ssil_valueL, edgesLRTB.x, sum, sum_weight);
+ add_sample(ssil_valueR, edgesLRTB.y, sum, sum_weight);
+ add_sample(ssil_valueT, edgesLRTB.z, sum, sum_weight);
+ add_sample(ssil_valueB, edgesLRTB.w, sum, sum_weight);
+
+ vec4 ssil_avg = sum / sum_weight;
+
+ ssil_value = ssil_avg;
+
+ return ssil_value;
+}
+#endif
+
+#ifdef MODE_SMART
+vec4 sample_blurred(ivec2 p_pos, vec2 p_coord) {
+ vec4 vC = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 0));
+ vec4 vL = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(-1, 0));
+ vec4 vT = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, -1));
+ vec4 vR = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(1, 0));
+ vec4 vB = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 1));
+
+ float packed_edges = imageLoad(source_edges, p_pos).r;
+ vec4 edgesLRTB = unpack_edges(packed_edges);
+
+ float sum_weight = 0.5;
+ vec4 sum = vC * sum_weight;
+
+ add_sample(vL, edgesLRTB.x, sum, sum_weight);
+ add_sample(vR, edgesLRTB.y, sum, sum_weight);
+ add_sample(vT, edgesLRTB.z, sum, sum_weight);
+ add_sample(vB, edgesLRTB.w, sum, sum_weight);
+
+ vec4 ssil_avg = sum / sum_weight;
+
+ vec4 ssil_value = ssil_avg;
+
+ return ssil_value;
+}
+#endif
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+#ifdef MODE_NON_SMART
+
+ vec2 half_pixel = params.half_screen_pixel_size * 0.5;
+
+ vec2 uv = (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size;
+
+ vec4 centre = textureLod(source_ssil, uv, 0.0);
+
+ vec4 value = textureLod(source_ssil, vec2(uv + vec2(-half_pixel.x * 3, -half_pixel.y)), 0.0) * 0.2;
+ value += textureLod(source_ssil, vec2(uv + vec2(+half_pixel.x, -half_pixel.y * 3)), 0.0) * 0.2;
+ value += textureLod(source_ssil, vec2(uv + vec2(-half_pixel.x, +half_pixel.y * 3)), 0.0) * 0.2;
+ value += textureLod(source_ssil, vec2(uv + vec2(+half_pixel.x * 3, +half_pixel.y)), 0.0) * 0.2;
+
+ vec4 sampled = value + centre * 0.2;
+
+#else
+#ifdef MODE_SMART
+ vec4 sampled = sample_blurred(ssC, (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
+#else // MODE_WIDE
+ vec4 sampled = sample_blurred_wide(ssC, (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
+#endif
+#endif // MODE_NON_SMART
+ imageStore(dest_image, ssC, sampled);
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssil_importance_map.glsl b/servers/rendering/renderer_rd/shaders/effects/ssil_importance_map.glsl
new file mode 100644
index 0000000000..6b6b02739d
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssil_importance_map.glsl
@@ -0,0 +1,125 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+// 2021-05-27: clayjohn: convert SSAO to SSIL
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+#ifdef GENERATE_MAP
+layout(set = 0, binding = 0) uniform sampler2DArray source_texture;
+#else
+layout(set = 0, binding = 0) uniform sampler2D source_importance;
+#endif
+layout(r8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
+
+#ifdef PROCESS_MAPB
+layout(set = 2, binding = 0, std430) buffer Counter {
+ uint sum;
+}
+counter;
+#endif
+
+layout(push_constant, std430) uniform Params {
+ vec2 half_screen_pixel_size;
+ float intensity;
+ float pad;
+}
+params;
+
+void main() {
+ // Pixel being shaded
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+
+#ifdef GENERATE_MAP
+ // Importance map generation: estimate local SSIL contrast over a 2x2 block across the four deinterleaved slices
+ uvec2 base_position = ssC * 2;
+
+ float avg = 0.0;
+ float minV = 1.0;
+ float maxV = 0.0;
+ for (int i = 0; i < 4; i++) {
+ vec3 value_a = texelFetch(source_texture, ivec3(base_position, i), 0).rgb * params.intensity;
+ vec3 value_b = texelFetch(source_texture, ivec3(base_position, i) + ivec3(0, 1, 0), 0).rgb * params.intensity;
+ vec3 value_c = texelFetch(source_texture, ivec3(base_position, i) + ivec3(1, 0, 0), 0).rgb * params.intensity;
+ vec3 value_d = texelFetch(source_texture, ivec3(base_position, i) + ivec3(1, 1, 0), 0).rgb * params.intensity;
+
+ // Calculate luminance (black and white value)
+ float a = dot(value_a, vec3(0.2125, 0.7154, 0.0721));
+ float b = dot(value_b, vec3(0.2125, 0.7154, 0.0721));
+ float c = dot(value_c, vec3(0.2125, 0.7154, 0.0721));
+ float d = dot(value_d, vec3(0.2125, 0.7154, 0.0721));
+
+ maxV = max(maxV, max(max(a, b), max(c, d)));
+ minV = min(minV, min(min(a, b), min(c, d)));
+ }
+
+ float min_max_diff = maxV - minV;
+
+ imageStore(dest_image, ssC, vec4(pow(clamp(min_max_diff * 2.0, 0.0, 1.0), 0.6)));
+#endif
+
+#ifdef PROCESS_MAPA
+ vec2 uv = (vec2(ssC) + 0.5) * params.half_screen_pixel_size * 2.0;
+
+ float centre = textureLod(source_importance, uv, 0.0).x;
+
+ vec2 half_pixel = params.half_screen_pixel_size;
+
+ vec4 vals;
+ vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, -half_pixel.y), 0.0).x;
+ vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x, -half_pixel.y * 3), 0.0).x;
+ vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, +half_pixel.y), 0.0).x;
+ vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x, +half_pixel.y * 3), 0.0).x;
+
+ float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
+
+ imageStore(dest_image, ssC, vec4(avg));
+#endif
+
+#ifdef PROCESS_MAPB
+ vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
+
+ float centre = textureLod(source_importance, uv, 0.0).x;
+
+ vec2 half_pixel = params.half_screen_pixel_size;
+
+ vec4 vals;
+ vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x, -half_pixel.y * 3), 0.0).x;
+ vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, -half_pixel.y), 0.0).x;
+ vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x, +half_pixel.y * 3), 0.0).x;
+ vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, +half_pixel.y), 0.0).x;
+
+ float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
+
+ imageStore(dest_image, ssC, vec4(avg));
+
+ // sum the average; to avoid overflowing we assume max AO resolution is not bigger than 16384x16384; so quarter res (used here) will be 4096x4096, which leaves us with 8 bits per pixel
+ uint sum = uint(clamp(avg, 0.0, 1.0) * 255.0 + 0.5);
+
+ // save every 9th to avoid InterlockedAdd congestion - since we're blurring, this is good enough; compensated by multiplying load_counter_avg_div by 9
+ if (((ssC.x % 3) + (ssC.y % 3)) == 0) {
+ atomicAdd(counter.sum, sum);
+ }
+#endif
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/ssil_interleave.glsl b/servers/rendering/renderer_rd/shaders/effects/ssil_interleave.glsl
new file mode 100644
index 0000000000..9e86ac0cf0
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/ssil_interleave.glsl
@@ -0,0 +1,122 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2016, Intel Corporation
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+// the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// File changes (yyyy-mm-dd)
+// 2016-09-07: filip.strugar@intel.com: first commit
+// 2020-12-05: clayjohn: convert to Vulkan and Godot
+// 2021-05-27: clayjohn: convert SSAO to SSIL
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#[compute]
+
+#version 450
+
+#VERSION_DEFINES
+
+layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
+
+layout(rgba16, set = 0, binding = 0) uniform restrict writeonly image2D dest_image;
+layout(set = 1, binding = 0) uniform sampler2DArray source_texture;
+layout(r8, set = 2, binding = 0) uniform restrict readonly image2DArray source_edges;
+
+layout(push_constant, std430) uniform Params {
+ float inv_sharpness;
+ uint size_modifier;
+ vec2 pixel_size;
+}
+params;
+
+vec4 unpack_edges(float p_packed_val) {
+ uint packed_val = uint(p_packed_val * 255.5);
+ vec4 edgesLRTB;
+ edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
+ edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
+ edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
+ edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
+
+ return clamp(edgesLRTB + params.inv_sharpness, 0.0, 1.0);
+}
+
+void main() {
+ ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
+ if (any(greaterThanEqual(ssC, ivec2(1.0 / params.pixel_size)))) { //too large, do nothing
+ return;
+ }
+
+#ifdef MODE_SMART
+ uvec2 pix_pos = uvec2(gl_GlobalInvocationID.xy);
+ vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
+
+ // calculate indices into the four slices of the deinterleaved source array texture
+ int mx = int(pix_pos.x % 2);
+ int my = int(pix_pos.y % 2);
+ int index_center = mx + my * 2; // center index
+ int index_horizontal = (1 - mx) + my * 2; // neighbouring, horizontal
+ int index_vertical = mx + (1 - my) * 2; // neighbouring, vertical
+ int index_diagonal = (1 - mx) + (1 - my) * 2; // diagonal
+
+ vec4 color = texelFetch(source_texture, ivec3(pix_pos / uvec2(params.size_modifier), index_center), 0);
+
+ vec4 edgesLRTB = unpack_edges(imageLoad(source_edges, ivec3(pix_pos / uvec2(params.size_modifier), index_center)).r);
+
+ // convert index shifts to sampling offsets
+ float fmx = float(mx);
+ float fmy = float(my);
+
+ // in case of an edge, push sampling offsets away from the edge (towards pixel center)
+ float fmxe = (edgesLRTB.y - edgesLRTB.x);
+ float fmye = (edgesLRTB.w - edgesLRTB.z);
+
+ // calculate final sampling offsets and sample using bilinear filter
+ vec2 uv_horizontal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx + fmxe - 0.5, 0.5 - fmy)) * params.pixel_size;
+ vec4 color_horizontal = textureLod(source_texture, vec3(uv_horizontal, index_horizontal), 0.0);
+ vec2 uv_vertical = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(0.5 - fmx, fmy - 0.5 + fmye)) * params.pixel_size;
+ vec4 color_vertical = textureLod(source_texture, vec3(uv_vertical, index_vertical), 0.0);
+ vec2 uv_diagonal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx - 0.5 + fmxe, fmy - 0.5 + fmye)) * params.pixel_size;
+ vec4 color_diagonal = textureLod(source_texture, vec3(uv_diagonal, index_diagonal), 0.0);
+
+ // reduce weight for samples near edge - if the edge is on both sides, weight goes to 0
+ vec4 blendWeights;
+ blendWeights.x = 1.0;
+ blendWeights.y = (edgesLRTB.x + edgesLRTB.y) * 0.5;
+ blendWeights.z = (edgesLRTB.z + edgesLRTB.w) * 0.5;
+ blendWeights.w = (blendWeights.y + blendWeights.z) * 0.5;
+
+ // calculate weighted average
+ float blendWeightsSum = dot(blendWeights, vec4(1.0, 1.0, 1.0, 1.0));
+ color += color_horizontal * blendWeights.y;
+ color += color_vertical * blendWeights.z;
+ color += color_diagonal * blendWeights.w;
+ color /= blendWeightsSum;
+
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), color);
+#else // !MODE_SMART
+
+ vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
+#ifdef MODE_HALF
+ vec4 a = textureLod(source_texture, vec3(uv, 0), 0.0);
+ vec4 d = textureLod(source_texture, vec3(uv, 3), 0.0);
+ vec4 avg = (a + d) * 0.5;
+
+#else
+ vec4 a = textureLod(source_texture, vec3(uv, 0), 0.0);
+ vec4 b = textureLod(source_texture, vec3(uv, 1), 0.0);
+ vec4 c = textureLod(source_texture, vec3(uv, 2), 0.0);
+ vec4 d = textureLod(source_texture, vec3(uv, 3), 0.0);
+ vec4 avg = (a + b + c + d) * 0.25;
+
+#endif
+ imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), avg);
+#endif
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/tonemap.glsl b/servers/rendering/renderer_rd/shaders/effects/tonemap.glsl
new file mode 100644
index 0000000000..62a7b0e7d7
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/tonemap.glsl
@@ -0,0 +1,502 @@
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#endif
+#endif
+
+layout(location = 0) out vec2 uv_interp;
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp = base_arr[gl_VertexIndex];
+ gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
+}
+
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#endif //MULTIVIEW
+
+layout(location = 0) in vec2 uv_interp;
+
+#ifdef SUBPASS
+layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput input_color;
+#elif defined(MULTIVIEW)
+layout(set = 0, binding = 0) uniform sampler2DArray source_color;
+#else
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+#endif
+
+layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
+#ifdef MULTIVIEW
+layout(set = 2, binding = 0) uniform sampler2DArray source_glow;
+#else
+layout(set = 2, binding = 0) uniform sampler2D source_glow;
+#endif
+layout(set = 2, binding = 1) uniform sampler2D glow_map;
+
+#ifdef USE_1D_LUT
+layout(set = 3, binding = 0) uniform sampler2D source_color_correction;
+#else
+layout(set = 3, binding = 0) uniform sampler3D source_color_correction;
+#endif
+
+layout(push_constant, std430) uniform Params {
+ vec3 bcs;
+ bool use_bcs;
+
+ bool use_glow;
+ bool use_auto_exposure;
+ bool use_color_correction;
+ uint tonemapper;
+
+ uvec2 glow_texture_size;
+ float glow_intensity;
+ float glow_map_strength;
+
+ uint glow_mode;
+ float glow_levels[7];
+
+ float exposure;
+ float white;
+ float auto_exposure_grey;
+ float luminance_multiplier;
+
+ vec2 pixel_size;
+ bool use_fxaa;
+ bool use_debanding;
+}
+params;
+
+layout(location = 0) out vec4 frag_color;
+
+#ifdef USE_GLOW_FILTER_BICUBIC
+// w0, w1, w2, and w3 are the four cubic B-spline basis functions
+float w0(float a) {
+ return (1.0f / 6.0f) * (a * (a * (-a + 3.0f) - 3.0f) + 1.0f);
+}
+
+float w1(float a) {
+ return (1.0f / 6.0f) * (a * a * (3.0f * a - 6.0f) + 4.0f);
+}
+
+float w2(float a) {
+ return (1.0f / 6.0f) * (a * (a * (-3.0f * a + 3.0f) + 3.0f) + 1.0f);
+}
+
+float w3(float a) {
+ return (1.0f / 6.0f) * (a * a * a);
+}
+
+// g0 and g1 are the two amplitude functions
+float g0(float a) {
+ return w0(a) + w1(a);
+}
+
+float g1(float a) {
+ return w2(a) + w3(a);
+}
+
+// h0 and h1 are the two offset functions
+float h0(float a) {
+ return -1.0f + w1(a) / (w0(a) + w1(a));
+}
+
+float h1(float a) {
+ return 1.0f + w3(a) / (w2(a) + w3(a));
+}
+
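+// The bicubic B-spline is evaluated with only four bilinear taps: g0/g1 provide the per-axis weights and h0/h1 the sampling offsets.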
+#ifdef MULTIVIEW
+vec4 texture2D_bicubic(sampler2DArray tex, vec2 uv, int p_lod) {
+ float lod = float(p_lod);
+ vec2 tex_size = vec2(params.glow_texture_size >> p_lod);
+ vec2 pixel_size = vec2(1.0f) / tex_size;
+
+ uv = uv * tex_size + vec2(0.5f);
+
+ vec2 iuv = floor(uv);
+ vec2 fuv = fract(uv);
+
+ float g0x = g0(fuv.x);
+ float g1x = g1(fuv.x);
+ float h0x = h0(fuv.x);
+ float h1x = h1(fuv.x);
+ float h0y = h0(fuv.y);
+ float h1y = h1(fuv.y);
+
+ vec3 p0 = vec3((vec2(iuv.x + h0x, iuv.y + h0y) - vec2(0.5f)) * pixel_size, ViewIndex);
+ vec3 p1 = vec3((vec2(iuv.x + h1x, iuv.y + h0y) - vec2(0.5f)) * pixel_size, ViewIndex);
+ vec3 p2 = vec3((vec2(iuv.x + h0x, iuv.y + h1y) - vec2(0.5f)) * pixel_size, ViewIndex);
+ vec3 p3 = vec3((vec2(iuv.x + h1x, iuv.y + h1y) - vec2(0.5f)) * pixel_size, ViewIndex);
+
+ return (g0(fuv.y) * (g0x * textureLod(tex, p0, lod) + g1x * textureLod(tex, p1, lod))) +
+ (g1(fuv.y) * (g0x * textureLod(tex, p2, lod) + g1x * textureLod(tex, p3, lod)));
+}
+
+#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) texture2D_bicubic(m_tex, m_uv, m_lod)
+#else // MULTIVIEW
+
+vec4 texture2D_bicubic(sampler2D tex, vec2 uv, int p_lod) {
+ float lod = float(p_lod);
+ vec2 tex_size = vec2(params.glow_texture_size >> p_lod);
+ vec2 pixel_size = vec2(1.0f) / tex_size;
+
+ uv = uv * tex_size + vec2(0.5f);
+
+ vec2 iuv = floor(uv);
+ vec2 fuv = fract(uv);
+
+ float g0x = g0(fuv.x);
+ float g1x = g1(fuv.x);
+ float h0x = h0(fuv.x);
+ float h1x = h1(fuv.x);
+ float h0y = h0(fuv.y);
+ float h1y = h1(fuv.y);
+
+ vec2 p0 = (vec2(iuv.x + h0x, iuv.y + h0y) - vec2(0.5f)) * pixel_size;
+ vec2 p1 = (vec2(iuv.x + h1x, iuv.y + h0y) - vec2(0.5f)) * pixel_size;
+ vec2 p2 = (vec2(iuv.x + h0x, iuv.y + h1y) - vec2(0.5f)) * pixel_size;
+ vec2 p3 = (vec2(iuv.x + h1x, iuv.y + h1y) - vec2(0.5f)) * pixel_size;
+
+ return (g0(fuv.y) * (g0x * textureLod(tex, p0, lod) + g1x * textureLod(tex, p1, lod))) +
+ (g1(fuv.y) * (g0x * textureLod(tex, p2, lod) + g1x * textureLod(tex, p3, lod)));
+}
+
+#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) texture2D_bicubic(m_tex, m_uv, m_lod)
+#endif // !MULTIVIEW
+
+#else // USE_GLOW_FILTER_BICUBIC
+
+#ifdef MULTIVIEW
+#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) textureLod(m_tex, vec3(m_uv, ViewIndex), float(m_lod))
+#else // MULTIVIEW
+#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) textureLod(m_tex, m_uv, float(m_lod))
+#endif // !MULTIVIEW
+
+#endif // !USE_GLOW_FILTER_BICUBIC
+
+vec3 tonemap_filmic(vec3 color, float white) {
+ // exposure bias: input scale (color *= bias, white *= bias) to make the brightness consistent with other tonemappers
+ // also useful to scale the input to the range that the tonemapper is designed for (some require very high input values)
+ // has no effect on the curve's general shape or visual properties
+ const float exposure_bias = 2.0f;
+ const float A = 0.22f * exposure_bias * exposure_bias; // bias baked into constants for performance
+ const float B = 0.30f * exposure_bias;
+ const float C = 0.10f;
+ const float D = 0.20f;
+ const float E = 0.01f;
+ const float F = 0.30f;
+
+ vec3 color_tonemapped = ((color * (A * color + C * B) + D * E) / (color * (A * color + B) + D * F)) - E / F;
+ float white_tonemapped = ((white * (A * white + C * B) + D * E) / (white * (A * white + B) + D * F)) - E / F;
+
+ return color_tonemapped / white_tonemapped;
+}
+
+// Adapted from https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl
+// (MIT License).
+vec3 tonemap_aces(vec3 color, float white) {
+ const float exposure_bias = 1.8f;
+ const float A = 0.0245786f;
+ const float B = 0.000090537f;
+ const float C = 0.983729f;
+ const float D = 0.432951f;
+ const float E = 0.238081f;
+
+ // Exposure bias baked into transform to save shader instructions. Equivalent to `color *= exposure_bias`
+ const mat3 rgb_to_rrt = mat3(
+ vec3(0.59719f * exposure_bias, 0.35458f * exposure_bias, 0.04823f * exposure_bias),
+ vec3(0.07600f * exposure_bias, 0.90834f * exposure_bias, 0.01566f * exposure_bias),
+ vec3(0.02840f * exposure_bias, 0.13383f * exposure_bias, 0.83777f * exposure_bias));
+
+ const mat3 odt_to_rgb = mat3(
+ vec3(1.60475f, -0.53108f, -0.07367f),
+ vec3(-0.10208f, 1.10813f, -0.00605f),
+ vec3(-0.00327f, -0.07276f, 1.07602f));
+
+ color *= rgb_to_rrt;
+ vec3 color_tonemapped = (color * (color + A) - B) / (color * (C * color + D) + E);
+ color_tonemapped *= odt_to_rgb;
+
+ white *= exposure_bias;
+ float white_tonemapped = (white * (white + A) - B) / (white * (C * white + D) + E);
+
+ return color_tonemapped / white_tonemapped;
+}
+
+vec3 tonemap_reinhard(vec3 color, float white) {
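+ // Reinhard with a white point: equivalent to color * (1.0 + white) / (white * (1.0 + color)), so color == white maps to 1.0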
+ return (white * color + color) / (color * white + white);
+}
+
+vec3 linear_to_srgb(vec3 color) {
+ // when converting to sRGB, clamp the input to the [0, 1] range
+ color = clamp(color, vec3(0.0), vec3(1.0));
+ const vec3 a = vec3(0.055f);
+ return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
+}
+
+#define TONEMAPPER_LINEAR 0
+#define TONEMAPPER_REINHARD 1
+#define TONEMAPPER_FILMIC 2
+#define TONEMAPPER_ACES 3
+
+vec3 apply_tonemapping(vec3 color, float white) { // inputs are LINEAR, always outputs clamped [0;1] color
+ // Ensure color values passed to tonemappers are positive.
+ // They can be negative in the case of negative lights, which leads to undesired behavior.
+ if (params.tonemapper == TONEMAPPER_LINEAR) {
+ return color;
+ } else if (params.tonemapper == TONEMAPPER_REINHARD) {
+ return tonemap_reinhard(max(vec3(0.0f), color), white);
+ } else if (params.tonemapper == TONEMAPPER_FILMIC) {
+ return tonemap_filmic(max(vec3(0.0f), color), white);
+ } else { // TONEMAPPER_ACES
+ return tonemap_aces(max(vec3(0.0f), color), white);
+ }
+}
+
+#ifdef MULTIVIEW
+vec3 gather_glow(sampler2DArray tex, vec2 uv) { // sample all selected glow levels, view is added to uv later
+#else
+vec3 gather_glow(sampler2D tex, vec2 uv) { // sample all selected glow levels
+#endif // defined(MULTIVIEW)
+ vec3 glow = vec3(0.0f);
+
+ if (params.glow_levels[0] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 0).rgb * params.glow_levels[0];
+ }
+
+ if (params.glow_levels[1] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 1).rgb * params.glow_levels[1];
+ }
+
+ if (params.glow_levels[2] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 2).rgb * params.glow_levels[2];
+ }
+
+ if (params.glow_levels[3] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 3).rgb * params.glow_levels[3];
+ }
+
+ if (params.glow_levels[4] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 4).rgb * params.glow_levels[4];
+ }
+
+ if (params.glow_levels[5] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 5).rgb * params.glow_levels[5];
+ }
+
+ if (params.glow_levels[6] > 0.0001) {
+ glow += GLOW_TEXTURE_SAMPLE(tex, uv, 6).rgb * params.glow_levels[6];
+ }
+
+ return glow;
+}
+
+#define GLOW_MODE_ADD 0
+#define GLOW_MODE_SCREEN 1
+#define GLOW_MODE_SOFTLIGHT 2
+#define GLOW_MODE_REPLACE 3
+#define GLOW_MODE_MIX 4
+
+vec3 apply_glow(vec3 color, vec3 glow) { // apply glow using the selected blending mode
+ if (params.glow_mode == GLOW_MODE_ADD) {
+ return color + glow;
+ } else if (params.glow_mode == GLOW_MODE_SCREEN) {
+ //need color clamping
+ return max((color + glow) - (color * glow), vec3(0.0));
+ } else if (params.glow_mode == GLOW_MODE_SOFTLIGHT) {
+ //need color clamping
+ glow = glow * vec3(0.5f) + vec3(0.5f);
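+ // per-channel soft-light blend, with separate curves for glow <= 0.5, glow > 0.5 over a dark base, and glow > 0.5 otherwise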
+
+ color.r = (glow.r <= 0.5f) ? (color.r - (1.0f - 2.0f * glow.r) * color.r * (1.0f - color.r)) : (((glow.r > 0.5f) && (color.r <= 0.25f)) ? (color.r + (2.0f * glow.r - 1.0f) * (4.0f * color.r * (4.0f * color.r + 1.0f) * (color.r - 1.0f) + 7.0f * color.r)) : (color.r + (2.0f * glow.r - 1.0f) * (sqrt(color.r) - color.r)));
+ color.g = (glow.g <= 0.5f) ? (color.g - (1.0f - 2.0f * glow.g) * color.g * (1.0f - color.g)) : (((glow.g > 0.5f) && (color.g <= 0.25f)) ? (color.g + (2.0f * glow.g - 1.0f) * (4.0f * color.g * (4.0f * color.g + 1.0f) * (color.g - 1.0f) + 7.0f * color.g)) : (color.g + (2.0f * glow.g - 1.0f) * (sqrt(color.g) - color.g)));
+ color.b = (glow.b <= 0.5f) ? (color.b - (1.0f - 2.0f * glow.b) * color.b * (1.0f - color.b)) : (((glow.b > 0.5f) && (color.b <= 0.25f)) ? (color.b + (2.0f * glow.b - 1.0f) * (4.0f * color.b * (4.0f * color.b + 1.0f) * (color.b - 1.0f) + 7.0f * color.b)) : (color.b + (2.0f * glow.b - 1.0f) * (sqrt(color.b) - color.b)));
+ return color;
+ } else { //replace
+ return glow;
+ }
+}
+
+vec3 apply_bcs(vec3 color, vec3 bcs) {
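+ // bcs.x = brightness, bcs.y = contrast (pivot at mid-grey 0.5), bcs.z = saturation (pivot at the per-pixel average)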
+ color = mix(vec3(0.0f), color, bcs.x);
+ color = mix(vec3(0.5f), color, bcs.y);
+ color = mix(vec3(dot(vec3(1.0f), color) * 0.33333f), color, bcs.z);
+
+ return color;
+}
+#ifdef USE_1D_LUT
+vec3 apply_color_correction(vec3 color) {
+ color.r = texture(source_color_correction, vec2(color.r, 0.0f)).r;
+ color.g = texture(source_color_correction, vec2(color.g, 0.0f)).g;
+ color.b = texture(source_color_correction, vec2(color.b, 0.0f)).b;
+ return color;
+}
+#else
+vec3 apply_color_correction(vec3 color) {
+ return textureLod(source_color_correction, color, 0.0).rgb;
+}
+#endif
+
+#ifndef SUBPASS
+vec3 do_fxaa(vec3 color, float exposure, vec2 uv_interp) {
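+ // Standard FXAA: estimate the local luma gradient from the four diagonal neighbours, blend along the detected edge direction, and fall back to the narrower blend if the wide one over/undershoots the local luma range.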
+ const float FXAA_REDUCE_MIN = (1.0 / 128.0);
+ const float FXAA_REDUCE_MUL = (1.0 / 8.0);
+ const float FXAA_SPAN_MAX = 8.0;
+
+#ifdef MULTIVIEW
+ vec3 rgbNW = textureLod(source_color, vec3(uv_interp + vec2(-1.0, -1.0) * params.pixel_size, ViewIndex), 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbNE = textureLod(source_color, vec3(uv_interp + vec2(1.0, -1.0) * params.pixel_size, ViewIndex), 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbSW = textureLod(source_color, vec3(uv_interp + vec2(-1.0, 1.0) * params.pixel_size, ViewIndex), 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbSE = textureLod(source_color, vec3(uv_interp + vec2(1.0, 1.0) * params.pixel_size, ViewIndex), 0.0).xyz * exposure * params.luminance_multiplier;
+#else
+ vec3 rgbNW = textureLod(source_color, uv_interp + vec2(-1.0, -1.0) * params.pixel_size, 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbNE = textureLod(source_color, uv_interp + vec2(1.0, -1.0) * params.pixel_size, 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbSW = textureLod(source_color, uv_interp + vec2(-1.0, 1.0) * params.pixel_size, 0.0).xyz * exposure * params.luminance_multiplier;
+ vec3 rgbSE = textureLod(source_color, uv_interp + vec2(1.0, 1.0) * params.pixel_size, 0.0).xyz * exposure * params.luminance_multiplier;
+#endif
+ vec3 rgbM = color;
+ vec3 luma = vec3(0.299, 0.587, 0.114);
+ float lumaNW = dot(rgbNW, luma);
+ float lumaNE = dot(rgbNE, luma);
+ float lumaSW = dot(rgbSW, luma);
+ float lumaSE = dot(rgbSE, luma);
+ float lumaM = dot(rgbM, luma);
+ float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
+ float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
+
+ vec2 dir;
+ dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
+ dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
+
+ float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) *
+ (0.25 * FXAA_REDUCE_MUL),
+ FXAA_REDUCE_MIN);
+
+ float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);
+ dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
+ max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX),
+ dir * rcpDirMin)) *
+ params.pixel_size;
+
+#ifdef MULTIVIEW
+ vec3 rgbA = 0.5 * exposure * (textureLod(source_color, vec3(uv_interp + dir * (1.0 / 3.0 - 0.5), ViewIndex), 0.0).xyz + textureLod(source_color, vec3(uv_interp + dir * (2.0 / 3.0 - 0.5), ViewIndex), 0.0).xyz) * params.luminance_multiplier;
+ vec3 rgbB = rgbA * 0.5 + 0.25 * exposure * (textureLod(source_color, vec3(uv_interp + dir * -0.5, ViewIndex), 0.0).xyz + textureLod(source_color, vec3(uv_interp + dir * 0.5, ViewIndex), 0.0).xyz) * params.luminance_multiplier;
+#else
+ vec3 rgbA = 0.5 * exposure * (textureLod(source_color, uv_interp + dir * (1.0 / 3.0 - 0.5), 0.0).xyz + textureLod(source_color, uv_interp + dir * (2.0 / 3.0 - 0.5), 0.0).xyz) * params.luminance_multiplier;
+ vec3 rgbB = rgbA * 0.5 + 0.25 * exposure * (textureLod(source_color, uv_interp + dir * -0.5, 0.0).xyz + textureLod(source_color, uv_interp + dir * 0.5, 0.0).xyz) * params.luminance_multiplier;
+#endif
+
+ float lumaB = dot(rgbB, luma);
+ if ((lumaB < lumaMin) || (lumaB > lumaMax)) {
+ return rgbA;
+ } else {
+ return rgbB;
+ }
+}
+#endif // !SUBPASS
+
+// From https://alex.vlachos.com/graphics/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf
+// and https://www.shadertoy.com/view/MslGR8 (5th one starting from the bottom)
+// NOTE: `frag_coord` is in pixels (i.e. not normalized UV).
+vec3 screen_space_dither(vec2 frag_coord) {
+ // Iestyn's RGB dither (7 asm instructions) from Portal 2 X360, slightly modified for VR.
+ vec3 dither = vec3(dot(vec2(171.0, 231.0), frag_coord));
+ dither.rgb = fract(dither.rgb / vec3(103.0, 71.0, 97.0));
+
+ // Subtract 0.5 to avoid slightly brightening the whole viewport.
+ return (dither.rgb - 0.5) / 255.0;
+}
+
+void main() {
+#ifdef SUBPASS
+ // SUBPASS and MULTIVIEW can be combined but in that case we're already reading from the correct layer
+ vec4 color = subpassLoad(input_color);
+#elif defined(MULTIVIEW)
+ vec4 color = textureLod(source_color, vec3(uv_interp, ViewIndex), 0.0f);
+#else
+ vec4 color = textureLod(source_color, uv_interp, 0.0f);
+#endif
+ color.rgb *= params.luminance_multiplier;
+
+ // Exposure
+
+ float exposure = params.exposure;
+
+#ifndef SUBPASS
+ if (params.use_auto_exposure) {
+ exposure *= 1.0 / (texelFetch(source_auto_exposure, ivec2(0, 0), 0).r * params.luminance_multiplier / params.auto_exposure_grey);
+ }
+#endif
+
+ color.rgb *= exposure;
+
+ // Early Tonemap & SRGB Conversion
+#ifndef SUBPASS
+ if (params.use_fxaa) {
+ // FXAA must be performed before glow to preserve the "bleed" effect of glow.
+ color.rgb = do_fxaa(color.rgb, exposure, uv_interp);
+ }
+
+ if (params.use_glow && params.glow_mode == GLOW_MODE_MIX) {
+ vec3 glow = gather_glow(source_glow, uv_interp) * params.luminance_multiplier;
+ if (params.glow_map_strength > 0.001) {
+ glow = mix(glow, texture(glow_map, uv_interp).rgb * glow, params.glow_map_strength);
+ }
+ color.rgb = mix(color.rgb, glow, params.glow_intensity);
+ }
+#endif
+
+ if (params.use_debanding) {
+ // For best results, debanding should be done before tonemapping.
+ // Otherwise, we're adding noise to an already-quantized image.
+ color.rgb += screen_space_dither(gl_FragCoord.xy);
+ }
+
+ color.rgb = apply_tonemapping(color.rgb, params.white);
+
+ color.rgb = linear_to_srgb(color.rgb); // regular linear -> SRGB conversion
+
+#ifndef SUBPASS
+ // Glow
+ if (params.use_glow && params.glow_mode != GLOW_MODE_MIX) {
+ vec3 glow = gather_glow(source_glow, uv_interp) * params.glow_intensity * params.luminance_multiplier;
+ if (params.glow_map_strength > 0.001) {
+ glow = mix(glow, texture(glow_map, uv_interp).rgb * glow, params.glow_map_strength);
+ }
+
+ // high dynamic range -> SRGB
+ glow = apply_tonemapping(glow, params.white);
+ glow = linear_to_srgb(glow);
+
+ color.rgb = apply_glow(color.rgb, glow);
+ }
+#endif
+
+ // Additional effects
+
+ if (params.use_bcs) {
+ color.rgb = apply_bcs(color.rgb, params.bcs);
+ }
+
+ if (params.use_color_correction) {
+ color.rgb = apply_color_correction(color.rgb);
+ }
+
+ frag_color = color;
+}
diff --git a/servers/rendering/renderer_rd/shaders/effects/vrs.glsl b/servers/rendering/renderer_rd/shaders/effects/vrs.glsl
new file mode 100644
index 0000000000..5ef83c0b44
--- /dev/null
+++ b/servers/rendering/renderer_rd/shaders/effects/vrs.glsl
@@ -0,0 +1,72 @@
+#[vertex]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#endif //MULTIVIEW
+
+#ifdef MULTIVIEW
+layout(location = 0) out vec3 uv_interp;
+#else
+layout(location = 0) out vec2 uv_interp;
+#endif
+
+void main() {
+ vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
+ uv_interp.xy = base_arr[gl_VertexIndex];
+#ifdef MULTIVIEW
+ uv_interp.z = ViewIndex;
+#endif
+
+ gl_Position = vec4(uv_interp.xy * 2.0 - 1.0, 0.0, 1.0);
+}
+
+#[fragment]
+
+#version 450
+
+#VERSION_DEFINES
+
+#ifdef MULTIVIEW
+#ifdef has_VK_KHR_multiview
+#extension GL_EXT_multiview : enable
+#define ViewIndex gl_ViewIndex
+#else // has_VK_KHR_multiview
+#define ViewIndex 0
+#endif // has_VK_KHR_multiview
+#endif //MULTIVIEW
+
+#ifdef MULTIVIEW
+layout(location = 0) in vec3 uv_interp;
+layout(set = 0, binding = 0) uniform sampler2DArray source_color;
+#else /* MULTIVIEW */
+layout(location = 0) in vec2 uv_interp;
+layout(set = 0, binding = 0) uniform sampler2D source_color;
+#endif /* MULTIVIEW */
+
+layout(location = 0) out uint frag_color;
+
+void main() {
+#ifdef MULTIVIEW
+ vec3 uv = uv_interp;
+#else
+ vec2 uv = uv_interp;
+#endif
+
+#ifdef MULTIVIEW
+ vec4 color = textureLod(source_color, uv, 0.0);
+#else /* MULTIVIEW */
+ vec4 color = textureLod(source_color, uv, 0.0);
+#endif /* MULTIVIEW */
+
+ // See if we can change the sampler to one that returns int...
+ frag_color = uint(color.r * 256.0);
+}