Diffstat (limited to 'drivers')
48 files changed, 1181 insertions, 908 deletions
diff --git a/drivers/alsa/audio_driver_alsa.cpp b/drivers/alsa/audio_driver_alsa.cpp index f86c4d82ef..f4c87da9e9 100644 --- a/drivers/alsa/audio_driver_alsa.cpp +++ b/drivers/alsa/audio_driver_alsa.cpp @@ -50,7 +50,7 @@ Error AudioDriverALSA::init_device() { // If there is a specified device check that it is really present if (device_name != "Default") { - Array list = get_device_list(); + PackedStringArray list = get_device_list(); if (list.find(device_name) == -1) { device_name = "Default"; new_device = "Default"; @@ -168,9 +168,8 @@ Error AudioDriverALSA::init() { return ERR_CANT_OPEN; } - active = false; - thread_exited = false; - exit_thread = false; + active.clear(); + exit_thread.clear(); Error err = init_device(); if (err == OK) { @@ -183,11 +182,11 @@ Error AudioDriverALSA::init() { void AudioDriverALSA::thread_func(void *p_udata) { AudioDriverALSA *ad = static_cast<AudioDriverALSA *>(p_udata); - while (!ad->exit_thread) { + while (!ad->exit_thread.is_set()) { ad->lock(); ad->start_counting_ticks(); - if (!ad->active) { + if (!ad->active.is_set()) { for (uint64_t i = 0; i < ad->period_size * ad->channels; i++) { ad->samples_out.write[i] = 0; } @@ -203,7 +202,7 @@ void AudioDriverALSA::thread_func(void *p_udata) { int todo = ad->period_size; int total = 0; - while (todo && !ad->exit_thread) { + while (todo && !ad->exit_thread.is_set()) { int16_t *src = (int16_t *)ad->samples_out.ptr(); int wrote = snd_pcm_writei(ad->pcm_handle, (void *)(src + (total * ad->channels)), todo); @@ -222,8 +221,8 @@ void AudioDriverALSA::thread_func(void *p_udata) { wrote = snd_pcm_recover(ad->pcm_handle, wrote, 0); if (wrote < 0) { ERR_PRINT("ALSA: Failed and can't recover: " + String(snd_strerror(wrote))); - ad->active = false; - ad->exit_thread = true; + ad->active.clear(); + ad->exit_thread.set(); } } } @@ -241,8 +240,8 @@ void AudioDriverALSA::thread_func(void *p_udata) { err = ad->init_device(); if (err != OK) { - ad->active = false; - ad->exit_thread = true; + ad->active.clear(); + ad->exit_thread.set(); } } } @@ -250,12 +249,10 @@ void AudioDriverALSA::thread_func(void *p_udata) { ad->stop_counting_ticks(); ad->unlock(); } - - ad->thread_exited = true; } void AudioDriverALSA::start() { - active = true; + active.set(); } int AudioDriverALSA::get_mix_rate() const { @@ -266,8 +263,8 @@ AudioDriver::SpeakerMode AudioDriverALSA::get_speaker_mode() const { return speaker_mode; } -Array AudioDriverALSA::get_device_list() { - Array list; +PackedStringArray AudioDriverALSA::get_device_list() { + PackedStringArray list; list.push_back("Default"); @@ -327,7 +324,7 @@ void AudioDriverALSA::finish_device() { } void AudioDriverALSA::finish() { - exit_thread = true; + exit_thread.set(); thread.wait_to_finish(); finish_device(); diff --git a/drivers/alsa/audio_driver_alsa.h b/drivers/alsa/audio_driver_alsa.h index dbb40fa088..fa1dba38ed 100644 --- a/drivers/alsa/audio_driver_alsa.h +++ b/drivers/alsa/audio_driver_alsa.h @@ -35,6 +35,7 @@ #include "core/os/mutex.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" #include "servers/audio_server.h" #include "asound-so_wrap.h" @@ -64,9 +65,8 @@ class AudioDriverALSA : public AudioDriver { snd_pcm_uframes_t period_size; int channels = 0; - bool active = false; - bool thread_exited = false; - mutable bool exit_thread = false; + SafeFlag active; + SafeFlag exit_thread; public: const char *get_name() const { @@ -77,7 +77,7 @@ public: virtual void start(); virtual int get_mix_rate() const; virtual SpeakerMode get_speaker_mode() const; - virtual 
Array get_device_list(); + virtual PackedStringArray get_device_list(); virtual String get_device(); virtual void set_device(String device); virtual void lock(); diff --git a/drivers/alsamidi/midi_driver_alsamidi.cpp b/drivers/alsamidi/midi_driver_alsamidi.cpp index c334146dd2..d2a0076023 100644 --- a/drivers/alsamidi/midi_driver_alsamidi.cpp +++ b/drivers/alsamidi/midi_driver_alsamidi.cpp @@ -79,7 +79,7 @@ void MIDIDriverALSAMidi::thread_func(void *p_udata) { int expected_size = 255; int bytes = 0; - while (!md->exit_thread) { + while (!md->exit_thread.is_set()) { int ret; md->lock(); @@ -149,14 +149,14 @@ Error MIDIDriverALSAMidi::open() { } snd_device_name_free_hint(hints); - exit_thread = false; + exit_thread.clear(); thread.start(MIDIDriverALSAMidi::thread_func, this); return OK; } void MIDIDriverALSAMidi::close() { - exit_thread = true; + exit_thread.set(); thread.wait_to_finish(); for (int i = 0; i < connected_inputs.size(); i++) { @@ -193,7 +193,7 @@ PackedStringArray MIDIDriverALSAMidi::get_connected_inputs() { } MIDIDriverALSAMidi::MIDIDriverALSAMidi() { - exit_thread = false; + exit_thread.clear(); } MIDIDriverALSAMidi::~MIDIDriverALSAMidi() { diff --git a/drivers/alsamidi/midi_driver_alsamidi.h b/drivers/alsamidi/midi_driver_alsamidi.h index b0fa8c297a..ac3530b1b2 100644 --- a/drivers/alsamidi/midi_driver_alsamidi.h +++ b/drivers/alsamidi/midi_driver_alsamidi.h @@ -36,6 +36,7 @@ #include "core/os/midi_driver.h" #include "core/os/mutex.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" #include "core/templates/vector.h" #include "../alsa/asound-so_wrap.h" @@ -47,7 +48,7 @@ class MIDIDriverALSAMidi : public MIDIDriver { Vector<snd_rawmidi_t *> connected_inputs; - bool exit_thread; + SafeFlag exit_thread; static void thread_func(void *p_udata); diff --git a/drivers/coreaudio/audio_driver_coreaudio.cpp b/drivers/coreaudio/audio_driver_coreaudio.cpp index cc38c2352f..1db85e2a60 100644 --- a/drivers/coreaudio/audio_driver_coreaudio.cpp +++ b/drivers/coreaudio/audio_driver_coreaudio.cpp @@ -215,6 +215,7 @@ OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon, } ad->lock(); + ad->start_counting_ticks(); AudioBufferList bufferList; bufferList.mNumberBuffers = 1; @@ -237,6 +238,7 @@ OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon, ERR_PRINT("AudioUnitRender failed, code: " + itos(result)); } + ad->stop_counting_ticks(); ad->unlock(); return result; @@ -493,8 +495,8 @@ Error AudioDriverCoreAudio::capture_stop() { #ifdef MACOS_ENABLED -Array AudioDriverCoreAudio::_get_device_list(bool capture) { - Array list; +PackedStringArray AudioDriverCoreAudio::_get_device_list(bool capture) { + PackedStringArray list; list.push_back("Default"); @@ -637,7 +639,7 @@ void AudioDriverCoreAudio::_set_device(const String &device, bool capture) { } } -Array AudioDriverCoreAudio::get_device_list() { +PackedStringArray AudioDriverCoreAudio::get_device_list() { return _get_device_list(); } @@ -659,7 +661,7 @@ void AudioDriverCoreAudio::capture_set_device(const String &p_name) { } } -Array AudioDriverCoreAudio::capture_get_device_list() { +PackedStringArray AudioDriverCoreAudio::capture_get_device_list() { return _get_device_list(true); } diff --git a/drivers/coreaudio/audio_driver_coreaudio.h b/drivers/coreaudio/audio_driver_coreaudio.h index 7fac8a99ed..aac5077bb1 100644 --- a/drivers/coreaudio/audio_driver_coreaudio.h +++ b/drivers/coreaudio/audio_driver_coreaudio.h @@ -59,7 +59,7 @@ class AudioDriverCoreAudio : public AudioDriver { Vector<int16_t> input_buf; 
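// Illustration, not part of the commit: the ALSA audio and MIDI driver hunks above
// replace plain `bool active` / `bool exit_thread` members with Godot's SafeFlag
// (core/templates/safe_refcount.h), so the mixer thread and the main thread stop
// racing on unsynchronized bools. A minimal stand-alone sketch of the same pattern;
// the SafeFlag below is a simplified std::atomic stand-in exposing the same
// set()/clear()/is_set() calls the diff uses, not Godot's actual class.
#include <atomic>
#include <chrono>
#include <thread>

class SafeFlag {
	std::atomic<bool> flag{ false };

public:
	void set() { flag.store(true, std::memory_order_release); }
	void clear() { flag.store(false, std::memory_order_release); }
	bool is_set() const { return flag.load(std::memory_order_acquire); }
};

struct AudioDriverSketch {
	SafeFlag active;
	SafeFlag exit_thread;
	std::thread thread;

	static void thread_func(AudioDriverSketch *ad) {
		while (!ad->exit_thread.is_set()) {
			if (!ad->active.is_set()) {
				// Fill the output buffer with silence, as AudioDriverALSA does before start().
			}
			// Stand-in for the blocking snd_pcm_writei() call.
			std::this_thread::sleep_for(std::chrono::milliseconds(10));
		}
	}

	void init() { thread = std::thread(thread_func, this); }
	void start() { active.set(); } // was `active = true`
	void finish() {                // was `exit_thread = true`, then wait for the thread
		exit_thread.set();
		thread.join();
	}
};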
#ifdef MACOS_ENABLED - Array _get_device_list(bool capture = false); + PackedStringArray _get_device_list(bool capture = false); void _set_device(const String &device, bool capture = false); static OSStatus input_device_address_cb(AudioObjectID inObjectID, @@ -107,11 +107,11 @@ public: void stop(); #ifdef MACOS_ENABLED - virtual Array get_device_list(); + virtual PackedStringArray get_device_list(); virtual String get_device(); virtual void set_device(String device); - virtual Array capture_get_device_list(); + virtual PackedStringArray capture_get_device_list(); virtual void capture_set_device(const String &p_name); virtual String capture_get_device(); #endif diff --git a/drivers/gles3/effects/copy_effects.cpp b/drivers/gles3/effects/copy_effects.cpp index c8e6c2b476..de0181f887 100644 --- a/drivers/gles3/effects/copy_effects.cpp +++ b/drivers/gles3/effects/copy_effects.cpp @@ -111,6 +111,7 @@ CopyEffects::~CopyEffects() { glDeleteVertexArrays(1, &screen_triangle_array); glDeleteBuffers(1, &quad); glDeleteVertexArrays(1, &quad_array); + copy.shader.version_free(copy.shader_version); } void CopyEffects::copy_to_rect(const Rect2i &p_rect) { diff --git a/drivers/gles3/rasterizer_canvas_gles3.cpp b/drivers/gles3/rasterizer_canvas_gles3.cpp index d4ac3c993a..a960fced1f 100644 --- a/drivers/gles3/rasterizer_canvas_gles3.cpp +++ b/drivers/gles3/rasterizer_canvas_gles3.cpp @@ -201,6 +201,7 @@ void RasterizerCanvasGLES3::canvas_render_items(RID p_to_render_target, Item *p_ bool material_screen_texture_found = false; Rect2 back_buffer_rect; bool backbuffer_copy = false; + bool backbuffer_gen_mipmaps = false; Item *ci = p_item_list; Item *canvas_group_owner = nullptr; @@ -225,6 +226,7 @@ void RasterizerCanvasGLES3::canvas_render_items(RID p_to_render_target, Item *p_ if (!material_screen_texture_found) { backbuffer_copy = true; back_buffer_rect = Rect2(); + backbuffer_gen_mipmaps = md->shader_data->uses_screen_texture_mipmaps; } } @@ -282,7 +284,7 @@ void RasterizerCanvasGLES3::canvas_render_items(RID p_to_render_target, Item *p_ _render_items(p_to_render_target, item_count, canvas_transform_inverse, p_light_list); item_count = 0; - texture_storage->render_target_copy_to_back_buffer(p_to_render_target, back_buffer_rect, true); + texture_storage->render_target_copy_to_back_buffer(p_to_render_target, back_buffer_rect, backbuffer_gen_mipmaps); backbuffer_copy = false; material_screen_texture_found = true; //after a backbuffer copy, screen texture makes no further copies @@ -320,6 +322,7 @@ void RasterizerCanvasGLES3::_render_items(RID p_to_render_target, int p_item_cou RID prev_material; uint32_t index = 0; GLES3::CanvasShaderData::BlendMode last_blend_mode = GLES3::CanvasShaderData::BLEND_MODE_MIX; + Color last_blend_color; GLES3::CanvasShaderData *shader_data_cache = nullptr; state.current_tex = texture_storage->texture_gl_get_default(GLES3::DEFAULT_GL_TEXTURE_WHITE); @@ -376,8 +379,80 @@ void RasterizerCanvasGLES3::_render_items(RID p_to_render_target, int p_item_cou GLES3::CanvasShaderData::BlendMode blend_mode = shader_data_cache ? 
shader_data_cache->blend_mode : GLES3::CanvasShaderData::BLEND_MODE_MIX; - if (last_blend_mode != blend_mode) { - if (last_blend_mode == GLES3::CanvasShaderData::BLEND_MODE_DISABLED) { + _render_item(p_to_render_target, ci, canvas_transform_inverse, current_clip, p_lights, index, blend_mode, last_blend_mode, last_blend_color); + } + // Render last command + _render_batch(index); +} + +void RasterizerCanvasGLES3::_render_item(RID p_render_target, const Item *p_item, const Transform2D &p_canvas_transform_inverse, Item *¤t_clip, Light *p_lights, uint32_t &r_index, GLES3::CanvasShaderData::BlendMode p_blend_mode, GLES3::CanvasShaderData::BlendMode &r_last_blend_mode, Color &r_last_blend_color) { + // Used by Polygon and Mesh. + static const GLenum prim[5] = { GL_POINTS, GL_LINES, GL_LINE_STRIP, GL_TRIANGLES, GL_TRIANGLE_STRIP }; + + RS::CanvasItemTextureFilter current_filter = state.default_filter; + RS::CanvasItemTextureRepeat current_repeat = state.default_repeat; + + if (p_item->texture_filter != RS::CANVAS_ITEM_TEXTURE_FILTER_DEFAULT) { + current_filter = p_item->texture_filter; + } + + if (p_item->texture_repeat != RS::CANVAS_ITEM_TEXTURE_REPEAT_DEFAULT) { + current_repeat = p_item->texture_repeat; + } + + Transform2D base_transform = p_canvas_transform_inverse * p_item->final_transform; + Transform2D draw_transform; // Used by transform command + + Color base_color = p_item->final_modulate; + + uint32_t base_flags = 0; + + bool reclip = false; + + bool skipping = false; + + const Item::Command *c = p_item->commands; + while (c) { + if (skipping && c->type != Item::Command::TYPE_ANIMATION_SLICE) { + c = c->next; + continue; + } + + if (c->type != Item::Command::TYPE_MESH) { + // For Meshes, this gets updated below. + _update_transform_2d_to_mat2x3(base_transform * draw_transform, state.instance_data_array[r_index].world); + } + + for (int i = 0; i < 4; i++) { + state.instance_data_array[r_index].modulation[i] = 0.0; + state.instance_data_array[r_index].ninepatch_margins[i] = 0.0; + state.instance_data_array[r_index].src_rect[i] = 0.0; + state.instance_data_array[r_index].dst_rect[i] = 0.0; + state.instance_data_array[r_index].lights[i] = uint32_t(0); + } + state.instance_data_array[r_index].color_texture_pixel_size[0] = 0.0; + state.instance_data_array[r_index].color_texture_pixel_size[1] = 0.0; + + state.instance_data_array[r_index].pad[0] = 0.0; + state.instance_data_array[r_index].pad[1] = 0.0; + + state.instance_data_array[r_index].flags = base_flags | (state.instance_data_array[r_index == 0 ? 0 : r_index - 1].flags & (FLAGS_DEFAULT_NORMAL_MAP_USED | FLAGS_DEFAULT_SPECULAR_MAP_USED)); //reset on each command for sanity, keep canvastexture binding config + + GLES3::CanvasShaderData::BlendMode blend_mode = p_blend_mode; + Color blend_color; + + if (c->type == Item::Command::TYPE_RECT) { + const Item::CommandRect *rect = static_cast<const Item::CommandRect *>(c); + if (rect->flags & CANVAS_RECT_LCD) { + blend_mode = GLES3::CanvasShaderData::BLEND_MODE_LCD; + blend_color = rect->modulate; + } + } + + if (r_last_blend_mode != blend_mode || r_last_blend_color != blend_color) { + _render_batch(r_index); + + if (r_last_blend_mode == GLES3::CanvasShaderData::BLEND_MODE_DISABLED) { // re-enable it glEnable(GL_BLEND); } else if (blend_mode == GLES3::CanvasShaderData::BLEND_MODE_DISABLED) { @@ -390,6 +465,16 @@ void RasterizerCanvasGLES3::_render_items(RID p_to_render_target, int p_item_cou // Nothing to do here. 
} break; + case GLES3::CanvasShaderData::BLEND_MODE_LCD: { + glBlendEquation(GL_FUNC_ADD); + if (state.transparent_render_target) { + glBlendFuncSeparate(GL_CONSTANT_COLOR, GL_ONE_MINUS_SRC_COLOR, GL_ONE, GL_ONE_MINUS_SRC_ALPHA); + } else { + glBlendFuncSeparate(GL_CONSTANT_COLOR, GL_ONE_MINUS_SRC_COLOR, GL_ZERO, GL_ONE); + } + glBlendColor(blend_color.r, blend_color.g, blend_color.b, blend_color.a); + + } break; case GLES3::CanvasShaderData::BLEND_MODE_MIX: { glBlendEquation(GL_FUNC_ADD); if (state.transparent_render_target) { @@ -435,68 +520,10 @@ void RasterizerCanvasGLES3::_render_items(RID p_to_render_target, int p_item_cou } break; } - last_blend_mode = blend_mode; + r_last_blend_mode = blend_mode; + r_last_blend_color = blend_color; } - _render_item(p_to_render_target, ci, canvas_transform_inverse, current_clip, p_lights, index); - } - // Render last command - _render_batch(index); -} - -void RasterizerCanvasGLES3::_render_item(RID p_render_target, const Item *p_item, const Transform2D &p_canvas_transform_inverse, Item *¤t_clip, Light *p_lights, uint32_t &r_index) { - // Used by Polygon and Mesh. - static const GLenum prim[5] = { GL_POINTS, GL_LINES, GL_LINE_STRIP, GL_TRIANGLES, GL_TRIANGLE_STRIP }; - - RS::CanvasItemTextureFilter current_filter = state.default_filter; - RS::CanvasItemTextureRepeat current_repeat = state.default_repeat; - - if (p_item->texture_filter != RS::CANVAS_ITEM_TEXTURE_FILTER_DEFAULT) { - current_filter = p_item->texture_filter; - } - - if (p_item->texture_repeat != RS::CANVAS_ITEM_TEXTURE_REPEAT_DEFAULT) { - current_repeat = p_item->texture_repeat; - } - - Transform2D base_transform = p_canvas_transform_inverse * p_item->final_transform; - Transform2D draw_transform; // Used by transform command - - Color base_color = p_item->final_modulate; - - uint32_t base_flags = 0; - - bool reclip = false; - - bool skipping = false; - - const Item::Command *c = p_item->commands; - while (c) { - if (skipping && c->type != Item::Command::TYPE_ANIMATION_SLICE) { - c = c->next; - continue; - } - - if (c->type != Item::Command::TYPE_MESH) { - // For Meshes, this gets updated below. - _update_transform_2d_to_mat2x3(base_transform * draw_transform, state.instance_data_array[r_index].world); - } - - for (int i = 0; i < 4; i++) { - state.instance_data_array[r_index].modulation[i] = 0.0; - state.instance_data_array[r_index].ninepatch_margins[i] = 0.0; - state.instance_data_array[r_index].src_rect[i] = 0.0; - state.instance_data_array[r_index].dst_rect[i] = 0.0; - state.instance_data_array[r_index].lights[i] = uint32_t(0); - } - state.instance_data_array[r_index].color_texture_pixel_size[0] = 0.0; - state.instance_data_array[r_index].color_texture_pixel_size[1] = 0.0; - - state.instance_data_array[r_index].pad[0] = 0.0; - state.instance_data_array[r_index].pad[1] = 0.0; - - state.instance_data_array[r_index].flags = base_flags | (state.instance_data_array[r_index == 0 ? 0 : r_index - 1].flags & (FLAGS_DEFAULT_NORMAL_MAP_USED | FLAGS_DEFAULT_SPECULAR_MAP_USED)); //reset on each command for sanity, keep canvastexture binding config - switch (c->type) { case Item::Command::TYPE_RECT: { const Item::CommandRect *rect = static_cast<const Item::CommandRect *>(c); @@ -567,6 +594,8 @@ void RasterizerCanvasGLES3::_render_item(RID p_render_target, const Item *p_item state.instance_data_array[r_index].msdf[1] = rect->outline; // Outline size. state.instance_data_array[r_index].msdf[2] = 0.f; // Reserved. state.instance_data_array[r_index].msdf[3] = 0.f; // Reserved. 
+ } else if (rect->flags & CANVAS_RECT_LCD) { + state.instance_data_array[r_index].flags |= FLAGS_USE_LCD; } state.instance_data_array[r_index].modulation[0] = rect->modulate.r * base_color.r; @@ -993,7 +1022,7 @@ void RasterizerCanvasGLES3::_bind_instance_data_buffer(uint32_t p_max_index) { } glBindBufferBase(GL_UNIFORM_BUFFER, INSTANCE_UNIFORM_LOCATION, state.canvas_instance_data_buffers[state.current_buffer]); -#ifdef JAVASCRIPT_ENABLED +#ifdef WEB_ENABLED //WebGL 2.0 does not support mapping buffers, so use slow glBufferData instead glBufferData(GL_UNIFORM_BUFFER, sizeof(InstanceData) * p_max_index, state.instance_data_array, GL_DYNAMIC_DRAW); #else @@ -1608,6 +1637,8 @@ void fragment() { RasterizerCanvasGLES3::~RasterizerCanvasGLES3() { GLES3::MaterialStorage *material_storage = GLES3::MaterialStorage::get_singleton(); + memdelete_arr(state.instance_data_array); + GLES3::MaterialStorage::get_singleton()->shaders.canvas_shader.version_free(state.canvas_shader_default_version); material_storage->material_free(default_canvas_group_material); material_storage->shader_free(default_canvas_group_shader); diff --git a/drivers/gles3/rasterizer_canvas_gles3.h b/drivers/gles3/rasterizer_canvas_gles3.h index f920e37130..372ac00493 100644 --- a/drivers/gles3/rasterizer_canvas_gles3.h +++ b/drivers/gles3/rasterizer_canvas_gles3.h @@ -73,6 +73,7 @@ class RasterizerCanvasGLES3 : public RendererCanvasRender { FLAGS_DEFAULT_SPECULAR_MAP_USED = (1 << 27), FLAGS_USE_MSDF = (1 << 28), + FLAGS_USE_LCD = (1 << 29), }; enum { @@ -249,7 +250,7 @@ public: void canvas_render_items(RID p_to_render_target, Item *p_item_list, const Color &p_modulate, Light *p_light_list, Light *p_directional_list, const Transform2D &p_canvas_transform, RS::CanvasItemTextureFilter p_default_filter, RS::CanvasItemTextureRepeat p_default_repeat, bool p_snap_2d_vertices_to_pixel, bool &r_sdf_used) override; void _render_items(RID p_to_render_target, int p_item_count, const Transform2D &p_canvas_transform_inverse, Light *p_lights, bool p_to_backbuffer = false); - void _render_item(RID p_render_target, const Item *p_item, const Transform2D &p_canvas_transform_inverse, Item *¤t_clip, Light *p_lights, uint32_t &r_index); + void _render_item(RID p_render_target, const Item *p_item, const Transform2D &p_canvas_transform_inverse, Item *¤t_clip, Light *p_lights, uint32_t &r_index, GLES3::CanvasShaderData::BlendMode p_blend_mode, GLES3::CanvasShaderData::BlendMode &r_last_blend_mode, Color &r_last_blend_color); void _render_batch(uint32_t &p_max_index); void _bind_instance_data_buffer(uint32_t p_max_index); void _allocate_instance_data_buffer(); diff --git a/drivers/gles3/rasterizer_gles3.cpp b/drivers/gles3/rasterizer_gles3.cpp index 33303b1e38..7537636356 100644 --- a/drivers/gles3/rasterizer_gles3.cpp +++ b/drivers/gles3/rasterizer_gles3.cpp @@ -69,7 +69,7 @@ #endif #endif -#if !defined(IOS_ENABLED) && !defined(JAVASCRIPT_ENABLED) +#if !defined(IOS_ENABLED) && !defined(WEB_ENABLED) // We include EGL below to get debug callback on GLES2 platforms, // but EGL is not available on iOS. 
#define CAN_DEBUG diff --git a/drivers/gles3/rasterizer_scene_gles3.cpp b/drivers/gles3/rasterizer_scene_gles3.cpp index 279cbccb0e..cb479dda39 100644 --- a/drivers/gles3/rasterizer_scene_gles3.cpp +++ b/drivers/gles3/rasterizer_scene_gles3.cpp @@ -496,7 +496,7 @@ void RasterizerSceneGLES3::_update_dirty_skys() { while (sky) { if (sky->radiance == 0) { - sky->mipmap_count = Image::get_image_required_mipmaps(sky->radiance_size, sky->radiance_size, Image::FORMAT_RGBA8) - 2; + sky->mipmap_count = Image::get_image_required_mipmaps(sky->radiance_size, sky->radiance_size, Image::FORMAT_RGBA8) - 1; // Left uninitialized, will attach a texture at render time glGenFramebuffers(1, &sky->radiance_framebuffer); @@ -523,7 +523,7 @@ void RasterizerSceneGLES3::_update_dirty_skys() { glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_BASE_LEVEL, 0); - glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAX_LEVEL, sky->mipmap_count); + glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAX_LEVEL, sky->mipmap_count - 1); glGenTextures(1, &sky->raw_radiance); glBindTexture(GL_TEXTURE_CUBE_MAP, sky->raw_radiance); @@ -544,7 +544,8 @@ void RasterizerSceneGLES3::_update_dirty_skys() { glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_BASE_LEVEL, 0); - glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAX_LEVEL, sky->mipmap_count); + glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAX_LEVEL, sky->mipmap_count - 1); + glBindTexture(GL_TEXTURE_CUBE_MAP, 0); } @@ -647,7 +648,7 @@ void RasterizerSceneGLES3::_setup_sky(RID p_env, RID p_render_buffers, const Pag float angular_diameter = light_storage->light_get_param(base, RS::LIGHT_PARAM_SIZE); if (angular_diameter > 0.0) { - angular_diameter = Math::tan(Math::deg2rad(angular_diameter)); + angular_diameter = Math::tan(Math::deg_to_rad(angular_diameter)); } else { angular_diameter = 0.0; } @@ -701,6 +702,7 @@ void RasterizerSceneGLES3::_setup_sky(RID p_env, RID p_render_buffers, const Pag } if (!sky->radiance) { + _invalidate_sky(sky); _update_dirty_skys(); } } @@ -865,7 +867,6 @@ void RasterizerSceneGLES3::_update_sky_radiance(RID p_env, const Projection &p_p GLES3::MaterialStorage::get_singleton()->shaders.sky_shader.version_set_uniform(SkyShaderGLES3::TIME, time, shader_data->version, SkyShaderGLES3::MODE_CUBEMAP); GLES3::MaterialStorage::get_singleton()->shaders.sky_shader.version_set_uniform(SkyShaderGLES3::PROJECTION, cm.matrix[2][0], cm.matrix[0][0], cm.matrix[2][1], cm.matrix[1][1], shader_data->version, SkyShaderGLES3::MODE_CUBEMAP); - // Bind a vertex array or else OpenGL complains. 
We won't actually use it glBindVertexArray(sky_globals.screen_triangle_array); glViewport(0, 0, sky->radiance_size, sky->radiance_size); @@ -879,7 +880,7 @@ void RasterizerSceneGLES3::_update_sky_radiance(RID p_env, const Projection &p_p } if (update_single_frame) { - for (int i = 0; i <= max_processing_layer; i++) { + for (int i = 0; i < max_processing_layer; i++) { _filter_sky_radiance(sky, i); } } else { @@ -889,7 +890,7 @@ void RasterizerSceneGLES3::_update_sky_radiance(RID p_env, const Projection &p_p sky->reflection_dirty = false; } else { - if (sky_mode == RS::SKY_MODE_INCREMENTAL && sky->processing_layer <= max_processing_layer) { + if (sky_mode == RS::SKY_MODE_INCREMENTAL && sky->processing_layer < max_processing_layer) { _filter_sky_radiance(sky, sky->processing_layer); sky->processing_layer++; } @@ -1005,7 +1006,9 @@ void RasterizerSceneGLES3::_filter_sky_radiance(Sky *p_sky, int p_base_layer) { glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, p_sky->radiance, p_base_layer); #ifdef DEBUG_ENABLED GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER); - ERR_CONTINUE(status != GL_FRAMEBUFFER_COMPLETE); + if (status != GL_FRAMEBUFFER_COMPLETE) { + WARN_PRINT("Could not bind sky radiance face: " + itos(i) + ", status: " + GLES3::TextureStorage::get_singleton()->get_framebuffer_error(status)); + } #endif material_storage->shaders.cubemap_filter_shader.version_set_uniform(CubemapFilterShaderGLES3::FACE_ID, i, scene_globals.cubemap_filter_shader_version, mode); @@ -1515,7 +1518,7 @@ void RasterizerSceneGLES3::_setup_lights(const RenderDataGLES3 *p_render_data, b light_data.color[2] = linear_col.b; float size = light_storage->light_get_param(base, RS::LIGHT_PARAM_SIZE); - light_data.size = 1.0 - Math::cos(Math::deg2rad(size)); //angle to cosine offset + light_data.size = 1.0 - Math::cos(Math::deg_to_rad(size)); //angle to cosine offset light_data.specular = light_storage->light_get_param(base, RS::LIGHT_PARAM_SPECULAR); @@ -1642,27 +1645,27 @@ void RasterizerSceneGLES3::_setup_lights(const RenderDataGLES3 *p_render_data, b light_data.inv_spot_attenuation = 1.0f / light_storage->light_get_param(base, RS::LIGHT_PARAM_SPOT_ATTENUATION); float spot_angle = light_storage->light_get_param(base, RS::LIGHT_PARAM_SPOT_ANGLE); - light_data.cos_spot_angle = Math::cos(Math::deg2rad(spot_angle)); + light_data.cos_spot_angle = Math::cos(Math::deg_to_rad(spot_angle)); light_data.specular_amount = light_storage->light_get_param(base, RS::LIGHT_PARAM_SPECULAR) * 2.0; - light_data.shadow_enabled = false; + light_data.shadow_opacity = 0.0; } // TODO, to avoid stalls, should rotate between 3 buffers based on frame index. 
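// Illustration, not part of the commit: the _update_dirty_skys() hunk above now treats
// sky->mipmap_count as a number of mip levels, so GL_TEXTURE_MAX_LEVEL (the 0-based
// index of the last defined level) becomes mipmap_count - 1 and the radiance filter
// loops run with `<` instead of `<=`. A small stand-alone sketch of that convention
// for a square face of size `radiance_size`; the exact value returned by Godot's
// Image::get_image_required_mipmaps() is not asserted here.
#include <cstdio>

int full_mip_chain_levels(int size) {
	// Levels in a complete chain: size, size/2, ..., 1  ->  floor(log2(size)) + 1.
	int levels = 1;
	while (size > 1) {
		size >>= 1;
		levels++;
	}
	return levels;
}

int main() {
	const int radiance_size = 256;
	const int mipmap_count = full_mip_chain_levels(radiance_size); // 9 levels: 256 .. 1
	const int max_level = mipmap_count - 1;                        // last valid level index: 8

	std::printf("levels = %d, GL_TEXTURE_MAX_LEVEL = %d\n", mipmap_count, max_level);
	for (int level = 0; level < mipmap_count; level++) { // `<`, matching the updated loops
		std::printf("filter level %d, size %d\n", level, radiance_size >> level);
	}
	return 0;
}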
// TODO, consider mapping the buffer as in 2D + glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_OMNILIGHT_UNIFORM_LOCATION, scene_state.omni_light_buffer); if (r_omni_light_count) { - glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_OMNILIGHT_UNIFORM_LOCATION, scene_state.omni_light_buffer); glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(LightData) * r_omni_light_count, scene_state.omni_lights); } + glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_SPOTLIGHT_UNIFORM_LOCATION, scene_state.spot_light_buffer); if (r_spot_light_count) { - glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_SPOTLIGHT_UNIFORM_LOCATION, scene_state.spot_light_buffer); glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(LightData) * r_spot_light_count, scene_state.spot_lights); } + glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_DIRECTIONAL_LIGHT_UNIFORM_LOCATION, scene_state.directional_light_buffer); if (r_directional_light_count) { - glBindBufferBase(GL_UNIFORM_BUFFER, SCENE_DIRECTIONAL_LIGHT_UNIFORM_LOCATION, scene_state.directional_light_buffer); glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(DirectionalLightData) * r_directional_light_count, scene_state.directional_lights); } glBindBuffer(GL_UNIFORM_BUFFER, 0); @@ -2146,7 +2149,9 @@ void RasterizerSceneGLES3::_render_list_template(RenderListParameters *p_params, index_array_gl = mesh_storage->mesh_surface_get_index_buffer(mesh_surface, surf->lod_index); if (prev_vertex_array_gl != vertex_array_gl) { - glBindVertexArray(vertex_array_gl); + if (vertex_array_gl != 0) { + glBindVertexArray(vertex_array_gl); + } prev_vertex_array_gl = vertex_array_gl; } @@ -2303,7 +2308,7 @@ void RasterizerSceneGLES3::render_buffers_configure(RID p_render_buffers, RID p_ glGenTextures(1, &rb->depth_texture); glBindTexture(GL_TEXTURE_2D, rb->depth_texture); - glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, rt->size.x, rt->size.y, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, nullptr); + glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, rt->size.x, rt->size.y, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, nullptr); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); diff --git a/drivers/gles3/rasterizer_scene_gles3.h b/drivers/gles3/rasterizer_scene_gles3.h index e227b2df82..a54d87a3a3 100644 --- a/drivers/gles3/rasterizer_scene_gles3.h +++ b/drivers/gles3/rasterizer_scene_gles3.h @@ -165,7 +165,7 @@ private: float inv_spot_attenuation; float cos_spot_angle; float specular_amount; - uint32_t shadow_enabled; + float shadow_opacity; }; static_assert(sizeof(LightData) % 16 == 0, "LightData size must be a multiple of 16 bytes"); diff --git a/drivers/gles3/shader_gles3.cpp b/drivers/gles3/shader_gles3.cpp index 21ccef3518..033f10dbc5 100644 --- a/drivers/gles3/shader_gles3.cpp +++ b/drivers/gles3/shader_gles3.cpp @@ -472,7 +472,7 @@ String ShaderGLES3::_version_get_sha1(Version *p_version) const { bool ShaderGLES3::_load_from_cache(Version *p_version) { #if 0 String sha1 = _version_get_sha1(p_version); - String path = shader_cache_dir.plus_file(name).plus_file(base_sha256).plus_file(sha1) + ".cache"; + String path = shader_cache_dir.path_join(name).path_join(base_sha256).path_join(sha1) + ".cache"; Ref<FileAccess> f = FileAccess::open(path, FileAccess::READ); if (f.is_null()) { @@ -538,7 +538,7 @@ bool ShaderGLES3::_load_from_cache(Version *p_version) { void ShaderGLES3::_save_to_cache(Version *p_version) { #if 0 String sha1 = _version_get_sha1(p_version); - String path = shader_cache_dir.plus_file(name).plus_file(base_sha256).plus_file(sha1) + ".cache"; + 
String path = shader_cache_dir.path_join(name).path_join(base_sha256).path_join(sha1) + ".cache"; Ref<FileAccess> f = FileAccess::open(path, FileAccess::WRITE); ERR_FAIL_COND(f.is_null()); diff --git a/drivers/gles3/shaders/canvas.glsl b/drivers/gles3/shaders/canvas.glsl index 4df818cd4c..5ec25327be 100644 --- a/drivers/gles3/shaders/canvas.glsl +++ b/drivers/gles3/shaders/canvas.glsl @@ -473,7 +473,13 @@ void main() { float a = clamp(d * px_size + 0.5, 0.0, 1.0); color.a = a * color.a; } - + } else if (bool(draw_data[draw_data_instance].flags & FLAGS_USE_LCD)) { + vec4 lcd_sample = texture(color_texture, uv); + if (lcd_sample.a == 1.0) { + color.rgb = lcd_sample.rgb * color.a; + } else { + color = vec4(0.0, 0.0, 0.0, 0.0); + } } else { #else { diff --git a/drivers/gles3/shaders/canvas_uniforms_inc.glsl b/drivers/gles3/shaders/canvas_uniforms_inc.glsl index 852dccf415..6b61fe9375 100644 --- a/drivers/gles3/shaders/canvas_uniforms_inc.glsl +++ b/drivers/gles3/shaders/canvas_uniforms_inc.glsl @@ -25,6 +25,7 @@ #define FLAGS_DEFAULT_SPECULAR_MAP_USED uint(1 << 27) #define FLAGS_USE_MSDF uint(1 << 28) +#define FLAGS_USE_LCD uint(1 << 29) // must be always 128 bytes long struct DrawData { diff --git a/drivers/gles3/shaders/cubemap_filter.glsl b/drivers/gles3/shaders/cubemap_filter.glsl index 57f0d7d0b8..88464876f1 100644 --- a/drivers/gles3/shaders/cubemap_filter.glsl +++ b/drivers/gles3/shaders/cubemap_filter.glsl @@ -106,12 +106,12 @@ void main() { T[2] = N; for (int sample_num = 0; sample_num < sample_count; sample_num++) { - vec4 sample = sample_directions_mip[sample_num]; - vec3 L = T * sample.xyz; - vec3 val = textureLod(source_cube, L, sample.w).rgb; + vec4 sample_direction_mip = sample_directions_mip[sample_num]; + vec3 L = T * sample_direction_mip.xyz; + vec3 val = textureLod(source_cube, L, sample_direction_mip.w).rgb; // Mix using linear val = srgb_to_linear(val); - sum.rgb += val * sample.z; + sum.rgb += val * sample_direction_mip.z; } sum /= weight; diff --git a/drivers/gles3/shaders/scene.glsl b/drivers/gles3/shaders/scene.glsl index 93bb4c191d..c7fdd6ebd8 100644 --- a/drivers/gles3/shaders/scene.glsl +++ b/drivers/gles3/shaders/scene.glsl @@ -35,8 +35,8 @@ USE_RADIANCE_MAP = true /* from RenderingServer: ARRAY_VERTEX = 0, // RG32F or RGB32F (depending on 2D bit) -ARRAY_NORMAL = 1, // A2B10G10R10, A is ignored. -ARRAY_TANGENT = 2, // A2B10G10R10, A flips sign of binormal. 
+ARRAY_NORMAL = 1, // RG16 octahedral compression +ARRAY_TANGENT = 2, // RG16 octahedral compression, sign stored in sign of G ARRAY_COLOR = 3, // RGBA8 ARRAY_TEX_UV = 4, // RG32F ARRAY_TEX_UV2 = 5, // RG32F @@ -54,11 +54,11 @@ layout(location = 0) in highp vec3 vertex_attrib; /* clang-format on */ #ifdef NORMAL_USED -layout(location = 1) in vec3 normal_attrib; +layout(location = 1) in vec2 normal_attrib; #endif #if defined(TANGENT_USED) || defined(NORMAL_MAP_USED) || defined(LIGHT_ANISOTROPY_USED) -layout(location = 2) in vec4 tangent_attrib; +layout(location = 2) in vec2 tangent_attrib; #endif #if defined(COLOR_USED) @@ -97,6 +97,13 @@ layout(location = 10) in uvec4 bone_attrib; layout(location = 11) in vec4 weight_attrib; #endif +vec3 oct_to_vec3(vec2 e) { + vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y)); + float t = max(-v.z, 0.0); + v.xy += t * -sign(v.xy); + return v; +} + #ifdef USE_INSTANCING layout(location = 12) in highp vec4 instance_xform0; layout(location = 13) in highp vec4 instance_xform1; @@ -209,13 +216,14 @@ void main() { #endif #ifdef NORMAL_USED - vec3 normal = normal_attrib * 2.0 - 1.0; + vec3 normal = oct_to_vec3(normal_attrib * 2.0 - 1.0); #endif highp mat3 model_normal_matrix = mat3(model_matrix); #if defined(TANGENT_USED) || defined(NORMAL_MAP_USED) || defined(LIGHT_ANISOTROPY_USED) - vec3 tangent = tangent_attrib.xyz * 2.0 - 1.0; - float binormalf = tangent_attrib.a * 2.0 - 1.0; + vec2 signed_tangent_attrib = tangent_attrib * 2.0 - 1.0; + vec3 tangent = oct_to_vec3(vec2(signed_tangent_attrib.x, abs(signed_tangent_attrib.y) * 2.0 - 1.0)); + float binormalf = sign(signed_tangent_attrib.y); vec3 binormal = normalize(cross(normal, tangent) * binormalf); #endif @@ -495,7 +503,7 @@ struct LightData { //this structure needs to be as packed as possible mediump float cone_attenuation; mediump float cone_angle; mediump float specular_amount; - bool shadow_enabled; + mediump float shadow_opacity; }; #ifndef DISABLE_LIGHT_OMNI layout(std140) uniform OmniLightData { // ubo:5 @@ -754,7 +762,7 @@ void light_process_omni(uint idx, vec3 vertex, vec3 eye_vec, vec3 normal, vec3 f if (omni_lights[idx].size > 0.0) { float t = omni_lights[idx].size / max(0.001, light_length); - size_A = max(0.0, 1.0 - 1 / sqrt(1 + t * t)); + size_A = max(0.0, 1.0 - 1.0 / sqrt(1.0 + t * t)); } light_compute(normal, normalize(light_rel_vec), eye_vec, size_A, color, omni_attenuation, f0, roughness, metallic, omni_lights[idx].specular_amount, albedo, alpha, @@ -803,7 +811,7 @@ void light_process_spot(uint idx, vec3 vertex, vec3 eye_vec, vec3 normal, vec3 f if (spot_lights[idx].size > 0.0) { float t = spot_lights[idx].size / max(0.001, light_length); - size_A = max(0.0, 1.0 - 1 / sqrt(1 + t * t)); + size_A = max(0.0, 1.0 - 1.0 / sqrt(1.0 + t * t)); } light_compute(normal, normalize(light_rel_vec), eye_vec, size_A, color, spot_attenuation, f0, roughness, metallic, spot_lights[idx].specular_amount, albedo, alpha, @@ -1052,6 +1060,7 @@ void main() { #else vec3 ref_vec = reflect(-view, normal); #endif + ref_vec = mix(ref_vec, normal, roughness * roughness); float horizon = min(1.0 + dot(ref_vec, normal), 1.0); ref_vec = scene_data.radiance_inverse_xform * ref_vec; specular_light = textureLod(radiance_map, ref_vec, roughness * RADIANCE_MAX_LOD).rgb; diff --git a/drivers/gles3/storage/config.cpp b/drivers/gles3/storage/config.cpp index f2809734a9..6cc65e7bb2 100644 --- a/drivers/gles3/storage/config.cpp +++ b/drivers/gles3/storage/config.cpp @@ -64,7 +64,14 @@ Config::Config() { #else float_texture_supported = 
extensions.has("GL_ARB_texture_float") || extensions.has("GL_OES_texture_float"); etc2_supported = true; +#if defined(ANDROID_ENABLED) || defined(IOS_ENABLED) + // Some Android devices report support for S3TC but we don't expect that and don't export the textures. + // This could be fixed but so few devices support it that it doesn't seem useful (and makes bigger APKs). + // For good measure we do the same hack for iOS, just in case. + s3tc_supported = false; +#else s3tc_supported = extensions.has("GL_EXT_texture_compression_dxt1") || extensions.has("GL_EXT_texture_compression_s3tc") || extensions.has("WEBGL_compressed_texture_s3tc"); +#endif rgtc_supported = extensions.has("GL_EXT_texture_compression_rgtc") || extensions.has("GL_ARB_texture_compression_rgtc") || extensions.has("EXT_texture_compression_rgtc"); #endif diff --git a/drivers/gles3/storage/light_storage.cpp b/drivers/gles3/storage/light_storage.cpp index 22578c9e91..683716ca99 100644 --- a/drivers/gles3/storage/light_storage.cpp +++ b/drivers/gles3/storage/light_storage.cpp @@ -58,6 +58,7 @@ void LightStorage::_light_initialize(RID p_light, RS::LightType p_type) { light.param[RS::LIGHT_PARAM_ENERGY] = 1.0; light.param[RS::LIGHT_PARAM_INDIRECT_ENERGY] = 1.0; + light.param[RS::LIGHT_PARAM_VOLUMETRIC_FOG_ENERGY] = 1.0; light.param[RS::LIGHT_PARAM_SPECULAR] = 0.5; light.param[RS::LIGHT_PARAM_RANGE] = 1.0; light.param[RS::LIGHT_PARAM_SIZE] = 0.0; @@ -70,10 +71,10 @@ void LightStorage::_light_initialize(RID p_light, RS::LightType p_type) { light.param[RS::LIGHT_PARAM_SHADOW_SPLIT_3_OFFSET] = 0.6; light.param[RS::LIGHT_PARAM_SHADOW_FADE_START] = 0.8; light.param[RS::LIGHT_PARAM_SHADOW_NORMAL_BIAS] = 1.0; + light.param[RS::LIGHT_PARAM_SHADOW_OPACITY] = 1.0; light.param[RS::LIGHT_PARAM_SHADOW_BIAS] = 0.02; light.param[RS::LIGHT_PARAM_SHADOW_BLUR] = 0; light.param[RS::LIGHT_PARAM_SHADOW_PANCAKE_SIZE] = 20.0; - light.param[RS::LIGHT_PARAM_SHADOW_VOLUMETRIC_FOG_FADE] = 0.1; light.param[RS::LIGHT_PARAM_TRANSMITTANCE_BIAS] = 0.05; light_owner.initialize_rid(p_light, light); @@ -317,7 +318,7 @@ AABB LightStorage::light_get_aabb(RID p_light) const { switch (light->type) { case RS::LIGHT_SPOT: { float len = light->param[RS::LIGHT_PARAM_RANGE]; - float size = Math::tan(Math::deg2rad(light->param[RS::LIGHT_PARAM_SPOT_ANGLE])) * len; + float size = Math::tan(Math::deg_to_rad(light->param[RS::LIGHT_PARAM_SPOT_ANGLE])) * len; return AABB(Vector3(-size, -size, -len), Vector3(size * 2, size * 2, len)); }; case RS::LIGHT_OMNI: { diff --git a/drivers/gles3/storage/light_storage.h b/drivers/gles3/storage/light_storage.h index 857a0261fa..fa012a7b58 100644 --- a/drivers/gles3/storage/light_storage.h +++ b/drivers/gles3/storage/light_storage.h @@ -261,13 +261,6 @@ public: return light->param[RS::LIGHT_PARAM_TRANSMITTANCE_BIAS]; } - _FORCE_INLINE_ float light_get_shadow_volumetric_fog_fade(RID p_light) const { - const Light *light = light_owner.get_or_null(p_light); - ERR_FAIL_COND_V(!light, 0.0); - - return light->param[RS::LIGHT_PARAM_SHADOW_VOLUMETRIC_FOG_FADE]; - } - virtual RS::LightBakeMode light_get_bake_mode(RID p_light) override; virtual uint32_t light_get_max_sdfgi_cascade(RID p_light) override { return 0; } virtual uint64_t light_get_version(RID p_light) const override; diff --git a/drivers/gles3/storage/material_storage.cpp b/drivers/gles3/storage/material_storage.cpp index ac2f4f0019..a64c7f7200 100644 --- a/drivers/gles3/storage/material_storage.cpp +++ b/drivers/gles3/storage/material_storage.cpp @@ -1083,6 +1083,12 @@ void 
MaterialData::update_textures(const HashMap<StringName, Variant> &p_paramet Vector<RID> textures; + if (p_texture_uniforms[i].hint == ShaderLanguage::ShaderNode::Uniform::HINT_SCREEN_TEXTURE || + p_texture_uniforms[i].hint == ShaderLanguage::ShaderNode::Uniform::HINT_NORMAL_ROUGHNESS_TEXTURE || + p_texture_uniforms[i].hint == ShaderLanguage::ShaderNode::Uniform::HINT_DEPTH_TEXTURE) { + continue; + } + if (p_texture_uniforms[i].global) { uses_global_textures = true; @@ -1156,6 +1162,9 @@ void MaterialData::update_textures(const HashMap<StringName, Variant> &p_paramet case ShaderLanguage::ShaderNode::Uniform::HINT_DEFAULT_BLACK: { gl_texture = texture_storage->texture_gl_get_default(DEFAULT_GL_TEXTURE_BLACK); } break; + case ShaderLanguage::ShaderNode::Uniform::HINT_DEFAULT_TRANSPARENT: { + gl_texture = texture_storage->texture_gl_get_default(DEFAULT_GL_TEXTURE_TRANSPARENT); + } break; case ShaderLanguage::ShaderNode::Uniform::HINT_ANISOTROPY: { gl_texture = texture_storage->texture_gl_get_default(DEFAULT_GL_TEXTURE_ANISO); } break; @@ -1447,8 +1456,8 @@ MaterialStorage::MaterialStorage() { actions.renames["UV2"] = "uv2_interp"; actions.renames["COLOR"] = "color_interp"; actions.renames["POINT_SIZE"] = "gl_PointSize"; - actions.renames["INSTANCE_ID"] = "gl_InstanceIndex"; - actions.renames["VERTEX_ID"] = "gl_VertexIndex"; + actions.renames["INSTANCE_ID"] = "gl_InstanceID"; + actions.renames["VERTEX_ID"] = "gl_VertexID"; actions.renames["ALPHA_SCISSOR_THRESHOLD"] = "alpha_scissor_threshold"; actions.renames["ALPHA_HASH_SCALE"] = "alpha_hash_scale"; @@ -1489,9 +1498,9 @@ MaterialStorage::MaterialStorage() { actions.renames["POINT_COORD"] = "gl_PointCoord"; actions.renames["INSTANCE_CUSTOM"] = "instance_custom"; actions.renames["SCREEN_UV"] = "screen_uv"; - actions.renames["SCREEN_TEXTURE"] = "color_buffer"; - actions.renames["DEPTH_TEXTURE"] = "depth_buffer"; - actions.renames["NORMAL_ROUGHNESS_TEXTURE"] = "normal_roughness_buffer"; + //actions.renames["SCREEN_TEXTURE"] = "color_buffer"; //Not implemented in 3D yet. + //actions.renames["DEPTH_TEXTURE"] = "depth_buffer"; // Not implemented in 3D yet. 
+ //actions.renames["NORMAL_ROUGHNESS_TEXTURE"] = "normal_roughness_buffer"; // Not implemented in 3D yet actions.renames["DEPTH"] = "gl_FragDepth"; actions.renames["OUTPUT_IS_SRGB"] = "true"; actions.renames["FOG"] = "fog"; @@ -1505,6 +1514,11 @@ MaterialStorage::MaterialStorage() { actions.renames["CUSTOM3"] = "custom3_attrib"; actions.renames["OUTPUT_IS_SRGB"] = "SHADER_IS_SRGB"; + actions.renames["NODE_POSITION_WORLD"] = "model_matrix[3].xyz"; + actions.renames["CAMERA_POSITION_WORLD"] = "scene_data.inv_view_matrix[3].xyz"; + actions.renames["CAMERA_DIRECTION_WORLD"] = "scene_data.view_matrix[3].xyz"; + actions.renames["NODE_POSITION_VIEW"] = "(model_matrix * scene_data.view_matrix)[3].xyz"; + actions.renames["VIEW_INDEX"] = "ViewIndex"; actions.renames["VIEW_MONO_LEFT"] = "0"; actions.renames["VIEW_RIGHT"] = "1"; @@ -2514,11 +2528,11 @@ String MaterialStorage::shader_get_code(RID p_shader) const { return shader->code; } -void MaterialStorage::shader_get_param_list(RID p_shader, List<PropertyInfo> *p_param_list) const { +void MaterialStorage::shader_get_shader_uniform_list(RID p_shader, List<PropertyInfo> *p_param_list) const { GLES3::Shader *shader = shader_owner.get_or_null(p_shader); ERR_FAIL_COND(!shader); if (shader->data) { - return shader->data->get_param_list(p_param_list); + return shader->data->get_shader_uniform_list(p_param_list); } } @@ -2750,14 +2764,14 @@ bool MaterialStorage::material_casts_shadows(RID p_material) { return true; //by default everything casts shadows } -void MaterialStorage::material_get_instance_shader_parameters(RID p_material, List<InstanceShaderParam> *r_parameters) { +void MaterialStorage::material_get_instance_shader_uniforms(RID p_material, List<InstanceShaderParam> *r_parameters) { GLES3::Material *material = material_owner.get_or_null(p_material); ERR_FAIL_COND(!material); if (material->shader && material->shader->data) { material->shader->data->get_instance_param_list(r_parameters); if (material->next_pass.is_valid()) { - material_get_instance_shader_parameters(material->next_pass, r_parameters); + material_get_instance_shader_uniforms(material->next_pass, r_parameters); } } } @@ -2781,6 +2795,7 @@ void CanvasShaderData::set_code(const String &p_code) { ubo_size = 0; uniforms.clear(); uses_screen_texture = false; + uses_screen_texture_mipmaps = false; uses_sdf = false; uses_time = false; @@ -2791,7 +2806,6 @@ void CanvasShaderData::set_code(const String &p_code) { ShaderCompiler::GeneratedCode gen_code; int blend_modei = BLEND_MODE_MIX; - uses_screen_texture = false; ShaderCompiler::IdentifierActions actions; actions.entry_point_stages["vertex"] = ShaderCompiler::STAGE_VERTEX; @@ -2818,6 +2832,7 @@ void CanvasShaderData::set_code(const String &p_code) { } blend_mode = BlendMode(blend_modei); + uses_screen_texture_mipmaps = gen_code.uses_screen_texture_mipmaps; #if 0 print_line("**compiling shader:"); @@ -2825,12 +2840,16 @@ void CanvasShaderData::set_code(const String &p_code) { for (int i = 0; i < gen_code.defines.size(); i++) { print_line(gen_code.defines[i]); } + + HashMap<String, String>::Iterator el = gen_code.code.begin(); + while (el) { + print_line("\n**code " + el->key + ":\n" + el->value); + ++el; + } + print_line("\n**uniforms:\n" + gen_code.uniforms); - print_line("\n**vertex_globals:\n" + gen_code.vertex_global); - print_line("\n**vertex_code:\n" + gen_code.vertex); - print_line("\n**fragment_globals:\n" + gen_code.fragment_global); - print_line("\n**fragment_code:\n" + gen_code.fragment); - print_line("\n**light_code:\n" + 
gen_code.light); + print_line("\n**vertex_globals:\n" + gen_code.stage_globals[ShaderCompiler::STAGE_VERTEX]); + print_line("\n**fragment_globals:\n" + gen_code.stage_globals[ShaderCompiler::STAGE_FRAGMENT]); #endif Vector<StringName> texture_uniform_names; @@ -2865,11 +2884,14 @@ void CanvasShaderData::set_default_texture_param(const StringName &p_name, RID p } } -void CanvasShaderData::get_param_list(List<PropertyInfo> *p_param_list) const { +void CanvasShaderData::get_shader_uniform_list(List<PropertyInfo> *p_param_list) const { HashMap<int, StringName> order; for (const KeyValue<StringName, ShaderLanguage::ShaderNode::Uniform> &E : uniforms) { - if (E.value.scope != ShaderLanguage::ShaderNode::Uniform::SCOPE_LOCAL) { + if (E.value.scope != ShaderLanguage::ShaderNode::Uniform::SCOPE_LOCAL || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_SCREEN_TEXTURE || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_NORMAL_ROUGHNESS_TEXTURE || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_DEPTH_TEXTURE) { continue; } if (E.value.texture_order >= 0) { @@ -3062,12 +3084,16 @@ void SkyShaderData::set_code(const String &p_code) { for (int i = 0; i < gen_code.defines.size(); i++) { print_line(gen_code.defines[i]); } + + HashMap<String, String>::Iterator el = gen_code.code.begin(); + while (el) { + print_line("\n**code " + el->key + ":\n" + el->value); + ++el; + } + print_line("\n**uniforms:\n" + gen_code.uniforms); - // print_line("\n**vertex_globals:\n" + gen_code.vertex_global); - // print_line("\n**vertex_code:\n" + gen_code.vertex); - print_line("\n**fragment_globals:\n" + gen_code.fragment_global); - print_line("\n**fragment_code:\n" + gen_code.fragment); - print_line("\n**light_code:\n" + gen_code.light); + print_line("\n**vertex_globals:\n" + gen_code.stage_globals[ShaderCompiler::STAGE_VERTEX]); + print_line("\n**fragment_globals:\n" + gen_code.stage_globals[ShaderCompiler::STAGE_FRAGMENT]); #endif Vector<StringName> texture_uniform_names; @@ -3102,7 +3128,7 @@ void SkyShaderData::set_default_texture_param(const StringName &p_name, RID p_te } } -void SkyShaderData::get_param_list(List<PropertyInfo> *p_param_list) const { +void SkyShaderData::get_shader_uniform_list(List<PropertyInfo> *p_param_list) const { RBMap<int, StringName> order; for (const KeyValue<StringName, ShaderLanguage::ShaderNode::Uniform> &E : uniforms) { @@ -3245,7 +3271,6 @@ void SceneShaderData::set_code(const String &p_code) { valid = false; ubo_size = 0; uniforms.clear(); - uses_screen_texture = false; if (code.is_empty()) { return; //just invalid, but no error @@ -3370,6 +3395,7 @@ void SceneShaderData::set_code(const String &p_code) { vertex_input_mask |= uses_custom3 << 8; vertex_input_mask |= uses_bones << 9; vertex_input_mask |= uses_weights << 10; + uses_screen_texture_mipmaps = gen_code.uses_screen_texture_mipmaps; #if 0 print_line("**compiling shader:"); @@ -3378,11 +3404,10 @@ void SceneShaderData::set_code(const String &p_code) { print_line(gen_code.defines[i]); } - Map<String, String>::Element *el = gen_code.code.front(); + HashMap<String, String>::Iterator el = gen_code.code.begin(); while (el) { - print_line("\n**code " + el->key() + ":\n" + el->value()); - - el = el->next(); + print_line("\n**code " + el->key + ":\n" + el->value); + ++el; } print_line("\n**uniforms:\n" + gen_code.uniforms); @@ -3427,11 +3452,14 @@ void SceneShaderData::set_default_texture_param(const StringName &p_name, RID p_ } } -void SceneShaderData::get_param_list(List<PropertyInfo> *p_param_list) const { 
+void SceneShaderData::get_shader_uniform_list(List<PropertyInfo> *p_param_list) const { RBMap<int, StringName> order; for (const KeyValue<StringName, ShaderLanguage::ShaderNode::Uniform> &E : uniforms) { - if (E.value.scope != ShaderLanguage::ShaderNode::Uniform::SCOPE_LOCAL) { + if (E.value.scope != ShaderLanguage::ShaderNode::Uniform::SCOPE_LOCAL || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_SCREEN_TEXTURE || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_NORMAL_ROUGHNESS_TEXTURE || + E.value.hint == ShaderLanguage::ShaderNode::Uniform::HINT_DEPTH_TEXTURE) { continue; } diff --git a/drivers/gles3/storage/material_storage.h b/drivers/gles3/storage/material_storage.h index 8fc15ddcba..a2a7554821 100644 --- a/drivers/gles3/storage/material_storage.h +++ b/drivers/gles3/storage/material_storage.h @@ -54,7 +54,7 @@ namespace GLES3 { struct ShaderData { virtual void set_code(const String &p_Code) = 0; virtual void set_default_texture_param(const StringName &p_name, RID p_texture, int p_index) = 0; - virtual void get_param_list(List<PropertyInfo> *p_param_list) const = 0; + virtual void get_shader_uniform_list(List<PropertyInfo> *p_param_list) const = 0; virtual void get_instance_param_list(List<RendererMaterialStorage::InstanceShaderParam> *p_param_list) const = 0; virtual bool is_param_texture(const StringName &p_param) const = 0; @@ -142,6 +142,7 @@ struct CanvasShaderData : public ShaderData { BLEND_MODE_MUL, BLEND_MODE_PMALPHA, BLEND_MODE_DISABLED, + BLEND_MODE_LCD, }; bool valid; @@ -159,12 +160,13 @@ struct CanvasShaderData : public ShaderData { HashMap<StringName, HashMap<int, RID>> default_texture_params; bool uses_screen_texture = false; + bool uses_screen_texture_mipmaps = false; bool uses_sdf = false; bool uses_time = false; virtual void set_code(const String &p_Code); virtual void set_default_texture_param(const StringName &p_name, RID p_texture, int p_index); - virtual void get_param_list(List<PropertyInfo> *p_param_list) const; + virtual void get_shader_uniform_list(List<PropertyInfo> *p_param_list) const; virtual void get_instance_param_list(List<RendererMaterialStorage::InstanceShaderParam> *p_param_list) const; virtual bool is_param_texture(const StringName &p_param) const; @@ -215,7 +217,7 @@ struct SkyShaderData : public ShaderData { virtual void set_code(const String &p_Code); virtual void set_default_texture_param(const StringName &p_name, RID p_texture, int p_index); - virtual void get_param_list(List<PropertyInfo> *p_param_list) const; + virtual void get_shader_uniform_list(List<PropertyInfo> *p_param_list) const; virtual void get_instance_param_list(List<RendererMaterialStorage::InstanceShaderParam> *p_param_list) const; virtual bool is_param_texture(const StringName &p_param) const; virtual bool is_animated() const; @@ -312,6 +314,7 @@ struct SceneShaderData : public ShaderData { bool uses_sss; bool uses_transmittance; bool uses_screen_texture; + bool uses_screen_texture_mipmaps; bool uses_depth_texture; bool uses_normal_texture; bool uses_time; @@ -335,7 +338,7 @@ struct SceneShaderData : public ShaderData { virtual void set_code(const String &p_Code); virtual void set_default_texture_param(const StringName &p_name, RID p_texture, int p_index); - virtual void get_param_list(List<PropertyInfo> *p_param_list) const; + virtual void get_shader_uniform_list(List<PropertyInfo> *p_param_list) const; virtual void get_instance_param_list(List<RendererMaterialStorage::InstanceShaderParam> *p_param_list) const; virtual bool is_param_texture(const 
StringName &p_param) const; @@ -545,7 +548,7 @@ public: virtual void shader_set_code(RID p_shader, const String &p_code) override; virtual void shader_set_path_hint(RID p_shader, const String &p_path) override; virtual String shader_get_code(RID p_shader) const override; - virtual void shader_get_param_list(RID p_shader, List<PropertyInfo> *p_param_list) const override; + virtual void shader_get_shader_uniform_list(RID p_shader, List<PropertyInfo> *p_param_list) const override; virtual void shader_set_default_texture_param(RID p_shader, const StringName &p_name, RID p_texture, int p_index) override; virtual RID shader_get_default_texture_param(RID p_shader, const StringName &p_name, int p_index) const override; @@ -576,7 +579,7 @@ public: virtual bool material_is_animated(RID p_material) override; virtual bool material_casts_shadows(RID p_material) override; - virtual void material_get_instance_shader_parameters(RID p_material, List<InstanceShaderParam> *r_parameters) override; + virtual void material_get_instance_shader_uniforms(RID p_material, List<InstanceShaderParam> *r_parameters) override; virtual void material_update_dependency(RID p_material, DependencyTracker *p_instance) override; diff --git a/drivers/gles3/storage/mesh_storage.cpp b/drivers/gles3/storage/mesh_storage.cpp index 88b81805fa..ddf94af5b8 100644 --- a/drivers/gles3/storage/mesh_storage.cpp +++ b/drivers/gles3/storage/mesh_storage.cpp @@ -124,11 +124,11 @@ void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) } break; case RS::ARRAY_NORMAL: { - stride += sizeof(int32_t); + stride += sizeof(uint16_t) * 2; } break; case RS::ARRAY_TANGENT: { - stride += sizeof(int32_t); + stride += sizeof(uint16_t) * 2; } break; case RS::ARRAY_COLOR: { @@ -186,11 +186,13 @@ void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) s->format = p_surface.format; s->primitive = p_surface.primitive; - glGenBuffers(1, &s->vertex_buffer); - glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer); - glBufferData(GL_ARRAY_BUFFER, p_surface.vertex_data.size(), p_surface.vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW); - glBindBuffer(GL_ARRAY_BUFFER, 0); //unbind - s->vertex_buffer_size = p_surface.vertex_data.size(); + if (p_surface.vertex_data.size()) { + glGenBuffers(1, &s->vertex_buffer); + glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer); + glBufferData(GL_ARRAY_BUFFER, p_surface.vertex_data.size(), p_surface.vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? 
GL_DYNAMIC_DRAW : GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); //unbind + s->vertex_buffer_size = p_surface.vertex_data.size(); + } if (p_surface.attribute_data.size()) { glGenBuffers(1, &s->attribute_buffer); @@ -214,7 +216,7 @@ void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) } if (p_surface.index_count) { - bool is_index_16 = p_surface.vertex_count <= 65536; + bool is_index_16 = p_surface.vertex_count <= 65536 && p_surface.vertex_count > 0; glGenBuffers(1, &s->index_buffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer); glBufferData(GL_ELEMENT_ARRAY_BUFFER, p_surface.index_data.size(), p_surface.index_data.ptr(), GL_STATIC_DRAW); @@ -238,6 +240,8 @@ void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) } } + ERR_FAIL_COND_MSG(!p_surface.index_count && !p_surface.vertex_count, "Meshes must contain a vertex array, an index array, or both"); + s->aabb = p_surface.aabb; s->bone_aabbs = p_surface.bone_aabbs; //only really useful for returning them. @@ -255,7 +259,10 @@ void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) mesh->bone_aabbs.resize(p_surface.bone_aabbs.size()); } for (int i = 0; i < p_surface.bone_aabbs.size(); i++) { - mesh->bone_aabbs.write[i].merge_with(p_surface.bone_aabbs[i]); + const AABB &bone = p_surface.bone_aabbs[i]; + if (!bone.has_no_volume()) { + mesh->bone_aabbs.write[i].merge_with(bone); + } } mesh->aabb.merge_with(p_surface.aabb); } @@ -337,7 +344,9 @@ RS::SurfaceData MeshStorage::mesh_get_surface(RID p_mesh, int p_surface) const { RS::SurfaceData sd; sd.format = s.format; - sd.vertex_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.vertex_buffer, s.vertex_buffer_size); + if (s.vertex_buffer != 0) { + sd.vertex_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.vertex_buffer, s.vertex_buffer_size); + } if (s.attribute_buffer != 0) { sd.attribute_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.attribute_buffer, s.attribute_buffer_size); @@ -595,17 +604,16 @@ void MeshStorage::_mesh_surface_generate_version_for_input_mask(Mesh::Surface::V } break; case RS::ARRAY_NORMAL: { attribs[i].offset = vertex_stride; - // Will need to change to accommodate octahedral compression - attribs[i].size = 4; - attribs[i].type = GL_UNSIGNED_INT_2_10_10_10_REV; - vertex_stride += sizeof(float); + attribs[i].size = 2; + attribs[i].type = GL_UNSIGNED_SHORT; + vertex_stride += sizeof(uint16_t) * 2; attribs[i].normalized = GL_TRUE; } break; case RS::ARRAY_TANGENT: { attribs[i].offset = vertex_stride; - attribs[i].size = 4; - attribs[i].type = GL_UNSIGNED_INT_2_10_10_10_REV; - vertex_stride += sizeof(float); + attribs[i].size = 2; + attribs[i].type = GL_UNSIGNED_SHORT; + vertex_stride += sizeof(uint16_t) * 2; attribs[i].normalized = GL_TRUE; } break; case RS::ARRAY_COLOR: { @@ -1347,7 +1355,7 @@ void MeshStorage::multimesh_set_buffer(RID p_multimesh, const Vector<float> &p_b _multimesh_mark_all_dirty(multimesh, false, true); //update AABB } else if (multimesh->mesh.is_valid()) { //if we have a mesh set, we need to re-generate the AABB from the new data - const float *data = multimesh->data_cache.ptr(); + const float *data = p_buffer.ptr(); _multimesh_re_create_aabb(multimesh, data, multimesh->instances); multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB); diff --git a/drivers/gles3/storage/mesh_storage.h b/drivers/gles3/storage/mesh_storage.h index 339380b3b0..74f5800795 100644 --- a/drivers/gles3/storage/mesh_storage.h +++ 
b/drivers/gles3/storage/mesh_storage.h @@ -75,7 +75,7 @@ struct Mesh { // Cache vertex arrays so they can be created struct Version { uint32_t input_mask = 0; - GLuint vertex_array; + GLuint vertex_array = 0; Attrib attribs[RS::ARRAY_MAX]; }; @@ -92,7 +92,7 @@ struct Mesh { float edge_length = 0.0; uint32_t index_count = 0; uint32_t index_buffer_size = 0; - GLuint index_buffer; + GLuint index_buffer = 0; }; LOD *lods = nullptr; @@ -175,7 +175,7 @@ struct MultiMesh { bool *data_cache_dirty_regions = nullptr; uint32_t data_cache_used_dirty_regions = 0; - GLuint buffer; + GLuint buffer = 0; bool dirty = false; MultiMesh *dirty_list = nullptr; @@ -362,7 +362,7 @@ public: _FORCE_INLINE_ GLenum mesh_surface_get_index_type(void *p_surface) const { Mesh::Surface *s = reinterpret_cast<Mesh::Surface *>(p_surface); - return s->vertex_count <= 65536 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT; + return (s->vertex_count <= 65536 && s->vertex_count > 0) ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT; } // Use this to cache Vertex Array Objects so they are only generated once diff --git a/drivers/gles3/storage/texture_storage.cpp b/drivers/gles3/storage/texture_storage.cpp index c05f516548..b8ab4d6839 100644 --- a/drivers/gles3/storage/texture_storage.cpp +++ b/drivers/gles3/storage/texture_storage.cpp @@ -115,6 +115,17 @@ TextureStorage::TextureStorage() { texture_2d_layered_initialize(default_gl_textures[DEFAULT_GL_TEXTURE_CUBEMAP_BLACK], images, RS::TEXTURE_LAYERED_CUBEMAP); } + { // transparent black + Ref<Image> image; + image.instantiate(); + image->create(4, 4, true, Image::FORMAT_RGBA8); + image->fill(Color(0, 0, 0, 0)); + image->generate_mipmaps(); + + default_gl_textures[DEFAULT_GL_TEXTURE_TRANSPARENT] = texture_allocate(); + texture_2d_initialize(default_gl_textures[DEFAULT_GL_TEXTURE_TRANSPARENT], image); + } + { Ref<Image> image; image.instantiate(); @@ -1553,6 +1564,19 @@ void TextureStorage::render_target_clear_used(RID p_render_target) { rt->used_in_frame = false; } +void TextureStorage::render_target_set_msaa(RID p_render_target, RS::ViewportMSAA p_msaa) { + RenderTarget *rt = render_target_owner.get_or_null(p_render_target); + ERR_FAIL_COND(!rt); + if (p_msaa == rt->msaa) { + return; + } + + WARN_PRINT("2D MSAA is not yet supported for GLES3."); + _clear_render_target(rt); + rt->msaa = p_msaa; + _update_render_target(rt); +} + void TextureStorage::render_target_request_clear(RID p_render_target, const Color &p_clear_color) { RenderTarget *rt = render_target_owner.get_or_null(p_render_target); ERR_FAIL_COND(!rt); diff --git a/drivers/gles3/storage/texture_storage.h b/drivers/gles3/storage/texture_storage.h index 77ec1da6f5..4f4032723b 100644 --- a/drivers/gles3/storage/texture_storage.h +++ b/drivers/gles3/storage/texture_storage.h @@ -103,6 +103,7 @@ namespace GLES3 { enum DefaultGLTexture { DEFAULT_GL_TEXTURE_WHITE, DEFAULT_GL_TEXTURE_BLACK, + DEFAULT_GL_TEXTURE_TRANSPARENT, DEFAULT_GL_TEXTURE_NORMAL, DEFAULT_GL_TEXTURE_ANISO, DEFAULT_GL_TEXTURE_DEPTH, @@ -527,6 +528,7 @@ public: virtual void render_target_set_direct_to_screen(RID p_render_target, bool p_direct_to_screen) override; virtual bool render_target_was_used(RID p_render_target) override; void render_target_clear_used(RID p_render_target); + virtual void render_target_set_msaa(RID p_render_target, RS::ViewportMSAA p_msaa) override; // new void render_target_set_as_unused(RID p_render_target) override { diff --git a/drivers/png/SCsub b/drivers/png/SCsub index 39d296e7cf..fe8c8fa8cc 100644 --- a/drivers/png/SCsub +++ b/drivers/png/SCsub @@ 
-30,14 +30,14 @@ if env["builtin_libpng"]: thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources] env_png.Prepend(CPPPATH=[thirdparty_dir]) - # Needed for drivers includes and in platform/javascript + # Needed for drivers includes and in platform/web. env.Prepend(CPPPATH=[thirdparty_dir]) # Currently .ASM filter_neon.S does not compile on NT. import os # Enable ARM NEON instructions on 32-bit Android to compile more optimized code. - use_neon = "android_arch" in env and env["android_arch"] == "armv7" and os.name != "nt" + use_neon = env["platform"] == "android" and env["arch"] == "arm32" and os.name != "nt" if use_neon: env_png.Append(CPPDEFINES=[("PNG_ARM_NEON_OPT", 2)]) else: diff --git a/drivers/png/image_loader_png.cpp b/drivers/png/image_loader_png.cpp index 917bfec574..8d2f8a7ed6 100644 --- a/drivers/png/image_loader_png.cpp +++ b/drivers/png/image_loader_png.cpp @@ -36,7 +36,7 @@ #include <string.h> -Error ImageLoaderPNG::load_image(Ref<Image> p_image, Ref<FileAccess> f, bool p_force_linear, float p_scale) { +Error ImageLoaderPNG::load_image(Ref<Image> p_image, Ref<FileAccess> f, uint32_t p_flags, float p_scale) { const uint64_t buffer_size = f->get_length(); Vector<uint8_t> file_buffer; Error err = file_buffer.resize(buffer_size); @@ -48,7 +48,7 @@ Error ImageLoaderPNG::load_image(Ref<Image> p_image, Ref<FileAccess> f, bool p_f f->get_buffer(writer, buffer_size); } const uint8_t *reader = file_buffer.ptr(); - return PNGDriverCommon::png_to_image(reader, buffer_size, p_force_linear, p_image); + return PNGDriverCommon::png_to_image(reader, buffer_size, p_flags & FLAG_FORCE_LINEAR, p_image); } void ImageLoaderPNG::get_recognized_extensions(List<String> *p_extensions) const { diff --git a/drivers/png/image_loader_png.h b/drivers/png/image_loader_png.h index 9bcfb720d3..91c3c8925f 100644 --- a/drivers/png/image_loader_png.h +++ b/drivers/png/image_loader_png.h @@ -40,7 +40,7 @@ private: static Ref<Image> load_mem_png(const uint8_t *p_png, int p_size); public: - virtual Error load_image(Ref<Image> p_image, Ref<FileAccess> f, bool p_force_linear, float p_scale); + virtual Error load_image(Ref<Image> p_image, Ref<FileAccess> f, uint32_t p_flags, float p_scale); virtual void get_recognized_extensions(List<String> *p_extensions) const; ImageLoaderPNG(); }; diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.cpp b/drivers/pulseaudio/audio_driver_pulseaudio.cpp index a6c35b6837..b25cf1d5b4 100644 --- a/drivers/pulseaudio/audio_driver_pulseaudio.cpp +++ b/drivers/pulseaudio/audio_driver_pulseaudio.cpp @@ -178,7 +178,7 @@ Error AudioDriverPulseAudio::detect_channels(bool capture) { Error AudioDriverPulseAudio::init_device() { // If there is a specified device check that it is really present if (device_name != "Default") { - Array list = get_device_list(); + PackedStringArray list = get_device_list(); if (list.find(device_name) == -1) { device_name = "Default"; new_device = "Default"; @@ -285,9 +285,8 @@ Error AudioDriverPulseAudio::init() { return ERR_CANT_OPEN; } - active = false; - thread_exited = false; - exit_thread = false; + active.clear(); + exit_thread.clear(); mix_rate = GLOBAL_GET("audio/driver/mix_rate"); @@ -384,7 +383,7 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) { size_t avail_bytes = 0; uint64_t default_device_msec = OS::get_singleton()->get_ticks_msec(); - while (!ad->exit_thread) { + while (!ad->exit_thread.is_set()) { size_t read_bytes = 0; size_t written_bytes = 0; @@ -392,7 +391,7 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) 
{ ad->lock(); ad->start_counting_ticks(); - if (!ad->active) { + if (!ad->active.is_set()) { ad->samples_out.fill(0); } else { ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw()); @@ -462,8 +461,8 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) { err = ad->init_device(); if (err != OK) { - ad->active = false; - ad->exit_thread = true; + ad->active.clear(); + ad->exit_thread.set(); break; } } @@ -501,8 +500,8 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) { Error err = ad->init_device(); if (err != OK) { ERR_PRINT("PulseAudio: init_device error"); - ad->active = false; - ad->exit_thread = true; + ad->active.clear(); + ad->exit_thread.set(); break; } @@ -555,8 +554,8 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) { err = ad->capture_init_device(); if (err != OK) { - ad->active = false; - ad->exit_thread = true; + ad->active.clear(); + ad->exit_thread.set(); break; } } @@ -571,12 +570,10 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) { OS::get_singleton()->delay_usec(1000); } } - - ad->thread_exited = true; } void AudioDriverPulseAudio::start() { - active = true; + active.set(); } int AudioDriverPulseAudio::get_mix_rate() const { @@ -599,7 +596,7 @@ void AudioDriverPulseAudio::pa_sinklist_cb(pa_context *c, const pa_sink_info *l, ad->pa_status++; } -Array AudioDriverPulseAudio::get_device_list() { +PackedStringArray AudioDriverPulseAudio::get_device_list() { pa_devices.clear(); pa_devices.push_back("Default"); @@ -661,7 +658,7 @@ void AudioDriverPulseAudio::finish() { return; } - exit_thread = true; + exit_thread.set(); thread.wait_to_finish(); finish_device(); @@ -681,7 +678,7 @@ void AudioDriverPulseAudio::finish() { Error AudioDriverPulseAudio::capture_init_device() { // If there is a specified device check that it is really present if (capture_device_name != "Default") { - Array list = capture_get_device_list(); + PackedStringArray list = capture_get_device_list(); if (list.find(capture_device_name) == -1) { capture_device_name = "Default"; capture_new_device = "Default"; @@ -785,7 +782,7 @@ void AudioDriverPulseAudio::pa_sourcelist_cb(pa_context *c, const pa_source_info ad->pa_status++; } -Array AudioDriverPulseAudio::capture_get_device_list() { +PackedStringArray AudioDriverPulseAudio::capture_get_device_list() { pa_rec_devices.clear(); pa_rec_devices.push_back("Default"); diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.h b/drivers/pulseaudio/audio_driver_pulseaudio.h index af96489972..85e328b49f 100644 --- a/drivers/pulseaudio/audio_driver_pulseaudio.h +++ b/drivers/pulseaudio/audio_driver_pulseaudio.h @@ -35,6 +35,7 @@ #include "core/os/mutex.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" #include "servers/audio_server.h" #include "pulse-so_wrap.h" @@ -67,12 +68,11 @@ class AudioDriverPulseAudio : public AudioDriver { int channels = 0; int pa_ready = 0; int pa_status = 0; - Array pa_devices; - Array pa_rec_devices; + PackedStringArray pa_devices; + PackedStringArray pa_rec_devices; - bool active = false; - bool thread_exited = false; - mutable bool exit_thread = false; + SafeFlag active; + SafeFlag exit_thread; float latency = 0; @@ -103,11 +103,11 @@ public: virtual int get_mix_rate() const; virtual SpeakerMode get_speaker_mode() const; - virtual Array get_device_list(); + virtual PackedStringArray get_device_list(); virtual String get_device(); virtual void set_device(String device); - virtual Array capture_get_device_list(); + virtual PackedStringArray capture_get_device_list(); virtual void 
capture_set_device(const String &p_name); virtual String capture_get_device(); diff --git a/drivers/unix/dir_access_unix.cpp b/drivers/unix/dir_access_unix.cpp index b8b72b8d30..55ea952696 100644 --- a/drivers/unix/dir_access_unix.cpp +++ b/drivers/unix/dir_access_unix.cpp @@ -69,7 +69,7 @@ bool DirAccessUnix::file_exists(String p_file) { GLOBAL_LOCK_FUNCTION if (p_file.is_relative_path()) { - p_file = current_dir.plus_file(p_file); + p_file = current_dir.path_join(p_file); } p_file = fix_path(p_file); @@ -88,7 +88,7 @@ bool DirAccessUnix::dir_exists(String p_dir) { GLOBAL_LOCK_FUNCTION if (p_dir.is_relative_path()) { - p_dir = get_current_dir().plus_file(p_dir); + p_dir = get_current_dir().path_join(p_dir); } p_dir = fix_path(p_dir); @@ -103,7 +103,7 @@ bool DirAccessUnix::is_readable(String p_dir) { GLOBAL_LOCK_FUNCTION if (p_dir.is_relative_path()) { - p_dir = get_current_dir().plus_file(p_dir); + p_dir = get_current_dir().path_join(p_dir); } p_dir = fix_path(p_dir); @@ -114,7 +114,7 @@ bool DirAccessUnix::is_writable(String p_dir) { GLOBAL_LOCK_FUNCTION if (p_dir.is_relative_path()) { - p_dir = get_current_dir().plus_file(p_dir); + p_dir = get_current_dir().path_join(p_dir); } p_dir = fix_path(p_dir); @@ -123,7 +123,7 @@ bool DirAccessUnix::is_writable(String p_dir) { uint64_t DirAccessUnix::get_modified_time(String p_file) { if (p_file.is_relative_path()) { - p_file = current_dir.plus_file(p_file); + p_file = current_dir.path_join(p_file); } p_file = fix_path(p_file); @@ -159,7 +159,7 @@ String DirAccessUnix::get_next() { // known if it points to a directory. stat() will resolve the link // for us. if (entry->d_type == DT_UNKNOWN || entry->d_type == DT_LNK) { - String f = current_dir.plus_file(fname); + String f = current_dir.path_join(fname); struct stat flags; if (stat(f.utf8().get_data(), &flags) == 0) { @@ -315,7 +315,7 @@ Error DirAccessUnix::make_dir(String p_dir) { GLOBAL_LOCK_FUNCTION if (p_dir.is_relative_path()) { - p_dir = get_current_dir().plus_file(p_dir); + p_dir = get_current_dir().path_join(p_dir); } p_dir = fix_path(p_dir); @@ -350,7 +350,7 @@ Error DirAccessUnix::change_dir(String p_dir) { // try_dir is the directory we are trying to change into String try_dir = ""; if (p_dir.is_relative_path()) { - String next_dir = current_dir.plus_file(p_dir); + String next_dir = current_dir.path_join(p_dir); next_dir = next_dir.simplify_path(); try_dir = next_dir; } else { @@ -394,13 +394,13 @@ String DirAccessUnix::get_current_dir(bool p_include_drive) const { Error DirAccessUnix::rename(String p_path, String p_new_path) { if (p_path.is_relative_path()) { - p_path = get_current_dir().plus_file(p_path); + p_path = get_current_dir().path_join(p_path); } p_path = fix_path(p_path); if (p_new_path.is_relative_path()) { - p_new_path = get_current_dir().plus_file(p_new_path); + p_new_path = get_current_dir().path_join(p_new_path); } p_new_path = fix_path(p_new_path); @@ -410,7 +410,7 @@ Error DirAccessUnix::rename(String p_path, String p_new_path) { Error DirAccessUnix::remove(String p_path) { if (p_path.is_relative_path()) { - p_path = get_current_dir().plus_file(p_path); + p_path = get_current_dir().path_join(p_path); } p_path = fix_path(p_path); @@ -429,7 +429,7 @@ Error DirAccessUnix::remove(String p_path) { bool DirAccessUnix::is_link(String p_file) { if (p_file.is_relative_path()) { - p_file = get_current_dir().plus_file(p_file); + p_file = get_current_dir().path_join(p_file); } p_file = fix_path(p_file); @@ -444,7 +444,7 @@ bool DirAccessUnix::is_link(String p_file) { String 
DirAccessUnix::read_link(String p_file) { if (p_file.is_relative_path()) { - p_file = get_current_dir().plus_file(p_file); + p_file = get_current_dir().path_join(p_file); } p_file = fix_path(p_file); @@ -461,7 +461,7 @@ String DirAccessUnix::read_link(String p_file) { Error DirAccessUnix::create_link(String p_source, String p_target) { if (p_target.is_relative_path()) { - p_target = get_current_dir().plus_file(p_target); + p_target = get_current_dir().path_join(p_target); } p_source = fix_path(p_source); diff --git a/drivers/unix/dir_access_unix.h b/drivers/unix/dir_access_unix.h index 18f435f942..f5dca7c282 100644 --- a/drivers/unix/dir_access_unix.h +++ b/drivers/unix/dir_access_unix.h @@ -52,39 +52,39 @@ protected: virtual bool is_hidden(const String &p_name); public: - virtual Error list_dir_begin(); ///< This starts dir listing - virtual String get_next(); - virtual bool current_is_dir() const; - virtual bool current_is_hidden() const; + virtual Error list_dir_begin() override; ///< This starts dir listing + virtual String get_next() override; + virtual bool current_is_dir() const override; + virtual bool current_is_hidden() const override; - virtual void list_dir_end(); ///< + virtual void list_dir_end() override; ///< - virtual int get_drive_count(); - virtual String get_drive(int p_drive); - virtual int get_current_drive(); - virtual bool drives_are_shortcuts(); + virtual int get_drive_count() override; + virtual String get_drive(int p_drive) override; + virtual int get_current_drive() override; + virtual bool drives_are_shortcuts() override; - virtual Error change_dir(String p_dir); ///< can be relative or absolute, return false on success - virtual String get_current_dir(bool p_include_drive = true) const; ///< return current dir location - virtual Error make_dir(String p_dir); + virtual Error change_dir(String p_dir) override; ///< can be relative or absolute, return false on success + virtual String get_current_dir(bool p_include_drive = true) const override; ///< return current dir location + virtual Error make_dir(String p_dir) override; - virtual bool file_exists(String p_file); - virtual bool dir_exists(String p_dir); - virtual bool is_readable(String p_dir); - virtual bool is_writable(String p_dir); + virtual bool file_exists(String p_file) override; + virtual bool dir_exists(String p_dir) override; + virtual bool is_readable(String p_dir) override; + virtual bool is_writable(String p_dir) override; virtual uint64_t get_modified_time(String p_file); - virtual Error rename(String p_path, String p_new_path); - virtual Error remove(String p_path); + virtual Error rename(String p_path, String p_new_path) override; + virtual Error remove(String p_path) override; - virtual bool is_link(String p_file); - virtual String read_link(String p_file); - virtual Error create_link(String p_source, String p_target); + virtual bool is_link(String p_file) override; + virtual String read_link(String p_file) override; + virtual Error create_link(String p_source, String p_target) override; - virtual uint64_t get_space_left(); + virtual uint64_t get_space_left() override; - virtual String get_filesystem_type() const; + virtual String get_filesystem_type() const override; DirAccessUnix(); ~DirAccessUnix(); diff --git a/drivers/unix/file_access_unix.h b/drivers/unix/file_access_unix.h index d61fc08f57..297c34e454 100644 --- a/drivers/unix/file_access_unix.h +++ b/drivers/unix/file_access_unix.h @@ -54,33 +54,33 @@ class FileAccessUnix : public FileAccess { public: static CloseNotificationFunc 
close_notification_func; - virtual Error _open(const String &p_path, int p_mode_flags); ///< open a file - virtual bool is_open() const; ///< true when file is open + virtual Error _open(const String &p_path, int p_mode_flags) override; ///< open a file + virtual bool is_open() const override; ///< true when file is open - virtual String get_path() const; /// returns the path for the current open file - virtual String get_path_absolute() const; /// returns the absolute path for the current open file + virtual String get_path() const override; /// returns the path for the current open file + virtual String get_path_absolute() const override; /// returns the absolute path for the current open file - virtual void seek(uint64_t p_position); ///< seek to a given position - virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file - virtual uint64_t get_position() const; ///< get position in the file - virtual uint64_t get_length() const; ///< get size of the file + virtual void seek(uint64_t p_position) override; ///< seek to a given position + virtual void seek_end(int64_t p_position = 0) override; ///< seek from the end of file + virtual uint64_t get_position() const override; ///< get position in the file + virtual uint64_t get_length() const override; ///< get size of the file - virtual bool eof_reached() const; ///< reading passed EOF + virtual bool eof_reached() const override; ///< reading passed EOF - virtual uint8_t get_8() const; ///< get a byte - virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const; + virtual uint8_t get_8() const override; ///< get a byte + virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const override; - virtual Error get_error() const; ///< get last error + virtual Error get_error() const override; ///< get last error - virtual void flush(); - virtual void store_8(uint8_t p_dest); ///< store a byte - virtual void store_buffer(const uint8_t *p_src, uint64_t p_length); ///< store an array of bytes + virtual void flush() override; + virtual void store_8(uint8_t p_dest) override; ///< store a byte + virtual void store_buffer(const uint8_t *p_src, uint64_t p_length) override; ///< store an array of bytes - virtual bool file_exists(const String &p_path); ///< return true if a file exists + virtual bool file_exists(const String &p_path) override; ///< return true if a file exists - virtual uint64_t _get_modified_time(const String &p_file); - virtual uint32_t _get_unix_permissions(const String &p_file); - virtual Error _set_unix_permissions(const String &p_file, uint32_t p_permissions); + virtual uint64_t _get_modified_time(const String &p_file) override; + virtual uint32_t _get_unix_permissions(const String &p_file) override; + virtual Error _set_unix_permissions(const String &p_file, uint32_t p_permissions) override; FileAccessUnix() {} virtual ~FileAccessUnix(); diff --git a/drivers/unix/ip_unix.cpp b/drivers/unix/ip_unix.cpp index 2deeb79957..0dc2efedc1 100644 --- a/drivers/unix/ip_unix.cpp +++ b/drivers/unix/ip_unix.cpp @@ -95,12 +95,12 @@ void IPUnix::_resolve_hostname(List<IPAddress> &r_addresses, const String &p_hos int s = getaddrinfo(p_hostname.utf8().get_data(), nullptr, &hints, &result); if (s != 0) { - ERR_PRINT("getaddrinfo failed! Cannot resolve hostname."); + print_verbose("getaddrinfo failed! 
Cannot resolve hostname."); return; } if (result == nullptr || result->ai_addr == nullptr) { - ERR_PRINT("Invalid response from getaddrinfo"); + print_verbose("Invalid response from getaddrinfo"); if (result) { freeaddrinfo(result); } diff --git a/drivers/unix/net_socket_posix.cpp b/drivers/unix/net_socket_posix.cpp index f172f31b24..86adf33d62 100644 --- a/drivers/unix/net_socket_posix.cpp +++ b/drivers/unix/net_socket_posix.cpp @@ -50,7 +50,7 @@ #include <netinet/in.h> #include <sys/socket.h> -#ifdef JAVASCRIPT_ENABLED +#ifdef WEB_ENABLED #include <arpa/inet.h> #endif diff --git a/drivers/unix/os_unix.cpp b/drivers/unix/os_unix.cpp index 5bf14056ab..384f46c8df 100644 --- a/drivers/unix/os_unix.cpp +++ b/drivers/unix/os_unix.cpp @@ -92,7 +92,7 @@ static void _setup_clock() { _clock_start = mach_absolute_time() * _clock_scale; } #else -#if defined(CLOCK_MONOTONIC_RAW) && !defined(JAVASCRIPT_ENABLED) // This is a better clock on Linux. +#if defined(CLOCK_MONOTONIC_RAW) && !defined(WEB_ENABLED) // This is a better clock on Linux. #define GODOT_CLOCK CLOCK_MONOTONIC_RAW #else #define GODOT_CLOCK CLOCK_MONOTONIC @@ -292,7 +292,7 @@ uint64_t OS_Unix::get_ticks_usec() const { Error OS_Unix::execute(const String &p_path, const List<String> &p_arguments, String *r_pipe, int *r_exitcode, bool read_stderr, Mutex *p_pipe_mutex, bool p_open_console) { #ifdef __EMSCRIPTEN__ // Don't compile this code at all to avoid undefined references. - // Actual virtual call goes to OS_JavaScript. + // Actual virtual call goes to OS_Web. ERR_FAIL_V(ERR_BUG); #else if (r_pipe) { @@ -366,7 +366,7 @@ Error OS_Unix::execute(const String &p_path, const List<String> &p_arguments, St Error OS_Unix::create_process(const String &p_path, const List<String> &p_arguments, ProcessID *r_child_id, bool p_open_console) { #ifdef __EMSCRIPTEN__ // Don't compile this code at all to avoid undefined references. - // Actual virtual call goes to OS_JavaScript. + // Actual virtual call goes to OS_Web. ERR_FAIL_V(ERR_BUG); #else pid_t pid = fork(); @@ -454,12 +454,12 @@ Error OS_Unix::open_dynamic_library(const String p_path, void *&p_library_handle if (!FileAccess::exists(path)) { // This code exists so GDExtension can load .so files from within the executable path. - path = get_executable_path().get_base_dir().plus_file(p_path.get_file()); + path = get_executable_path().get_base_dir().path_join(p_path.get_file()); } if (!FileAccess::exists(path)) { // This code exists so GDExtension can load .so files from a standard unix location. 
- path = get_executable_path().get_base_dir().plus_file("../lib").plus_file(p_path.get_file()); + path = get_executable_path().get_base_dir().path_join("../lib").path_join(p_path.get_file()); } p_library_handle = dlopen(path.utf8().get_data(), RTLD_NOW); @@ -526,13 +526,13 @@ String OS_Unix::get_user_data_dir() const { if (custom_dir.is_empty()) { custom_dir = appname; } - return get_data_path().plus_file(custom_dir); + return get_data_path().path_join(custom_dir); } else { - return get_data_path().plus_file(get_godot_dir_name()).plus_file("app_userdata").plus_file(appname); + return get_data_path().path_join(get_godot_dir_name()).path_join("app_userdata").path_join(appname); } } - return get_data_path().plus_file(get_godot_dir_name()).plus_file("app_userdata").plus_file("[unnamed project]"); + return get_data_path().path_join(get_godot_dir_name()).path_join("app_userdata").path_join("[unnamed project]"); } String OS_Unix::get_executable_path() const { diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp index 0979ae9e16..6a88f2c442 100644 --- a/drivers/vulkan/rendering_device_vulkan.cpp +++ b/drivers/vulkan/rendering_device_vulkan.cpp @@ -46,7 +46,7 @@ static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096; -// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values) +// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values). RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) { Buffer *buffer = nullptr; if (vertex_buffer_owner.owns(p_buffer)) { @@ -108,8 +108,8 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID } static void update_external_dependency_for_store(VkSubpassDependency2KHR &dependency, bool is_sampled, bool is_storage, bool is_depth) { - // Transitioning from write to read, protect the shaders that may use this next - // Allow for copies/image layout transitions + // Transitioning from write to read, protect the shaders that may use this next. + // Allow for copies/image layout transitions. dependency.dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT; dependency.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT; @@ -125,7 +125,7 @@ static void update_external_dependency_for_store(VkSubpassDependency2KHR &depend } if (is_depth) { - // Depth resources have additional stages that may be interested in them + // Depth resources have additional stages that may be interested in them. dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; } @@ -146,7 +146,7 @@ void RenderingDeviceVulkan::_add_dependency(RID p_id, RID p_depends_on) { } void RenderingDeviceVulkan::_free_dependencies(RID p_id) { - //direct dependencies must be freed + // Direct dependencies must be freed. HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id); if (E) { @@ -156,7 +156,7 @@ void RenderingDeviceVulkan::_free_dependencies(RID p_id) { dependency_map.remove(E); } - //reverse dependencies must be unreferenced + // Reverse dependencies must be unreferenced. 
E = reverse_dependency_map.find(p_id); if (E) { @@ -860,7 +860,7 @@ uint32_t RenderingDeviceVulkan::get_image_format_pixel_size(DataFormat p_format) case DATA_FORMAT_D24_UNORM_S8_UINT: return 4; case DATA_FORMAT_D32_SFLOAT_S8_UINT: - return 5; //? + return 5; // ? case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: case DATA_FORMAT_BC1_RGB_SRGB_BLOCK: case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK: @@ -995,7 +995,7 @@ void RenderingDeviceVulkan::get_compressed_image_format_block_dimensions(DataFor case DATA_FORMAT_EAC_R11_SNORM_BLOCK: case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK: case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: - case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: //again, not sure about astc + case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: // Again, not sure about astc. case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK: case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK: case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK: @@ -1073,7 +1073,7 @@ uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(Data case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK: case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: return 16; - case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: //again, not sure about astc + case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: // Again, not sure about astc. case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK: case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK: case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK: @@ -1101,7 +1101,7 @@ uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(Data case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK: case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK: case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK: - return 8; //wrong + return 8; // Wrong. default: { } } @@ -1110,7 +1110,7 @@ uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(Data uint32_t RenderingDeviceVulkan::get_compressed_image_format_pixel_rshift(DataFormat p_format) { switch (p_format) { - case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: //these formats are half byte size, so rshift is 1 + case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: // These formats are half byte size, so rshift is 1. case DATA_FORMAT_BC1_RGB_SRGB_BLOCK: case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK: case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK: @@ -1184,7 +1184,7 @@ uint32_t RenderingDeviceVulkan::get_image_format_required_size(DataFormat p_form } uint32_t RenderingDeviceVulkan::get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth) { - //formats and block size don't really matter here since they can all go down to 1px (even if block is larger) + // Formats and block size don't really matter here since they can all go down to 1px (even if block is larger). uint32_t w = p_width; uint32_t h = p_height; uint32_t d = p_depth; @@ -1402,16 +1402,16 @@ Error RenderingDeviceVulkan::_insert_staging_block() { } Error RenderingDeviceVulkan::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment) { - //determine a block to use + // Determine a block to use. r_alloc_size = p_amount; while (true) { r_alloc_offset = 0; - //see if we can use current block + // See if we can use current block. if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) { - //we used this block this frame, let's see if there is still room + // We used this block this frame, let's see if there is still room. 
uint32_t write_from = staging_buffer_blocks[staging_buffer_current].fill_amount; @@ -1425,107 +1425,107 @@ Error RenderingDeviceVulkan::_staging_buffer_allocate(uint32_t p_amount, uint32_ int32_t available_bytes = int32_t(staging_buffer_block_size) - int32_t(write_from); if ((int32_t)p_amount < available_bytes) { - //all is good, we should be ok, all will fit + // All is good, we should be ok, all will fit. r_alloc_offset = write_from; } else if (p_can_segment && available_bytes >= (int32_t)p_required_align) { - //ok all won't fit but at least we can fit a chunkie - //all is good, update what needs to be written to + // Ok all won't fit but at least we can fit a chunkie. + // All is good, update what needs to be written to. r_alloc_offset = write_from; r_alloc_size = available_bytes - (available_bytes % p_required_align); } else { - //can't fit it into this buffer. - //will need to try next buffer + // Can't fit it into this buffer. + // Will need to try next buffer. staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size(); - // before doing anything, though, let's check that we didn't manage to fill all blocks - // possible in a single frame + // Before doing anything, though, let's check that we didn't manage to fill all blocks + // possible in a single frame. if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) { - //guess we did.. ok, let's see if we can insert a new block.. + // Guess we did.. ok, let's see if we can insert a new block. if ((uint64_t)staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) { - //we can, so we are safe + // We can, so we are safe. Error err = _insert_staging_block(); if (err) { return err; } - //claim for this frame + // Claim for this frame. staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn; } else { // Ok, worst case scenario, all the staging buffers belong to this frame // and this frame is not even done. - // If this is the main thread, it means the user is likely loading a lot of resources at once, - // otherwise, the thread should just be blocked until the next frame (currently unimplemented) + // If this is the main thread, it means the user is likely loading a lot of resources at once. + // Otherwise, the thread should just be blocked until the next frame (currently unimplemented). - if (false) { //separate thread from render + if (false) { // Separate thread from render. //block_until_next_frame() continue; } else { - //flush EVERYTHING including setup commands. IF not immediate, also need to flush the draw commands + // Flush EVERYTHING including setup commands. IF not immediate, also need to flush the draw commands. _flush(true); - //clear the whole staging buffer + // Clear the whole staging buffer. for (int i = 0; i < staging_buffer_blocks.size(); i++) { staging_buffer_blocks.write[i].frame_used = 0; staging_buffer_blocks.write[i].fill_amount = 0; } - //claim current + // Claim current. staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn; } } } else { - //not from current frame, so continue and try again + // Not from current frame, so continue and try again. continue; } } } else if (staging_buffer_blocks[staging_buffer_current].frame_used <= frames_drawn - frame_count) { - //this is an old block, which was already processed, let's reuse + // This is an old block, which was already processed, let's reuse.
staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn; staging_buffer_blocks.write[staging_buffer_current].fill_amount = 0; } else { - //this block may still be in use, let's not touch it unless we have to, so.. can we create a new one? + // This block may still be in use, let's not touch it unless we have to, so.. can we create a new one? if ((uint64_t)staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) { - //we are still allowed to create a new block, so let's do that and insert it for current pos + // We are still allowed to create a new block, so let's do that and insert it for current pos. Error err = _insert_staging_block(); if (err) { return err; } - //claim for this frame + // Claim for this frame. staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn; } else { - // oops, we are out of room and we can't create more. - // let's flush older frames. + // Oops, we are out of room and we can't create more. + // Let's flush older frames. // The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway. // If loading from a separate thread, we can block that thread until next frame when more room is made (not currently implemented, though). if (false) { - //separate thread from render + // Separate thread from render. //block_until_next_frame() - continue; //and try again + continue; // And try again. } else { _flush(false); for (int i = 0; i < staging_buffer_blocks.size(); i++) { - //clear all blocks but the ones from this frame + // Clear all blocks but the ones from this frame. int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size(); if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) { - break; //ok, we reached something from this frame, abort + break; // Ok, we reached something from this frame, abort. } staging_buffer_blocks.write[block_idx].frame_used = 0; staging_buffer_blocks.write[block_idx].fill_amount = 0; } - //claim for current frame + // Claim for current frame. staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn; } } } - //all was good, break + // All was good, break. break; } @@ -1535,7 +1535,7 @@ Error RenderingDeviceVulkan::_staging_buffer_allocate(uint32_t p_amount, uint32_ } Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer, uint32_t p_required_align) { - //submitting may get chunked for various reasons, so convert this to a task + // Submitting may get chunked for various reasons, so convert this to a task. size_t to_submit = p_data_size; size_t submit_from = 0; @@ -1548,7 +1548,7 @@ Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, c return err; } - //map staging buffer (It's CPU and coherent) + // Map staging buffer (It's CPU and coherent). void *data_ptr = nullptr; { @@ -1556,12 +1556,12 @@ Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, c ERR_FAIL_COND_V_MSG(vkerr, ERR_CANT_CREATE, "vmaMapMemory failed with error " + itos(vkerr) + "."); } - //copy to staging buffer + // Copy to staging buffer. memcpy(((uint8_t *)data_ptr) + block_write_offset, p_data + submit_from, block_write_amount); - //unmap + // Unmap. vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation); - //insert a command to copy this + // Insert a command to copy this. 
VkBufferCopy region; region.srcOffset = block_write_offset; @@ -1587,13 +1587,13 @@ void RenderingDeviceVulkan::_memory_barrier(VkPipelineStageFlags p_src_stage_mas mem_barrier.dstAccessMask = p_dst_sccess; if (p_src_stage_mask == 0 || p_dst_stage_mask == 0) { - return; //no barrier, since this is invalid + return; // No barrier, since this is invalid. } vkCmdPipelineBarrier(p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, p_src_stage_mask, p_dst_stage_mask, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); } void RenderingDeviceVulkan::_full_barrier(bool p_sync_with_draw) { - //used for debug + // Used for debug. _memory_barrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | @@ -1662,8 +1662,8 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T #ifndef ANDROID_ENABLED // vkCreateImage fails with format list on Android (VK_ERROR_OUT_OF_HOST_MEMORY) - VkImageFormatListCreateInfoKHR format_list_create_info; //keep out of the if, needed for creation - Vector<VkFormat> allowed_formats; //keep out of the if, needed for creation + VkImageFormatListCreateInfoKHR format_list_create_info; // Keep out of the if, needed for creation. + Vector<VkFormat> allowed_formats; // Keep out of the if, needed for creation. #endif if (p_format.shareable_formats.size()) { image_create_info.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; @@ -1733,10 +1733,10 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T ERR_FAIL_INDEX_V(p_format.samples, TEXTURE_SAMPLES_MAX, RID()); - image_create_info.samples = rasterization_sample_count[p_format.samples]; + image_create_info.samples = _ensure_supported_sample_count(p_format.samples); image_create_info.tiling = (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; - //usage + // Usage. image_create_info.usage = 0; if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) { @@ -1800,7 +1800,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T } { - //validate that this image is supported for the intended use + // Validate that this image is supported for the intended use. VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), image_create_info.format, &properties); VkFormatFeatureFlags flags; @@ -1841,7 +1841,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T } } - //some view validation + // Some view validation. if (p_view.format_override != DATA_FORMAT_MAX) { ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID()); @@ -1851,7 +1851,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, RID()); ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, RID()); - //allocate memory + // Allocate memory. 
uint32_t width, height; uint32_t image_size = get_image_format_required_size(p_format.format, p_format.width, p_format.height, p_format.depth, p_format.mipmaps, &width, &height); @@ -1884,23 +1884,24 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T texture.mipmaps = image_create_info.mipLevels; texture.base_mipmap = 0; texture.base_layer = 0; + texture.is_resolve_buffer = p_format.is_resolve_buffer; texture.usage_flags = p_format.usage_bits; texture.samples = p_format.samples; texture.allowed_shared_formats = p_format.shareable_formats; - //set base layout based on usage priority + // Set base layout based on usage priority. if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) { - //first priority, readable + // First priority, readable. texture.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } else if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) { - //second priority, storage + // Second priority, storage. texture.layout = VK_IMAGE_LAYOUT_GENERAL; } else if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) { - //third priority, color or depth + // Third priority, color or depth. texture.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; @@ -1925,7 +1926,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T texture.bound = false; - //create view + // Create view. VkImageViewCreateInfo image_view_create_info; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; @@ -1982,7 +1983,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T ERR_FAIL_V_MSG(RID(), "vkCreateImageView failed with error " + itos(err) + "."); } - //barrier to set layout + // Barrier to set layout. { VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; @@ -2004,6 +2005,9 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T } RID id = texture_owner.make_rid(texture); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif if (p_data.size()) { for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) { @@ -2019,13 +2023,13 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID Texture *src_texture = texture_owner.get_or_null(p_with_texture); ERR_FAIL_COND_V(!src_texture, RID()); - if (src_texture->owner.is_valid()) { //ahh this is a share + if (src_texture->owner.is_valid()) { // Ahh this is a share. p_with_texture = src_texture->owner; src_texture = texture_owner.get_or_null(src_texture->owner); - ERR_FAIL_COND_V(!src_texture, RID()); //this is a bug + ERR_FAIL_COND_V(!src_texture, RID()); // This is a bug. } - //create view + // Create view. Texture texture = *src_texture; @@ -2086,7 +2090,7 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO; usage_info.pNext = nullptr; if (p_view.format_override != DATA_FORMAT_MAX) { - //need to validate usage with vulkan + // Need to validate usage with vulkan. 
usage_info.usage = 0; @@ -2133,6 +2137,9 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID texture.owner = p_with_texture; RID id = texture_owner.make_rid(texture); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif _add_dependency(id, p_with_texture); return id; @@ -2145,9 +2152,9 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat Texture texture; texture.image = image; - // if we leave texture.allocation as a nullptr, would that be enough to detect we don't "own" the image? - // also leave texture.allocation_info alone - // we'll set texture.view later on + // If we leave texture.allocation as a nullptr, would that be enough to detect we don't "own" the image? + // Also leave texture.allocation_info alone. + // We'll set texture.view later on. texture.type = p_type; texture.format = p_format; texture.samples = p_samples; @@ -2155,14 +2162,14 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat texture.height = p_height; texture.depth = p_depth; texture.layers = p_layers; - texture.mipmaps = 0; // maybe make this settable too? + texture.mipmaps = 0; // Maybe make this settable too? texture.usage_flags = p_flags; texture.base_mipmap = 0; texture.base_layer = 0; texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_UNORM); texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_SRGB); - // Do we need to do something with texture.layout ? + // Do we need to do something with texture.layout? if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { texture.read_aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT; @@ -2176,7 +2183,7 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat texture.barrier_aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT; } - // Create a view for us to use + // Create a view for us to use. VkImageViewCreateInfo image_view_create_info; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; @@ -2207,7 +2214,7 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat VK_COMPONENT_SWIZZLE_A }; - // hardcode for now, maybe make this settable from outside.. + // Hardcode for now, maybe make this settable from outside. image_view_create_info.components.r = component_swizzles[TEXTURE_SWIZZLE_R]; image_view_create_info.components.g = component_swizzles[TEXTURE_SWIZZLE_G]; image_view_create_info.components.b = component_swizzles[TEXTURE_SWIZZLE_B]; @@ -2230,7 +2237,7 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat ERR_FAIL_V_MSG(RID(), "vkCreateImageView failed with error " + itos(err) + "."); } - //barrier to set layout + // Barrier to set layout. { VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; @@ -2252,6 +2259,9 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat } RID id = texture_owner.make_rid(texture); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif return id; } @@ -2262,10 +2272,10 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p Texture *src_texture = texture_owner.get_or_null(p_with_texture); ERR_FAIL_COND_V(!src_texture, RID()); - if (src_texture->owner.is_valid()) { //ahh this is a share + if (src_texture->owner.is_valid()) { // Ahh this is a share. 
p_with_texture = src_texture->owner; src_texture = texture_owner.get_or_null(src_texture->owner); - ERR_FAIL_COND_V(!src_texture, RID()); //this is a bug + ERR_FAIL_COND_V(!src_texture, RID()); // This is a bug. } ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_CUBEMAP && (src_texture->type != TEXTURE_TYPE_CUBE && src_texture->type != TEXTURE_TYPE_CUBE_ARRAY), RID(), @@ -2277,7 +2287,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(), "Can only create an array slice from a 2D array mipmap"); - //create view + // Create view. ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID()); ERR_FAIL_COND_V(p_mipmap + p_mipmaps > src_texture->mipmaps, RID()); @@ -2377,6 +2387,9 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p texture.owner = p_with_texture; RID id = texture_owner.make_rid(texture); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif _add_dependency(id, p_with_texture); return id; @@ -2414,7 +2427,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co if (texture->owner != RID()) { p_texture = texture->owner; texture = texture_owner.get_or_null(texture->owner); - ERR_FAIL_COND_V(!texture, ERR_BUG); //this is a bug + ERR_FAIL_COND_V(!texture, ERR_BUG); // This is a bug. } ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE, @@ -2436,7 +2449,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co if (required_align == 1) { required_align = get_image_format_pixel_size(texture->format); } - if ((required_align % 4) != 0) { //alignment rules are really strange + if ((required_align % 4) != 0) { // Alignment rules are really strange. required_align *= 4; } @@ -2449,7 +2462,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co VkCommandBuffer command_buffer = p_use_setup_queue ? frames[frame].setup_command_buffer : frames[frame].draw_command_buffer; - //barrier to transfer + // Barrier to transfer. { VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; @@ -2483,7 +2496,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co const uint8_t *read_ptr_mipmap = r + mipmap_offset; image_size = image_total - mipmap_offset; - for (uint32_t z = 0; z < depth; z++) { //for 3D textures, depth may be > 0 + for (uint32_t z = 0; z < depth; z++) { // For 3D textures, depth may be > 0. const uint8_t *read_ptr = read_ptr_mipmap + image_size * z / depth; @@ -2505,7 +2518,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co uint8_t *write_ptr; - { //map + { // Map. void *data_ptr = nullptr; VkResult vkerr = vmaMapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation, &data_ptr); ERR_FAIL_COND_V_MSG(vkerr, ERR_CANT_CREATE, "vmaMapMemory failed with error " + itos(vkerr) + "."); @@ -2520,11 +2533,11 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co ERR_FAIL_COND_V(region_h % block_h, ERR_BUG); if (block_w != 1 || block_h != 1) { - //compressed image (blocks) - //must copy a block region + // Compressed image (blocks). + // Must copy a block region. 
uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format); - //re-create current variables in blocky format + // Re-create current variables in blocky format. uint32_t xb = x / block_w; uint32_t yb = y / block_h; uint32_t wb = width / block_w; @@ -2533,19 +2546,19 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co uint32_t region_hb = region_h / block_h; _copy_region(read_ptr, write_ptr, xb, yb, region_wb, region_hb, wb, block_size); } else { - //regular image (pixels) - //must copy a pixel region + // Regular image (pixels). + // Must copy a pixel region. _copy_region(read_ptr, write_ptr, x, y, region_w, region_h, width, pixel_size); } - { //unmap + { // Unmap. vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation); } VkBufferImageCopy buffer_image_copy; buffer_image_copy.bufferOffset = alloc_offset; - buffer_image_copy.bufferRowLength = 0; //tightly packed - buffer_image_copy.bufferImageHeight = 0; //tightly packed + buffer_image_copy.bufferRowLength = 0; // Tightly packed. + buffer_image_copy.bufferImageHeight = 0; // Tightly packed. buffer_image_copy.imageSubresource.aspectMask = texture->read_aspect_mask; buffer_image_copy.imageSubresource.mipLevel = mm_i; @@ -2572,7 +2585,7 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co logic_height = MAX(1u, logic_height >> 1); } - //barrier to restore layout + // Barrier to restore layout. { uint32_t barrier_flags = 0; uint32_t access_flags = 0; @@ -2659,7 +2672,7 @@ Vector<uint8_t> RenderingDeviceVulkan::_texture_get_data_from_image(Texture *tex const uint8_t *slice_read_ptr = ((uint8_t *)img_mem) + layout.offset + z * layout.depthPitch; if (block_size > 1) { - //compressed + // Compressed. uint32_t line_width = (block_size * (width / blockw)); for (uint32_t y = 0; y < height / blockh; y++) { const uint8_t *rptr = slice_read_ptr + y * layout.rowPitch; @@ -2669,7 +2682,7 @@ Vector<uint8_t> RenderingDeviceVulkan::_texture_get_data_from_image(Texture *tex } } else { - //uncompressed + // Uncompressed. for (uint32_t y = 0; y < height; y++) { const uint8_t *rptr = slice_read_ptr + y * layout.rowPitch; uint8_t *wptr = write_ptr + y * pixel_size * width; @@ -2705,19 +2718,19 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t ERR_FAIL_COND_V(p_layer >= layer_count, Vector<uint8_t>()); if (tex->usage_flags & TEXTURE_USAGE_CPU_READ_BIT) { - //does not need anything fancy, map and read. + // Does not need anything fancy, map and read. return _texture_get_data_from_image(tex, tex->image, tex->allocation, p_layer); } else { - //compute total image size + // Compute total image size. uint32_t width, height, depth; uint32_t buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps, &width, &height, &depth); - //allocate buffer - VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; //makes more sense to retrieve + // Allocate buffer. + VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; // Makes more sense to retrieve. Buffer tmp_buffer; _buffer_allocate(&tmp_buffer, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT); - { //Source image barrier + { // Source image barrier. 
VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -2773,7 +2786,7 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t offset += size; } - { //restore src + { // Restore src. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -2888,9 +2901,9 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; { - //PRE Copy the image + // PRE Copy the image. - { //Source + { // Source. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -2910,7 +2923,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - { //Dest + { // Dest. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -2931,7 +2944,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - //COPY + // COPY. { VkImageCopy image_copy_region; @@ -2958,7 +2971,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, vkCmdCopyImage(command_buffer, src_tex->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_tex->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy_region); } - // RESTORE LAYOUT for SRC and DST + // RESTORE LAYOUT for SRC and DST. uint32_t barrier_flags = 0; uint32_t access_flags = 0; @@ -2979,7 +2992,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, barrier_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; } - { //restore src + { // Restore src. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -2999,7 +3012,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - { //make dst readable + { // Make dst readable. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; @@ -3066,9 +3079,9 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; { - //PRE Copy the image + // PRE Copy the image. - { //Source + { // Source. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -3088,7 +3101,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - { //Dest + { // Dest. 
VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -3109,7 +3122,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - //COPY + // COPY. { VkImageResolve image_copy_region; @@ -3136,7 +3149,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID vkCmdResolveImage(command_buffer, src_tex->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_tex->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy_region); } - // RESTORE LAYOUT for SRC and DST + // RESTORE LAYOUT for SRC and DST. uint32_t barrier_flags = 0; uint32_t access_flags = 0; @@ -3157,7 +3170,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID barrier_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; } - { //restore src + { // Restore src. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -3177,7 +3190,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } - { //make dst readable + { // Make dst readable. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; @@ -3230,13 +3243,13 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, VkImageLayout clear_layout = (src_tex->layout == VK_IMAGE_LAYOUT_GENERAL) ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - // NOTE: Perhaps the valid stages/accesses for a given owner should be a property of the owner. (Here and places like _get_buffer_from_owner) + // NOTE: Perhaps the valid stages/accesses for a given owner should be a property of the owner. (Here and places like _get_buffer_from_owner.) const VkPipelineStageFlags valid_texture_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; constexpr VkAccessFlags read_access = VK_ACCESS_SHADER_READ_BIT; constexpr VkAccessFlags read_write_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; const VkAccessFlags valid_texture_access = (src_tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) ? read_write_access : read_access; - { // Barrier from previous access with optional layout change (see clear_layout logic above) + { // Barrier from previous access with optional layout change (see clear_layout logic above). VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -3272,7 +3285,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, vkCmdClearColorImage(command_buffer, src_tex->image, clear_layout, &clear_color, 1, &range); - { // Barrier to post clear accesses (changing back the layout if needed) + { // Barrier to post clear accesses (changing back the layout if needed). 
uint32_t barrier_flags = 0; uint32_t access_flags = 0; @@ -3328,7 +3341,7 @@ bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_f _THREAD_SAFE_METHOD_ - //validate that this image is supported for the intended use + // Validate that this image is supported for the intended use. VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), vulkan_formats[p_format], &properties); VkFormatFeatureFlags flags; @@ -3372,12 +3385,12 @@ bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_f /********************/ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, InitialAction p_initial_action, FinalAction p_final_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, uint32_t p_view_count, Vector<TextureSamples> *r_samples) { - // Set up dependencies from/to external equivalent to the default (implicit) one, and then amend them + // Set up dependencies from/to external equivalent to the default (implicit) one, and then amend them. const VkPipelineStageFlags default_access_mask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | - VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | // From Section 7.1 of Vulkan API Spec v1.1.148 + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | // From Section 7.1 of Vulkan API Spec v1.1.148. VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR; VkPipelineStageFlags reading_stages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT; @@ -3413,27 +3426,27 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF description.pNext = nullptr; description.flags = 0; description.format = vulkan_formats[p_attachments[i].format]; - description.samples = rasterization_sample_count[p_attachments[i].samples]; + description.samples = _ensure_supported_sample_count(p_attachments[i].samples); bool is_sampled = p_attachments[i].usage_flags & TEXTURE_USAGE_SAMPLING_BIT; bool is_storage = p_attachments[i].usage_flags & TEXTURE_USAGE_STORAGE_BIT; bool is_depth = p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; // We can setup a framebuffer where we write to our VRS texture to set it up. - // We make the assumption here that if our texture is actually used as our VRS attachment, - // it is used as such for each subpass. This is fairly certain seeing the restrictions on subpasses. + // We make the assumption here that if our texture is actually used as our VRS attachment. + // It is used as such for each subpass. This is fairly certain seeing the restrictions on subpasses. bool is_vrs = p_attachments[i].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT && i == p_passes[0].vrs_attachment; if (is_vrs) { - // For VRS we only read, there is no writing to this texture + // For VRS we only read, there is no writing to this texture. description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; description.initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD; } else { - // For each UNDEFINED, assume the prior use was a *read*, as we'd be discarding the output of a write - // Also, each UNDEFINED will do an immediate layout transition (write), s.t. 
we must ensure execution synchronization vs. + // For each UNDEFINED, assume the prior use was a *read*, as we'd be discarding the output of a write. + // Also, each UNDEFINED will do an immediate layout transition (write), s.t. we must ensure execution synchronization vs // the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that - // stage + // stage. switch (is_depth ? p_initial_depth_action : p_initial_action) { case INITIAL_ACTION_CLEAR_REGION: @@ -3450,7 +3463,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. dependency_from_external.srcStageMask |= reading_stages; } } break; @@ -3467,7 +3480,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. dependency_from_external.srcStageMask |= reading_stages; } } break; @@ -3478,13 +3491,13 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; } else if (p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; dependency_from_external.srcStageMask |= reading_stages; } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. dependency_from_external.srcStageMask |= reading_stages; } } break; @@ -3501,12 +3514,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. dependency_from_external.srcStageMask |= reading_stages; } } break; default: { - ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here + ERR_FAIL_V(VK_NULL_HANDLE); // Should never reach here. } } } @@ -3517,7 +3530,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF int last_pass = p_passes.size() - 1; if (is_depth) { - //likely missing depth resolve? + // Likely missing depth resolve? 
if (p_passes[last_pass].depth_attachment == i) { used_last = true; } @@ -3527,14 +3540,15 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } } else { if (p_passes[last_pass].resolve_attachments.size()) { - //if using resolve attachments, check resolve attachments + // If using resolve attachments, check resolve attachments. for (int j = 0; j < p_passes[last_pass].resolve_attachments.size(); j++) { if (p_passes[last_pass].resolve_attachments[j] == i) { used_last = true; break; } } - } else { + } + if (!used_last) { for (int j = 0; j < p_passes[last_pass].color_attachments.size(); j++) { if (p_passes[last_pass].color_attachments[j] == i) { used_last = true; @@ -3567,13 +3581,13 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } if (is_vrs) { - // We don't change our VRS texture during this process + // We don't change our VRS texture during this process. description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - // TODO do we need to update our external dependency ? + // TODO: Do we need to update our external dependency? // update_external_dependency_for_store(dependency_to_external, is_sampled, is_storage, false); } else { switch (is_depth ? final_depth_action : final_action) { @@ -3591,8 +3605,8 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there - // TODO: What does this mean about the next usage (and thus appropriate dependency masks + description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. + // TODO: What does this mean about the next usage (and thus appropriate dependency masks. } } break; case FINAL_ACTION_DISCARD: { @@ -3607,7 +3621,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; - description.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. } } break; case FINAL_ACTION_CONTINUE: { @@ -3622,12 +3636,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF } else { description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; - description.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there + description.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Don't care what is there. } } break; default: { - ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here + ERR_FAIL_V(VK_NULL_HANDLE); // Should never reach here. } } } @@ -3711,7 +3725,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF reference.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; attachment_last_pass[attachment] = i; } - reference.aspectMask = 0; // TODO we need to set this here, possibly VK_IMAGE_ASPECT_COLOR_BIT ?? + reference.aspectMask = 0; // TODO: We need to set this here, possibly VK_IMAGE_ASPECT_COLOR_BIT? 
input_references.push_back(reference); } @@ -3737,7 +3751,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF bool multisample = p_attachments[attachment].samples > TEXTURE_SAMPLES_1; ERR_FAIL_COND_V_MSG(multisample, VK_NULL_HANDLE, "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachments can't be multisample."); reference.attachment = attachment_remap[attachment]; - reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL attachment_last_pass[attachment] = i; } reference.aspectMask = 0; @@ -3806,7 +3820,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), VK_NULL_HANDLE, "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + ")."); if (attachment_last_pass[attachment] != i) { - //preserve can still be used to keep depth or color from being discarded after use + // Preserve can still be used to keep depth or color from being discarded after use. attachment_last_pass[attachment] = i; preserve_references.push_back(attachment); } @@ -3875,14 +3889,14 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF subpass_dependencies.push_back(dependency); } /* - // NOTE: Big Mallet Approach -- any layout transition causes a full barrier + // NOTE: Big Mallet Approach -- any layout transition causes a full barrier. if (reference.layout != description.initialLayout) { - // NOTE: this should be smarter based on the texture's knowledge of its previous role + // NOTE: This should be smarter based on the texture's knowledge of its previous role. dependency_from_external.srcStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; dependency_from_external.srcAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT; } if (reference.layout != description.finalLayout) { - // NOTE: this should be smarter based on the texture's knowledge of its subsequent role + // NOTE: This should be smarter based on the texture's knowledge of its subsequent role. dependency_to_external.dstStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT; dependency_to_external.dstAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT; } @@ -3923,7 +3937,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF VkRenderPassMultiviewCreateInfo render_pass_multiview_create_info; if (p_view_count > 1) { - // this may no longer be needed with the new settings already including this + // This may no longer be needed with the new settings already including this. const VulkanContext::MultiviewCapabilities capabilities = context->get_multiview_capabilities(); @@ -3933,7 +3947,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF // Make sure we limit this to the number of views we support. ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, VK_NULL_HANDLE, "Hardware does not support requested number of views for Multiview render pass"); - // Set view masks for each subpass + // Set view masks for each subpass. 
for (uint32_t i = 0; i < subpasses.size(); i++) { view_masks.push_back(view_mask); } @@ -3971,7 +3985,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c passes.push_back(pass); return framebuffer_format_create_multipass(p_format, passes, p_view_count); } -RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, Vector<FramebufferPass> &p_passes, uint32_t p_view_count) { +RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count) { _THREAD_SAFE_METHOD_ FramebufferFormatKey key; @@ -3981,14 +3995,14 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key); if (E) { - //exists, return + // Exists, return. return E->get(); } Vector<TextureSamples> samples; - VkRenderPass render_pass = _render_pass_create(p_attachments, p_passes, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, p_view_count, &samples); //actions don't matter for this use case + VkRenderPass render_pass = _render_pass_create(p_attachments, p_passes, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, p_view_count, &samples); // Actions don't matter for this use case. - if (render_pass == VK_NULL_HANDLE) { //was likely invalid + if (render_pass == VK_NULL_HANDLE) { // Was likely invalid. return INVALID_ID; } FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT)); @@ -4009,7 +4023,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key); if (E) { - //exists, return + // Exists, return. return E->get(); } @@ -4019,7 +4033,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c subpass.flags = 0; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.viewMask = 0; - subpass.inputAttachmentCount = 0; //unsupported for now + subpass.inputAttachmentCount = 0; // Unsupported for now. subpass.pInputAttachments = nullptr; subpass.colorAttachmentCount = 0; subpass.pColorAttachments = nullptr; @@ -4046,7 +4060,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c ERR_FAIL_COND_V_MSG(res, 0, "vkCreateRenderPass2KHR for empty fb failed with error " + itos(res) + "."); - if (render_pass == VK_NULL_HANDLE) { //was likely invalid + if (render_pass == VK_NULL_HANDLE) { // Was likely invalid. 
return INVALID_ID; } @@ -4082,7 +4096,11 @@ RID RenderingDeviceVulkan::framebuffer_create_empty(const Size2i &p_size, Textur framebuffer.size = p_size; framebuffer.view_count = 1; - return framebuffer_owner.make_rid(framebuffer); + RID id = framebuffer_owner.make_rid(framebuffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } RID RenderingDeviceVulkan::framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check, uint32_t p_view_count) { @@ -4100,7 +4118,11 @@ RID RenderingDeviceVulkan::framebuffer_create(const Vector<RID> &p_texture_attac } else if (texture && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) { pass.vrs_attachment = i; } else { - pass.color_attachments.push_back(texture ? i : FramebufferPass::ATTACHMENT_UNUSED); + if (texture && texture->is_resolve_buffer) { + pass.resolve_attachments.push_back(i); + } else { + pass.color_attachments.push_back(texture ? i : FramebufferPass::ATTACHMENT_UNUSED); + } } } @@ -4110,7 +4132,7 @@ RID RenderingDeviceVulkan::framebuffer_create(const Vector<RID> &p_texture_attac return framebuffer_create_multipass(p_texture_attachments, passes, p_format_check, p_view_count); } -RID RenderingDeviceVulkan::framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) { +RID RenderingDeviceVulkan::framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) { _THREAD_SAFE_METHOD_ Vector<AttachmentFormat> attachments; @@ -4130,9 +4152,9 @@ RID RenderingDeviceVulkan::framebuffer_create_multipass(const Vector<RID> &p_tex size.height = texture->height; size_set = true; } else if (texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) { - // If this is not the first attachement we assume this is used as the VRS attachment - // in this case this texture will be 1/16th the size of the color attachement. - // So we skip the size check + // If this is not the first attachement we assume this is used as the VRS attachment. + // In this case this texture will be 1/16th the size of the color attachement. + // So we skip the size check. 
} else { ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, RID(), "All textures in a framebuffer should be the same size."); @@ -4162,6 +4184,9 @@ RID RenderingDeviceVulkan::framebuffer_create_multipass(const Vector<RID> &p_tex framebuffer.view_count = p_view_count; RID id = framebuffer_owner.make_rid(framebuffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif for (int i = 0; i < p_texture_attachments.size(); i++) { if (p_texture_attachments[i].is_valid()) { @@ -4181,6 +4206,22 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_get_form return framebuffer->format_id; } +bool RenderingDeviceVulkan::framebuffer_is_valid(RID p_framebuffer) const { + _THREAD_SAFE_METHOD_ + + return framebuffer_owner.owns(p_framebuffer); +} + +void RenderingDeviceVulkan::framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata) { + _THREAD_SAFE_METHOD_ + + Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer); + ERR_FAIL_COND(!framebuffer); + + framebuffer->invalidated_callback = p_callback; + framebuffer->invalidated_callback_userdata = p_userdata; +} + /*****************/ /**** SAMPLER ****/ /*****************/ @@ -4223,7 +4264,11 @@ RID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) { VkResult res = vkCreateSampler(device, &sampler_create_info, nullptr, &sampler); ERR_FAIL_COND_V_MSG(res, RID(), "vkCreateSampler failed with error " + itos(res) + "."); - return sampler_owner.make_rid(sampler); + RID id = sampler_owner.make_rid(sampler); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } /**********************/ @@ -4252,10 +4297,14 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vec _buffer_memory_barrier(buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, false); } - return vertex_buffer_owner.make_rid(buffer); + RID id = vertex_buffer_owner.make_rid(buffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } -// Internally reference counted, this ID is warranted to be unique for the same description, but needs to be freed as many times as it was allocated +// Internally reference counted, this ID is warranted to be unique for the same description, but needs to be freed as many times as it was allocated. RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(const Vector<VertexAttribute> &p_vertex_formats) { _THREAD_SAFE_METHOD_ @@ -4267,7 +4316,7 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons return *idptr; } - //does not exist, create one and cache it + // Does not exist, create one and cache it. VertexDescriptionCache vdcache; vdcache.bindings = memnew_arr(VkVertexInputBindingDescription, p_vertex_formats.size()); vdcache.attributes = memnew_arr(VkVertexInputAttributeDescription, p_vertex_formats.size()); @@ -4323,25 +4372,25 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo vertex_array.vertex_count = p_vertex_count; vertex_array.description = p_vertex_format; - vertex_array.max_instances_allowed = 0xFFFFFFFF; //by default as many as you want + vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default as many as you want. 
for (int i = 0; i < p_src_buffers.size(); i++) { Buffer *buffer = vertex_buffer_owner.get_or_null(p_src_buffers[i]); - //validate with buffer + // Validate with buffer. { const VertexAttribute &atf = vd.vertex_formats[i]; uint32_t element_size = get_format_vertex_size(atf.format); - ERR_FAIL_COND_V(element_size == 0, RID()); //should never happens since this was prevalidated + ERR_FAIL_COND_V(element_size == 0, RID()); // Should never happens since this was prevalidated. if (atf.frequency == VERTEX_FREQUENCY_VERTEX) { - //validate size for regular drawing + // Validate size for regular drawing. uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size; ERR_FAIL_COND_V_MSG(total_size > buffer->size, RID(), "Attachment (" + itos(i) + ") will read past the end of the buffer."); } else { - //validate size for instances drawing + // Validate size for instances drawing. uint64_t available = buffer->size - atf.offset; ERR_FAIL_COND_V_MSG(available < element_size, RID(), "Attachment (" + itos(i) + ") uses instancing, but it's just too small."); @@ -4352,7 +4401,7 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo } vertex_array.buffers.push_back(buffer->buffer); - vertex_array.offsets.push_back(0); //offset unused, but passing anyway + vertex_array.offsets.push_back(0); // Offset unused, but passing anyway. } RID id = vertex_array_owner.make_rid(vertex_array); @@ -4387,7 +4436,7 @@ RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBuff const uint16_t *index16 = (const uint16_t *)r; for (uint32_t i = 0; i < p_index_count; i++) { if (p_use_restart_indices && index16[i] == 0xFFFF) { - continue; //restart index, ignore + continue; // Restart index, ignore. } index_buffer.max_index = MAX(index16[i], index_buffer.max_index); } @@ -4395,7 +4444,7 @@ RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBuff const uint32_t *index32 = (const uint32_t *)r; for (uint32_t i = 0; i < p_index_count; i++) { if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) { - continue; //restart index, ignore + continue; // Restart index, ignore. } index_buffer.max_index = MAX(index32[i], index_buffer.max_index); } @@ -4413,7 +4462,11 @@ RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBuff _buffer_update(&index_buffer, 0, r, data_size); _buffer_memory_barrier(index_buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT, false); } - return index_buffer_owner.make_rid(index_buffer); + RID id = index_buffer_owner.make_rid(index_buffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } RID RenderingDeviceVulkan::index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) { @@ -4490,7 +4543,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa case glslang::EbtSampler: { //print_line("DEBUG: IsSampler"); if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) { - //texture buffers + // Texture buffers. 
if (reflection.getType()->getSampler().isCombined()) { layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER; @@ -4590,7 +4643,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa } break;*/ default: { if (reflection.getType()->getQualifier().hasOffset() || reflection.name.find(".") != std::string::npos) { - //member of uniform block? + // Member of uniform block? return true; } @@ -4627,10 +4680,10 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa uint32_t binding = reflection.getType()->getQualifier().layoutBinding; if (set < (uint32_t)bindings.size()) { - //check if this already exists + // Check if this already exists. for (int i = 0; i < bindings[set].size(); i++) { if (bindings[set][i].binding == binding) { - //already exists, verify that it's the same type + // Already exists, verify that it's the same type. if (bindings[set][i].descriptorType != layout_binding.descriptorType) { if (r_error) { *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform type."; @@ -4638,7 +4691,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa return false; } - //also, verify that it's the same size + // Also, verify that it's the same size. if (bindings[set][i].descriptorCount != layout_binding.descriptorCount || uniform_infos[set][i].length != info.length) { if (r_error) { *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform size."; @@ -4646,7 +4699,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa return false; } - //just append stage mask and return + // Just append stage mask and return. bindings.write[set].write[i].stageFlags |= shader_stage_masks[p_stage]; uniform_infos.write[set].write[i].stages |= 1 << p_stage; return true; @@ -4655,7 +4708,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa } layout_binding.binding = binding; layout_binding.stageFlags = shader_stage_masks[p_stage]; - layout_binding.pImmutableSamplers = nullptr; //no support for this yet + layout_binding.pImmutableSamplers = nullptr; // No support for this yet. info.stages = 1 << p_stage; info.binding = binding; @@ -4674,9 +4727,9 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa } #endif -//version 1: initial -//version 2: Added shader name -//version 3: Added writable +// Version 1: initial. +// Version 2: Added shader name. +// Version 3: Added writable. #define SHADER_BINARY_VERSION 3 @@ -4688,7 +4741,7 @@ struct RenderingDeviceVulkanShaderBinaryDataBinding { uint32_t type; uint32_t binding; uint32_t stages; - uint32_t length; //size of arrays (in total elements), or ubos (in bytes * total elements) + uint32_t length; // Size of arrays (in total elements), or ubos (in bytes * total elements). 
uint32_t writable; }; @@ -4729,7 +4782,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve binary_data.push_constant_size = 0; binary_data.push_constants_vk_stage = 0; - Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; //set bindings + Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; // Set bindings. Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants; uint32_t stages_processed = 0; @@ -4763,7 +4816,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve uint32_t stage = p_spirv[i].shader_stage; if (binding_count > 0) { - //Parse bindings + // Parse bindings. Vector<SpvReflectDescriptorBinding *> bindings; bindings.resize(binding_count); @@ -4870,23 +4923,23 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ")."); if (set < (uint32_t)uniform_info.size()) { - //check if this already exists + // Check if this already exists. bool exists = false; for (int k = 0; k < uniform_info[set].size(); k++) { if (uniform_info[set][k].binding == (uint32_t)info.binding) { - //already exists, verify that it's the same type + // Already exists, verify that it's the same type. ERR_FAIL_COND_V_MSG(uniform_info[set][k].type != info.type, Vector<uint8_t>(), "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform type."); - //also, verify that it's the same size + // Also, verify that it's the same size. ERR_FAIL_COND_V_MSG(uniform_info[set][k].length != info.length, Vector<uint8_t>(), "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform size."); - //also, verify that it has the same writability + // Also, verify that it has the same writability. ERR_FAIL_COND_V_MSG(uniform_info[set][k].writable != info.writable, Vector<uint8_t>(), "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different writability."); - //just append stage mask and return + // Just append stage mask and return. uniform_info.write[set].write[k].stages |= 1 << stage; exists = true; break; @@ -4894,7 +4947,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve } if (exists) { - continue; //merged + continue; // Merged. } } @@ -4909,7 +4962,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve } { - //specialization constants + // Specialization constants. uint32_t sc_count = 0; result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr); @@ -4930,7 +4983,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve SpvReflectSpecializationConstant *spc = spec_constants[j]; sconst.constant_id = spc->constant_id; - sconst.int_value = 0.0; // clear previous value JIC + sconst.int_value = 0.0; // Clear previous value JIC. 
switch (spc->constant_type) { case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: { sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL; @@ -4980,7 +5033,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining input variables."); for (uint32_t j = 0; j < iv_count; j++) { - if (input_vars[j] && input_vars[j]->decoration_flags == 0) { //regular input + if (input_vars[j] && input_vars[j]->decoration_flags == 0) { // Regular input. binary_data.vertex_input_mask |= (1 << uint32_t(input_vars[j]->location)); } } @@ -5049,7 +5102,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve Vector<Vector<uint8_t>> compressed_stages; Vector<uint32_t> smolv_size; - Vector<uint32_t> zstd_size; //if 0, stdno t used + Vector<uint32_t> zstd_size; // If 0, zstd not used. uint32_t stages_binary_size = 0; @@ -5061,7 +5114,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve ERR_FAIL_V_MSG(Vector<uint8_t>(), "Error compressing shader stage :" + String(shader_stage_names[p_spirv[i].shader_stage])); } else { smolv_size.push_back(smolv.size()); - { //zstd + { // zstd. Vector<uint8_t> zstd; zstd.resize(Compression::get_max_compressed_buffer_size(smolv.size(), Compression::MODE_ZSTD)); int dst_size = Compression::compress(zstd.ptrw(), &smolv[0], smolv.size(), Compression::MODE_ZSTD); @@ -5074,7 +5127,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve Vector<uint8_t> smv; smv.resize(smolv.size()); memcpy(smv.ptrw(), &smolv[0], smolv.size()); - zstd_size.push_back(0); //not using zstd + zstd_size.push_back(0); // Not using zstd. compressed_stages.push_back(smv); } } @@ -5094,12 +5147,12 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve binary_data.shader_name_len = shader_name_utf.length(); - uint32_t total_size = sizeof(uint32_t) * 3; //header + version + main datasize; + uint32_t total_size = sizeof(uint32_t) * 3; // Header + version + main datasize;. total_size += sizeof(RenderingDeviceVulkanShaderBinaryData); total_size += binary_data.shader_name_len; - if ((binary_data.shader_name_len % 4) != 0) { //alignment rules are really strange + if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange. total_size += 4 - (binary_data.shader_name_len % 4); } @@ -5110,7 +5163,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve total_size += sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) * specialization_constants.size(); - total_size += compressed_stages.size() * sizeof(uint32_t) * 3; //sizes + total_size += compressed_stages.size() * sizeof(uint32_t) * 3; // Sizes. total_size += stages_binary_size; Vector<uint8_t> ret; @@ -5121,7 +5174,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve binptr[0] = 'G'; binptr[1] = 'V'; binptr[2] = 'B'; - binptr[3] = 'D'; //godot vulkan binary data + binptr[3] = 'D'; // Godot vulkan binary data. 
offset += 4; encode_uint32(SHADER_BINARY_VERSION, binptr + offset); offset += sizeof(uint32_t); @@ -5132,7 +5185,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve memcpy(binptr + offset, shader_name_utf.ptr(), binary_data.shader_name_len); offset += binary_data.shader_name_len; - if ((binary_data.shader_name_len % 4) != 0) { //alignment rules are really strange + if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange. offset += 4 - (binary_data.shader_name_len % 4); } @@ -5180,7 +5233,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ uint32_t binsize = p_shader_binary.size(); uint32_t read_offset = 0; - //consistency check + // Consistency check. ERR_FAIL_COND_V(binsize < sizeof(uint32_t) * 3 + sizeof(RenderingDeviceVulkanShaderBinaryData), RID()); ERR_FAIL_COND_V(binptr[0] != 'G' || binptr[1] != 'V' || binptr[2] != 'B' || binptr[3] != 'D', RID()); @@ -5210,7 +5263,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ if (binary_data.shader_name_len) { name.parse_utf8((const char *)(binptr + read_offset), binary_data.shader_name_len); read_offset += binary_data.shader_name_len; - if ((binary_data.shader_name_len % 4) != 0) { //alignment rules are really strange + if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange. read_offset += 4 - (binary_data.shader_name_len % 4); } } @@ -5327,7 +5380,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ const uint8_t *src_smolv = nullptr; if (zstd_size > 0) { - //decompress to smolv + // Decompress to smolv. smolv.resize(smolv_size); int dec_smolv_size = Compression::decompress(smolv.ptrw(), smolv.size(), binptr + read_offset, zstd_size, Compression::MODE_ZSTD); ERR_FAIL_COND_V(dec_smolv_size != (int32_t)smolv_size, RID()); @@ -5356,7 +5409,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ ERR_FAIL_COND_V(read_offset != binsize, RID()); - //all good, let's create modules + // All good, let's create modules. _THREAD_SAFE_METHOD_ @@ -5412,11 +5465,11 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ shader.pipeline_stages.push_back(shader_stage); } - //proceed to create descriptor sets + // Proceed to create descriptor sets. if (success) { for (int i = 0; i < set_bindings.size(); i++) { - //empty ones are fine if they were not used according to spec (binding count will be 0) + // Empty ones are fine if they were not used according to spec (binding count will be 0). VkDescriptorSetLayoutCreateInfo layout_create_info; layout_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; layout_create_info.pNext = nullptr; @@ -5435,13 +5488,13 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ Shader::Set set; set.descriptor_set_layout = layout; set.uniform_info = uniform_info[i]; - //sort and hash + // Sort and hash. set.uniform_info.sort(); - uint32_t format = 0; //no format, default + uint32_t format = 0; // No format, default. if (set.uniform_info.size()) { - //has data, needs an actual format; + // Has data, needs an actual format. 
UniformSetFormat usformat; usformat.uniform_info = set.uniform_info; RBMap<UniformSetFormat, uint32_t>::Element *E = uniform_set_format_cache.find(usformat); @@ -5459,7 +5512,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ } if (success) { - //create pipeline layout + // Create pipeline layout. VkPipelineLayoutCreateInfo pipeline_layout_create_info; pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_create_info.pNext = nullptr; @@ -5498,7 +5551,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ } if (!success) { - //clean up if failed + // Clean up if failed. for (int i = 0; i < shader.pipeline_stages.size(); i++) { vkDestroyShaderModule(device, shader.pipeline_stages[i].module, nullptr); } @@ -5510,7 +5563,11 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_ ERR_FAIL_V_MSG(RID(), error_text); } - return shader_owner.make_rid(shader); + RID id = shader_owner.make_rid(shader); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } uint32_t RenderingDeviceVulkan::shader_get_vertex_input_attribute_mask(RID p_shader) { @@ -5543,7 +5600,11 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Ve _buffer_update(&buffer, 0, r, data_size); _buffer_memory_barrier(buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_UNIFORM_READ_BIT, false); } - return uniform_buffer_owner.make_rid(buffer); + RID id = uniform_buffer_owner.make_rid(buffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, uint32_t p_usage) { @@ -5613,8 +5674,12 @@ RID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataF ERR_FAIL_V_MSG(RID(), "Unable to create buffer view, error " + itos(res) + "."); } - //allocate the view - return texture_buffer_owner.make_rid(texture_buffer); + // Allocate the view. + RID id = texture_buffer_owner.make_rid(texture_buffer); +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + return id; } RenderingDeviceVulkan::DescriptorPool *RenderingDeviceVulkan::_descriptor_pool_allocate(const DescriptorPoolKey &p_key) { @@ -5632,17 +5697,17 @@ RenderingDeviceVulkan::DescriptorPool *RenderingDeviceVulkan::_descriptor_pool_a } if (!pool) { - //create a new one + // Create a new one. pool = memnew(DescriptorPool); pool->usage = 0; VkDescriptorPoolCreateInfo descriptor_pool_create_info; descriptor_pool_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool_create_info.pNext = nullptr; - descriptor_pool_create_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // can't think how somebody may NOT need this flag.. + descriptor_pool_create_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // Can't think how somebody may NOT need this flag. descriptor_pool_create_info.maxSets = max_descriptors_per_pool; Vector<VkDescriptorPoolSize> sizes; - //here comes more vulkan API strangeness + // Here comes more vulkan API strangeness. 
if (p_key.uniform_type[UNIFORM_TYPE_SAMPLER]) { VkDescriptorPoolSize s; @@ -5742,7 +5807,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->sets.size() || shader->sets[p_shader_set].uniform_info.size() == 0, RID(), "Desired set (" + itos(p_shader_set) + ") not used by shader."); - //see that all sets in shader are satisfied + // See that all sets in shader are satisfied. const Shader::Set &set = shader->sets[p_shader_set]; @@ -5755,11 +5820,11 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, Vector<VkWriteDescriptorSet> writes; DescriptorPoolKey pool_key; - //to keep them alive until update call + // To keep them alive until update call. List<Vector<VkDescriptorBufferInfo>> buffer_infos; List<Vector<VkBufferView>> buffer_views; List<Vector<VkDescriptorImageInfo>> image_infos; - //used for verification to make sure a uniform set does not use a framebuffer bound texture + // Used for verification to make sure a uniform set does not use a framebuffer bound texture. LocalVector<UniformSet::AttachableTexture> attachable_textures; Vector<Texture *> mutable_sampled_textures; Vector<Texture *> mutable_storage_textures; @@ -5780,14 +5845,14 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(), "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.uniform_type] + "'."); - VkWriteDescriptorSet write; //common header + VkWriteDescriptorSet write; // Common header. write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write.pNext = nullptr; - write.dstSet = VK_NULL_HANDLE; //will assign afterwards when everything is valid + write.dstSet = VK_NULL_HANDLE; // Will assign afterwards when everything is valid. write.dstBinding = set_uniform.binding; write.dstArrayElement = 0; write.descriptorCount = 0; - write.descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; //Invalid value. + write.descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; // Invalid value. write.pImageInfo = nullptr; write.pBufferInfo = nullptr; write.pTexelBufferView = nullptr; @@ -5860,12 +5925,12 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, } if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) { - //can also be used as storage, add to mutable sampled + // Can also be used as storage, add to mutable sampled. mutable_sampled_textures.push_back(texture); } if (texture->owner.is_valid()) { texture = texture_owner.get_or_null(texture->owner); - ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen + ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen. } img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; @@ -5913,13 +5978,13 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, } if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) { - //can also be used as storage, add to mutable sampled + // Can also be used as storage, add to mutable sampled. mutable_sampled_textures.push_back(texture); } if (texture->owner.is_valid()) { texture = texture_owner.get_or_null(texture->owner); - ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen + ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen. 
} img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; @@ -5961,13 +6026,13 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, img_info.imageView = texture->view; if (texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT) { - //can also be used as storage, add to mutable sampled + // Can also be used as storage, add to mutable sampled. mutable_storage_textures.push_back(texture); } if (texture->owner.is_valid()) { texture = texture_owner.get_or_null(texture->owner); - ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen + ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen. } img_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; @@ -6057,7 +6122,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, type_size = uniform.get_id_count() / 2; } break; case UNIFORM_TYPE_IMAGE_BUFFER: { - //todo + // Todo. } break; case UNIFORM_TYPE_UNIFORM_BUFFER: { @@ -6093,7 +6158,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, } ERR_FAIL_COND_V_MSG(!buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid."); - //if 0, then it's sized on link time + // If 0, then it's sized on link time. ERR_FAIL_COND_V_MSG(set_uniform.length > 0 && buffer->size != (uint32_t)set_uniform.length, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " does not match size of shader uniform: (" + itos(set_uniform.length) + ")."); @@ -6132,7 +6197,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, if (texture->owner.is_valid()) { texture = texture_owner.get_or_null(texture->owner); - ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen + ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen. } img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; @@ -6160,7 +6225,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, pool_key.uniform_type[set_uniform.type] += type_size; } - //need a descriptor pool + // Need a descriptor pool. DescriptorPool *pool = _descriptor_pool_allocate(pool_key); ERR_FAIL_COND_V(!pool, RID()); @@ -6177,7 +6242,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, VkResult res = vkAllocateDescriptorSets(device, &descriptor_set_allocate_info, &descriptor_set); if (res) { - _descriptor_pool_free(pool_key, pool); // meh + _descriptor_pool_free(pool_key, pool); // Meh. ERR_FAIL_V_MSG(RID(), "Cannot allocate descriptor sets, error " + itos(res) + "."); } @@ -6193,7 +6258,10 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, uniform_set.shader_id = p_shader; RID id = uniform_set_owner.make_rid(uniform_set); - //add dependencies +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + // Add dependencies. _add_dependency(id, p_shader); for (uint32_t i = 0; i < uniform_count; i++) { const Uniform &uniform = uniforms[i]; @@ -6203,7 +6271,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, } } - //write the contents + // Write the contents. 
if (writes.size()) { for (int i = 0; i < writes.size(); i++) { writes.write[i].dstSet = descriptor_set; @@ -6218,7 +6286,7 @@ bool RenderingDeviceVulkan::uniform_set_is_valid(RID p_uniform_set) { return uniform_set_owner.owns(p_uniform_set); } -void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_set, UniformSetInvalidatedCallback p_callback, void *p_userdata) { +void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata) { UniformSet *us = uniform_set_owner.get_or_null(p_uniform_set); ERR_FAIL_COND(!us); us->invalidated_callback = p_callback; @@ -6236,7 +6304,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint VkPipelineStageFlags dst_stage_mask = 0; VkAccessFlags dst_access = 0; if (p_post_barrier & BARRIER_MASK_TRANSFER) { - // Protect subsequent updates... + // Protect subsequent updates. dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -6248,7 +6316,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end."); - // no barrier should be needed here + // No barrier should be needed here. // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, true); Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_post_barrier); @@ -6284,7 +6352,7 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3 VkPipelineStageFlags dst_stage_mask = 0; VkAccessFlags dst_access = 0; if (p_post_barrier & BARRIER_MASK_TRANSFER) { - // Protect subsequent updates... + // Protect subsequent updates. dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -6297,7 +6365,7 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3 ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end."); - // should not be needed + // Should not be needed. // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_post_barrier); vkCmdFillBuffer(frames[frame].draw_command_buffer, buffer->buffer, p_offset, p_size, 0); @@ -6318,10 +6386,10 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3 Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { _THREAD_SAFE_METHOD_ - // It could be this buffer was just created + // It could be this buffer was just created. VkPipelineShaderStageCreateFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; - // Get the vulkan buffer and the potential stage/access possible + // Get the vulkan buffer and the potential stage/access possible. Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL); if (!buffer) { ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. 
Only Index and Vertex buffers allow retrieving."); @@ -6338,8 +6406,8 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { region.srcOffset = 0; region.dstOffset = 0; region.size = buffer->size; - vkCmdCopyBuffer(command_buffer, buffer->buffer, tmp_buffer.buffer, 1, &region); //dst buffer is in CPU, but I wonder if src buffer needs a barrier for this.. - //flush everything so memory can be safely mapped + vkCmdCopyBuffer(command_buffer, buffer->buffer, tmp_buffer.buffer, 1, &region); // Dst buffer is in CPU, but I wonder if src buffer needs a barrier for this. + // Flush everything so memory can be safely mapped. _flush(true); void *buffer_mem; @@ -6367,7 +6435,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) { _THREAD_SAFE_METHOD_ - //needs a shader + // Needs a shader. Shader *shader = shader_owner.get_or_null(p_shader); ERR_FAIL_COND_V(!shader, RID()); @@ -6375,13 +6443,13 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma "Compute shaders can't be used in render pipelines"); if (p_framebuffer_format == INVALID_ID) { - //if nothing provided, use an empty one (no attachments) + // If nothing provided, use an empty one (no attachments). p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>()); } ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), RID()); const FramebufferFormat &fb_format = framebuffer_formats[p_framebuffer_format]; - { //validate shader vs framebuffer + { // Validate shader vs framebuffer. ERR_FAIL_COND_V_MSG(p_for_render_pass >= uint32_t(fb_format.E->key().passes.size()), RID(), "Render pass requested for pipeline creation (" + itos(p_for_render_pass) + ") is out of bounds"); const FramebufferPass &pass = fb_format.E->key().passes[p_for_render_pass]; @@ -6394,17 +6462,17 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma ERR_FAIL_COND_V_MSG(shader->fragment_output_mask != output_mask, RID(), "Mismatch fragment shader output mask (" + itos(shader->fragment_output_mask) + ") and framebuffer color output mask (" + itos(output_mask) + ") when binding both in render pipeline."); } - //vertex + // Vertex. VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info; if (p_vertex_format != INVALID_ID) { - //uses vertices, else it does not + // Uses vertices, else it does not. ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID()); const VertexDescriptionCache &vd = vertex_formats[p_vertex_format]; pipeline_vertex_input_state_create_info = vd.create_info; - //validate with inputs + // Validate with inputs. for (uint32_t i = 0; i < 32; i++) { if (!(shader->vertex_input_mask & (1UL << i))) { continue; } @@ -6421,7 +6489,7 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma } } else { - //does not use vertices + // Does not use vertices.
pipeline_vertex_input_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; pipeline_vertex_input_state_create_info.pNext = nullptr; pipeline_vertex_input_state_create_info.flags = 0; @@ -6433,7 +6501,7 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma ERR_FAIL_COND_V_MSG(shader->vertex_input_mask != 0, RID(), "Shader contains vertex inputs, but no vertex input description was provided for pipeline creation."); } - //input assembly + // Input assembly. ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, RID()); @@ -6459,7 +6527,7 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma input_assembly_create_info.topology = topology_list[p_render_primitive]; input_assembly_create_info.primitiveRestartEnable = (p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX); - //tessellation + // Tessellation. VkPipelineTessellationStateCreateInfo tessellation_create_info; tessellation_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessellation_create_info.pNext = nullptr; @@ -6471,12 +6539,12 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma viewport_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state_create_info.pNext = nullptr; viewport_state_create_info.flags = 0; - viewport_state_create_info.viewportCount = 1; //if VR extensions are supported at some point, this will have to be customizable in the framebuffer format + viewport_state_create_info.viewportCount = 1; // If VR extensions are supported at some point, this will have to be customizable in the framebuffer format. viewport_state_create_info.pViewports = nullptr; viewport_state_create_info.scissorCount = 1; viewport_state_create_info.pScissors = nullptr; - //rasterization + // Rasterization. VkPipelineRasterizationStateCreateInfo rasterization_state_create_info; rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state_create_info.pNext = nullptr; @@ -6499,18 +6567,18 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma rasterization_state_create_info.depthBiasSlopeFactor = p_rasterization_state.depth_bias_slope_factor; rasterization_state_create_info.lineWidth = p_rasterization_state.line_width; - //multisample + // Multisample. VkPipelineMultisampleStateCreateInfo multisample_state_create_info; multisample_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state_create_info.pNext = nullptr; multisample_state_create_info.flags = 0; - multisample_state_create_info.rasterizationSamples = rasterization_sample_count[p_multisample_state.sample_count]; + multisample_state_create_info.rasterizationSamples = _ensure_supported_sample_count(p_multisample_state.sample_count); multisample_state_create_info.sampleShadingEnable = p_multisample_state.enable_sample_shading; multisample_state_create_info.minSampleShading = p_multisample_state.min_sample_shading; Vector<VkSampleMask> sample_mask; if (p_multisample_state.sample_mask.size()) { - //use sample mask + // Use sample mask. 
const int rasterization_sample_mask_expected_size[TEXTURE_SAMPLES_MAX] = { 1, 2, 4, 8, 16, 32, 64 }; @@ -6528,7 +6596,7 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma multisample_state_create_info.alphaToCoverageEnable = p_multisample_state.enable_alpha_to_coverage; multisample_state_create_info.alphaToOneEnable = p_multisample_state.enable_alpha_to_one; - //depth stencil + // Depth stencil. VkPipelineDepthStencilStateCreateInfo depth_stencil_state_create_info; depth_stencil_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; @@ -6568,7 +6636,7 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma depth_stencil_state_create_info.minDepthBounds = p_depth_stencil_state.depth_range_min; depth_stencil_state_create_info.maxDepthBounds = p_depth_stencil_state.depth_range_max; - //blend state + // Blend state. VkPipelineColorBlendStateCreateInfo color_blend_state_create_info; color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; color_blend_state_create_info.pNext = nullptr; @@ -6639,15 +6707,15 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma color_blend_state_create_info.blendConstants[2] = p_blend_state.blend_constant.b; color_blend_state_create_info.blendConstants[3] = p_blend_state.blend_constant.a; - //dynamic state + // Dynamic state. VkPipelineDynamicStateCreateInfo dynamic_state_create_info; dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dynamic_state_create_info.pNext = nullptr; dynamic_state_create_info.flags = 0; - Vector<VkDynamicState> dynamic_states; //vulkan is weird.. + Vector<VkDynamicState> dynamic_states; // Vulkan is weird. - dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); //viewport and scissor are always dynamic + dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); // Viewport and scissor are always dynamic. dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR); if (p_dynamic_state_flags & DYNAMIC_STATE_LINE_WIDTH) { @@ -6686,19 +6754,19 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma VkPipelineFragmentShadingRateStateCreateInfoKHR vrs_create_info; if (context->get_vrs_capabilities().attachment_vrs_supported) { // If VRS is used, this defines how the different VRS types are combined. - // combinerOps[0] decides how we use the output of pipeline and primitive (drawcall) VRS - // combinerOps[1] decides how we use the output of combinerOps[0] and our attachment VRS + // combinerOps[0] decides how we use the output of pipeline and primitive (drawcall) VRS. + // combinerOps[1] decides how we use the output of combinerOps[0] and our attachment VRS. vrs_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR; vrs_create_info.pNext = nullptr; vrs_create_info.fragmentSize = { 4, 4 }; - vrs_create_info.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; // We don't use pipeline/primitive VRS so this really doesn't matter - vrs_create_info.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR; // always use the outcome of attachment VRS if enabled + vrs_create_info.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; // We don't use pipeline/primitive VRS so this really doesn't matter. + vrs_create_info.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR; // Always use the outcome of attachment VRS if enabled. 
graphics_pipeline_nextptr = &vrs_create_info; } - //finally, pipeline create info + // Finally, pipeline create info. VkGraphicsPipelineCreateInfo graphics_pipeline_create_info; graphics_pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; @@ -6716,9 +6784,9 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma specialization_info.resize(pipeline_stages.size()); specialization_map_entries.resize(pipeline_stages.size()); for (int i = 0; i < shader->specialization_constants.size(); i++) { - //see if overridden + // See if overridden. const Shader::SpecializationConstant &sc = shader->specialization_constants[i]; - data_ptr[i] = sc.constant.int_value; //just copy the 32 bits + data_ptr[i] = sc.constant.int_value; // Just copy the 32 bits. for (int j = 0; j < p_specialization_constants.size(); j++) { const PipelineSpecializationConstant &psc = p_specialization_constants[j]; @@ -6813,9 +6881,12 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma }; pipeline.validation.primitive_minimum = primitive_minimum[p_render_primitive]; #endif - //create ID to associate with this pipeline + // Create ID to associate with this pipeline. RID id = render_pipeline_owner.make_rid(pipeline); - //now add all the dependencies +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + // Now add all the dependencies. _add_dependency(id, p_shader); return id; } @@ -6832,14 +6903,14 @@ bool RenderingDeviceVulkan::render_pipeline_is_valid(RID p_pipeline) { RID RenderingDeviceVulkan::compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants) { _THREAD_SAFE_METHOD_ - //needs a shader + // Needs a shader. Shader *shader = shader_owner.get_or_null(p_shader); ERR_FAIL_COND_V(!shader, RID()); ERR_FAIL_COND_V_MSG(!shader->is_compute, RID(), "Non-compute shaders can't be used in compute pipelines"); - //finally, pipeline create info + // Finally, pipeline create info. VkComputePipelineCreateInfo compute_pipeline_create_info; compute_pipeline_create_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; @@ -6859,9 +6930,9 @@ RID RenderingDeviceVulkan::compute_pipeline_create(RID p_shader, const Vector<Pi specialization_constant_data.resize(shader->specialization_constants.size()); uint32_t *data_ptr = specialization_constant_data.ptrw(); for (int i = 0; i < shader->specialization_constants.size(); i++) { - //see if overridden + // See if overridden. const Shader::SpecializationConstant &sc = shader->specialization_constants[i]; - data_ptr[i] = sc.constant.int_value; //just copy the 32 bits + data_ptr[i] = sc.constant.int_value; // Just copy the 32 bits. for (int j = 0; j < p_specialization_constants.size(); j++) { const PipelineSpecializationConstant &psc = p_specialization_constants[j]; @@ -6902,9 +6973,12 @@ RID RenderingDeviceVulkan::compute_pipeline_create(RID p_shader, const Vector<Pi pipeline.local_group_size[1] = shader->compute_local_size[1]; pipeline.local_group_size[2] = shader->compute_local_size[2]; - //create ID to associate with this pipeline + // Create ID to associate with this pipeline. RID id = compute_pipeline_owner.make_rid(pipeline); - //now add all the dependencies +#ifdef DEV_ENABLED + set_resource_name(id, "RID:" + itos(id.get_id())); +#endif + // Now add all the dependencies. 
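Note: under DEV_ENABLED, both pipeline owners above now register a default "RID:<id>" debug name at creation time, which the DEV_ENABLED free/leak reporting later in this patch can print back. Callers can still replace the default with something descriptive. A minimal sketch, assuming the usual RenderingDevice singleton and an illustrative blur_shader RID (names not from this patch):

    RID blur_pipeline = RD::get_singleton()->compute_pipeline_create(blur_shader);
    // Replaces the default "RID:<id>" entry recorded above; the name is also
    // forwarded to the Vulkan debug utils via context->set_object_name().
    RD::get_singleton()->set_resource_name(blur_pipeline, "Blur Compute Pipeline");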
_add_dependency(id, p_shader); return id; } @@ -6934,7 +7008,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::screen_get_framebuff _THREAD_SAFE_METHOD_ ERR_FAIL_COND_V_MSG(local_device.is_valid(), INVALID_ID, "Local devices have no screen"); - //very hacky, but not used often per frame so I guess ok + // Very hacky, but not used often per frame so I guess ok. VkFormat vkformat = context->get_screen_format(); DataFormat format = DATA_FORMAT_MAX; for (int i = 0; i < DATA_FORMAT_MAX; i++) { @@ -7036,7 +7110,7 @@ Error RenderingDeviceVulkan::_draw_list_setup_framebuffer(Framebuffer *p_framebu vk.view_count = p_framebuffer->view_count; if (!p_framebuffer->framebuffers.has(vk)) { - //need to create this version + // Need to create this version. Framebuffer::Version version; version.render_pass = _render_pass_create(framebuffer_formats[p_framebuffer->format_id].E->key().attachments, framebuffer_formats[p_framebuffer->format_id].E->key().passes, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_framebuffer->view_count); @@ -7112,7 +7186,7 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff } if (color_index < p_clear_colors.size() && texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) { - ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); //a bug + ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); // A bug. Color clear_color = p_clear_colors[color_index]; clear_value.color.float32[0] = clear_color.r; clear_value.color.float32[1] = clear_color.g; @@ -7143,7 +7217,7 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff ERR_CONTINUE_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), "Supplied storage texture " + itos(i) + " for draw list is not set to be used for storage."); if (texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT) { - //must change layout to general + // Must change layout to general. VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; @@ -7171,7 +7245,7 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff vkCmdBeginRenderPass(command_buffer, &render_pass_begin, subpass_contents); - //mark textures as bound + // Mark textures as bound. draw_list_bound_textures.clear(); draw_list_unbind_color_textures = p_final_color_action != FINAL_ACTION_CONTINUE; draw_list_unbind_depth_textures = p_final_depth_action != FINAL_ACTION_CONTINUE; @@ -7248,7 +7322,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu bool needs_clear_color = false; bool needs_clear_depth = false; - if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { //check custom region + if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region. Rect2i viewport(viewport_offset, viewport_size); Rect2i regioni = p_region; if (!(regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) && @@ -7277,15 +7351,17 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu } } - if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { //check clear values + if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { // Check clear values. 
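Note: in the clear-color validation that follows, color attachments flagged as resolve targets are now skipped when counting how many clear colors the caller must supply, via the new Texture::is_resolve_buffer field declared in the header further down. Assuming a typical MSAA framebuffer laid out as { msaa_color, resolve_color, depth }, with the resolve texture flagged by the framebuffer setup elsewhere in this patch, the caller-side expectation changes roughly like this (illustrative only):

    // Before this change, p_clear_color_values also needed an entry for the resolve attachment.
    // After it, only the non-resolve color attachment needs a clear value.
    Vector<Color> clear_colors;
    clear_colors.push_back(Color(0, 0, 0, 1)); // msaa_color only; resolve_color is skipped.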
int color_count = 0; for (int i = 0; i < framebuffer->texture_ids.size(); i++) { Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]); // We only check for our VRS usage bit if this is not the first texture id. // If it is the first we're likely populating our VRS texture. - // Bit dirty but.. + // Bit dirty but... if (!texture || (!(texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(i != 0 && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT))) { - color_count++; + if (!texture || !texture->is_resolve_buffer) { + color_count++; + } } } ERR_FAIL_COND_V_MSG(p_clear_color_values.size() != color_count, INVALID_ID, "Clear color values supplied (" + itos(p_clear_color_values.size()) + ") differ from the amount required for framebuffer color attachments (" + itos(color_count) + ")."); @@ -7355,7 +7431,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p bool needs_clear_color = false; bool needs_clear_depth = false; - if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { //check custom region + if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region. Rect2i viewport(viewport_offset, viewport_size); Rect2i regioni = p_region; if (!(regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) && @@ -7377,7 +7453,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p } } - if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { //check clear values + if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { // Check clear values. int color_count = 0; for (int i = 0; i < framebuffer->texture_ids.size(); i++) { @@ -7463,7 +7539,7 @@ RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(DrawL return nullptr; } - uint64_t index = p_id & ((DrawListID(1) << DrawListID(ID_BASE_SHIFT)) - 1); //mask + uint64_t index = p_id & ((DrawListID(1) << DrawListID(ID_BASE_SHIFT)) - 1); // Mask. if (index >= draw_list_count) { return nullptr; @@ -7475,6 +7551,16 @@ RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(DrawL } } +void RenderingDeviceVulkan::draw_list_set_blend_constants(DrawListID p_list, const Color &p_color) { + DrawList *dl = _get_draw_list_ptr(p_list); + ERR_FAIL_COND(!dl); +#ifdef DEBUG_ENABLED + ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified."); +#endif + + vkCmdSetBlendConstants(dl->command_buffer, p_color.components); +} + void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) { DrawList *dl = _get_draw_list_ptr(p_list); ERR_FAIL_COND(!dl); @@ -7489,7 +7575,7 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI #endif if (p_render_pipeline == dl->state.pipeline) { - return; //redundant state, return. + return; // Redundant state, return. } dl->state.pipeline = p_render_pipeline; @@ -7498,17 +7584,17 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI vkCmdBindPipeline(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline); if (dl->state.pipeline_shader != pipeline->shader) { - // shader changed, so descriptor sets may become incompatible. + // Shader changed, so descriptor sets may become incompatible. 
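Note: the new draw_list_set_blend_constants() added above is a thin wrapper over vkCmdSetBlendConstants(), so it should only be called when the bound render pipeline was created with DYNAMIC_STATE_BLEND_CONSTANTS in its dynamic-state flags; otherwise the constants baked into the pipeline's color-blend state apply and the dynamic call is invalid. A minimal sketch with illustrative names, assuming the pipeline was created with that flag:

    RD::DrawListID draw_list = RD::get_singleton()->draw_list_begin(framebuffer, RD::INITIAL_ACTION_CLEAR, RD::FINAL_ACTION_READ, RD::INITIAL_ACTION_CLEAR, RD::FINAL_ACTION_READ, clear_colors);
    RD::get_singleton()->draw_list_bind_render_pipeline(draw_list, pipeline_with_dynamic_blend);
    RD::get_singleton()->draw_list_set_blend_constants(draw_list, Color(0.25, 0.25, 0.25, 1.0)); // Feeds Color::components to vkCmdSetBlendConstants().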
- //go through ALL sets, and unbind them (and all those above) if the format is different + // Go through ALL sets, and unbind them (and all those above) if the format is different. - uint32_t pcount = pipeline->set_formats.size(); //formats count in this pipeline + uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline. dl->state.set_count = MAX(dl->state.set_count, pcount); - const uint32_t *pformats = pipeline->set_formats.ptr(); //pipeline set formats + const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats. - bool sets_valid = true; //once invalid, all above become invalid + bool sets_valid = true; // Once invalid, all above become invalid. for (uint32_t i = 0; i < pcount; i++) { - //if a part of the format is different, invalidate it (and the rest) + // If a part of the format is different, invalidate it (and the rest). if (!sets_valid || dl->state.sets[i].pipeline_expected_format != pformats[i]) { dl->state.sets[i].bound = false; dl->state.sets[i].pipeline_expected_format = pformats[i]; @@ -7517,11 +7603,11 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI } for (uint32_t i = pcount; i < dl->state.set_count; i++) { - //unbind the ones above (not used) if exist + // Unbind the ones above (not used) if exist. dl->state.sets[i].bound = false; } - dl->state.set_count = pcount; //update set count + dl->state.set_count = pcount; // Update set count. if (pipeline->push_constant_size) { dl->state.pipeline_push_constant_stages = pipeline->push_constant_stages; @@ -7534,7 +7620,7 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI } #ifdef DEBUG_ENABLED - //update render pass pipeline info + // Update render pass pipeline info. dl->validation.pipeline_active = true; dl->validation.pipeline_dynamic_state = pipeline->validation.dynamic_state; dl->validation.pipeline_vertex_format = pipeline->validation.vertex_format; @@ -7564,8 +7650,8 @@ void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_ dl->state.set_count = p_index; } - dl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; //update set pointer - dl->state.sets[p_index].bound = false; //needs rebind + dl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; // Update set pointer. + dl->state.sets[p_index].bound = false; // Needs rebind. dl->state.sets[p_index].uniform_set_format = uniform_set->format; dl->state.sets[p_index].uniform_set = p_uniform_set; @@ -7583,7 +7669,7 @@ void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_ } #ifdef DEBUG_ENABLED - { //validate that textures bound are not attached as framebuffer bindings + { // Validate that textures bound are not attached as framebuffer bindings. uint32_t attachable_count = uniform_set->attachable_textures.size(); const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr(); uint32_t bound_count = draw_list_bound_textures.size(); @@ -7609,7 +7695,7 @@ void RenderingDeviceVulkan::draw_list_bind_vertex_array(DrawListID p_list, RID p ERR_FAIL_COND(!vertex_array); if (dl->state.vertex_array == p_vertex_array) { - return; //already set + return; // Already set. } dl->state.vertex_array = p_vertex_array; @@ -7633,7 +7719,7 @@ void RenderingDeviceVulkan::draw_list_bind_index_array(DrawListID p_list, RID p_ ERR_FAIL_COND(!index_array); if (dl->state.index_array == p_index_array) { - return; //already set + return; // Already set. 
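Note: the unbinding walk above follows Vulkan's pipeline-layout compatibility rule: descriptor sets 0..N stay valid across a pipeline switch only while every set layout up to that index matches, and the first mismatch invalidates that set and everything above it. In practice, low-numbered sets shared between shaders (per-frame or scene data) survive a pipeline change, while higher per-material or per-object sets are re-bound lazily at the next draw. A minimal sketch with illustrative names; vertex/index setup omitted for brevity:

    RD::get_singleton()->draw_list_bind_render_pipeline(draw_list, pipeline_a);
    RD::get_singleton()->draw_list_bind_uniform_set(draw_list, scene_set, 0);    // Layout shared by both pipelines.
    RD::get_singleton()->draw_list_bind_uniform_set(draw_list, object_set_a, 1); // Layout specific to pipeline_a.
    RD::get_singleton()->draw_list_draw(draw_list, true);

    RD::get_singleton()->draw_list_bind_render_pipeline(draw_list, pipeline_b); // Set 0 survives the switch; set 1 is invalidated.
    RD::get_singleton()->draw_list_bind_uniform_set(draw_list, object_set_b, 1);
    RD::get_singleton()->draw_list_draw(draw_list, true);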
} dl->state.index_array = p_index_array; @@ -7685,30 +7771,30 @@ void RenderingDeviceVulkan::draw_list_draw(DrawListID p_list, bool p_use_indices ERR_FAIL_COND_MSG(!dl->validation.pipeline_active, "No render pipeline was set before attempting to draw."); if (dl->validation.pipeline_vertex_format != INVALID_ID) { - //pipeline uses vertices, validate format + // Pipeline uses vertices, validate format. ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID, "No vertex array was bound, and render pipeline expects vertices."); - //make sure format is right + // Make sure format is right. ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format, "The vertex format used to create the pipeline does not match the vertex format bound."); - //make sure number of instances is valid + // Make sure number of instances is valid. ERR_FAIL_COND_MSG(p_instances > dl->validation.vertex_max_instances_allowed, "Number of instances requested (" + itos(p_instances) + " is larger than the maximum number supported by the bound vertex array (" + itos(dl->validation.vertex_max_instances_allowed) + ")."); } if (dl->validation.pipeline_push_constant_size > 0) { - //using push constants, check that they were supplied + // Using push constants, check that they were supplied. ERR_FAIL_COND_MSG(!dl->validation.pipeline_push_constant_supplied, "The shader in this pipeline requires a push constant to be set before drawing, but it's not present."); } #endif - //Bind descriptor sets + // Bind descriptor sets. for (uint32_t i = 0; i < dl->state.set_count; i++) { if (dl->state.sets[i].pipeline_expected_format == 0) { - continue; //nothing expected by this pipeline + continue; // Nothing expected by this pipeline. } #ifdef DEBUG_ENABLED if (dl->state.sets[i].pipeline_expected_format != dl->state.sets[i].uniform_set_format) { @@ -7723,7 +7809,7 @@ void RenderingDeviceVulkan::draw_list_draw(DrawListID p_list, bool p_use_indices } #endif if (!dl->state.sets[i].bound) { - //All good, see if this requires re-binding + // All good, see if this requires re-binding. vkCmdBindDescriptorSets(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, dl->state.pipeline_layout, i, 1, &dl->state.sets[i].descriptor_set, 0, nullptr); dl->state.sets[i].bound = true; } @@ -7737,12 +7823,6 @@ void RenderingDeviceVulkan::draw_list_draw(DrawListID p_list, bool p_use_indices ERR_FAIL_COND_MSG(!dl->validation.index_array_size, "Draw command requested indices, but no index buffer was set."); - if (dl->validation.pipeline_vertex_format != INVALID_ID) { - //uses vertices, do some vertex validations - ERR_FAIL_COND_MSG(dl->validation.vertex_array_size < dl->validation.index_array_max_index, - "Index array references (max index: " + itos(dl->validation.index_array_max_index) + ") indices beyond the vertex array size (" + itos(dl->validation.vertex_array_size) + ")."); - } - ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices, "The usage of restart indices in index buffer does not match the render primitive in the pipeline."); #endif @@ -7864,7 +7944,7 @@ Error RenderingDeviceVulkan::draw_list_switch_to_next_pass_split(uint32_t p_spli } Error RenderingDeviceVulkan::_draw_list_allocate(const Rect2i &p_viewport, uint32_t p_splits, uint32_t p_subpass) { - // Lock while draw_list is active + // Lock while draw_list is active. 
_THREAD_SAFE_LOCK_ if (p_splits == 0) { @@ -7891,7 +7971,7 @@ Error RenderingDeviceVulkan::_draw_list_allocate(const Rect2i &p_viewport, uint3 VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo cmdbuf; - //no command buffer exists, create it. + // No command buffer exists, create it. cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdbuf.pNext = nullptr; cmdbuf.commandPool = split_draw_list_allocators[i].command_pool; @@ -7910,7 +7990,7 @@ Error RenderingDeviceVulkan::_draw_list_allocate(const Rect2i &p_viewport, uint3 draw_list_split = true; for (uint32_t i = 0; i < p_splits; i++) { - //take a command buffer and initialize it + // Take a command buffer and initialize it. VkCommandBuffer command_buffer = split_draw_list_allocators[i].command_buffers[frame]; VkCommandBufferInheritanceInfo inheritance_info; @@ -7920,7 +8000,7 @@ Error RenderingDeviceVulkan::_draw_list_allocate(const Rect2i &p_viewport, uint3 inheritance_info.subpass = p_subpass; inheritance_info.framebuffer = draw_list_vkframebuffer; inheritance_info.occlusionQueryEnable = false; - inheritance_info.queryFlags = 0; //? + inheritance_info.queryFlags = 0; // ? inheritance_info.pipelineStatistics = 0; VkCommandBufferBeginInfo cmdbuf_begin; @@ -7953,7 +8033,7 @@ Error RenderingDeviceVulkan::_draw_list_allocate(const Rect2i &p_viewport, uint3 void RenderingDeviceVulkan::_draw_list_free(Rect2i *r_last_viewport) { if (draw_list_split) { - //send all command buffers + // Send all command buffers. VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * draw_list_count); for (uint32_t i = 0; i < draw_list_count; i++) { vkEndCommandBuffer(draw_list[i].command_buffer); @@ -7973,12 +8053,12 @@ void RenderingDeviceVulkan::_draw_list_free(Rect2i *r_last_viewport) { if (r_last_viewport) { *r_last_viewport = draw_list->viewport; } - //just end the list + // Just end the list. memdelete(draw_list); draw_list = nullptr; } - // draw_list is no longer active + // Draw_list is no longer active. _THREAD_SAFE_UNLOCK_ } @@ -7993,7 +8073,7 @@ void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) { for (int i = 0; i < draw_list_bound_textures.size(); i++) { Texture *texture = texture_owner.get_or_null(draw_list_bound_textures[i]); - ERR_CONTINUE(!texture); //wtf + ERR_CONTINUE(!texture); // Wtf. if (draw_list_unbind_color_textures && (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) { texture->bound = false; } @@ -8065,8 +8145,8 @@ void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) { draw_list_storage_textures.clear(); // To ensure proper synchronization, we must make sure rendering is done before: - // * Some buffer is copied - // * Another render pass happens (since we may be done) + // * Some buffer is copied. + // * Another render pass happens (since we may be done). VkMemoryBarrier mem_barrier; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; @@ -8091,7 +8171,7 @@ RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_ ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time."); ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time."); - // Lock while compute_list is active + // Lock while compute_list is active. 
_THREAD_SAFE_LOCK_ compute_list = memnew(ComputeList); @@ -8111,7 +8191,7 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l ERR_FAIL_COND(!pipeline); if (p_compute_pipeline == cl->state.pipeline) { - return; //redundant state, return. + return; // Redundant state, return. } cl->state.pipeline = p_compute_pipeline; @@ -8120,17 +8200,17 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l vkCmdBindPipeline(cl->command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline->pipeline); if (cl->state.pipeline_shader != pipeline->shader) { - // shader changed, so descriptor sets may become incompatible. + // Shader changed, so descriptor sets may become incompatible. - //go through ALL sets, and unbind them (and all those above) if the format is different + // Go through ALL sets, and unbind them (and all those above) if the format is different. - uint32_t pcount = pipeline->set_formats.size(); //formats count in this pipeline + uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline. cl->state.set_count = MAX(cl->state.set_count, pcount); - const uint32_t *pformats = pipeline->set_formats.ptr(); //pipeline set formats + const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats. - bool sets_valid = true; //once invalid, all above become invalid + bool sets_valid = true; // Once invalid, all above become invalid. for (uint32_t i = 0; i < pcount; i++) { - //if a part of the format is different, invalidate it (and the rest) + // If a part of the format is different, invalidate it (and the rest). if (!sets_valid || cl->state.sets[i].pipeline_expected_format != pformats[i]) { cl->state.sets[i].bound = false; cl->state.sets[i].pipeline_expected_format = pformats[i]; @@ -8139,11 +8219,11 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l } for (uint32_t i = pcount; i < cl->state.set_count; i++) { - //unbind the ones above (not used) if exist + // Unbind the ones above (not used) if exist. cl->state.sets[i].bound = false; } - cl->state.set_count = pcount; //update set count + cl->state.set_count = pcount; // Update set count. if (pipeline->push_constant_size) { cl->state.pipeline_push_constant_stages = pipeline->push_constant_stages; @@ -8159,7 +8239,7 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l } #ifdef DEBUG_ENABLED - //update compute pass pipeline info + // Update compute pass pipeline info. cl->validation.pipeline_active = true; cl->validation.pipeline_push_constant_size = pipeline->push_constant_size; #endif @@ -8187,8 +8267,8 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, cl->state.set_count = p_index; } - cl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; //update set pointer - cl->state.sets[p_index].bound = false; //needs rebind + cl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; // Update set pointer. + cl->state.sets[p_index].bound = false; // Needs rebind. 
cl->state.sets[p_index].uniform_set_format = uniform_set->format; cl->state.sets[p_index].uniform_set = p_uniform_set; @@ -8291,7 +8371,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, textures_to_storage[i]->layout = VK_IMAGE_LAYOUT_GENERAL; - cl->state.textures_to_sampled_layout.insert(textures_to_storage[i]); //needs to go back to sampled layout afterwards + cl->state.textures_to_sampled_layout.insert(textures_to_storage[i]); // Needs to go back to sampled layout afterwards. } } @@ -8304,7 +8384,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, } #if 0 - { //validate that textures bound are not attached as framebuffer bindings + { // Validate that textures bound are not attached as framebuffer bindings. uint32_t attachable_count = uniform_set->attachable_textures.size(); const RID *attachable_ptr = uniform_set->attachable_textures.ptr(); uint32_t bound_count = draw_list_bound_textures.size(); @@ -8364,18 +8444,18 @@ void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to draw."); if (cl->validation.pipeline_push_constant_size > 0) { - //using push constants, check that they were supplied + // Using push constants, check that they were supplied. ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied, "The shader in this pipeline requires a push constant to be set before drawing, but it's not present."); } #endif - //Bind descriptor sets + // Bind descriptor sets. for (uint32_t i = 0; i < cl->state.set_count; i++) { if (cl->state.sets[i].pipeline_expected_format == 0) { - continue; //nothing expected by this pipeline + continue; // Nothing expected by this pipeline. } #ifdef DEBUG_ENABLED if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) { @@ -8390,7 +8470,7 @@ void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t } #endif if (!cl->state.sets[i].bound) { - //All good, see if this requires re-binding + // All good, see if this requires re-binding. vkCmdBindDescriptorSets(cl->command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, cl->state.pipeline_layout, i, 1, &cl->state.sets[i].descriptor_set, 0, nullptr); cl->state.sets[i].bound = true; } @@ -8416,7 +8496,7 @@ void RenderingDeviceVulkan::compute_list_dispatch_threads(ComputeListID p_list, ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to draw."); if (cl->validation.pipeline_push_constant_size > 0) { - //using push constants, check that they were supplied + // Using push constants, check that they were supplied. ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied, "The shader in this pipeline requires a push constant to be set before drawing, but it's not present."); } @@ -8447,18 +8527,18 @@ void RenderingDeviceVulkan::compute_list_dispatch_indirect(ComputeListID p_list, ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to draw."); if (cl->validation.pipeline_push_constant_size > 0) { - //using push constants, check that they were supplied + // Using push constants, check that they were supplied. ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied, "The shader in this pipeline requires a push constant to be set before drawing, but it's not present."); } #endif - //Bind descriptor sets + // Bind descriptor sets. 
for (uint32_t i = 0; i < cl->state.set_count; i++) { if (cl->state.sets[i].pipeline_expected_format == 0) { - continue; //nothing expected by this pipeline + continue; // Nothing expected by this pipeline. } #ifdef DEBUG_ENABLED if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) { @@ -8473,7 +8553,7 @@ void RenderingDeviceVulkan::compute_list_dispatch_indirect(ComputeListID p_list, } #endif if (!cl->state.sets[i].bound) { - //All good, see if this requires re-binding + // All good, see if this requires re-binding. vkCmdBindDescriptorSets(cl->command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, cl->state.pipeline_layout, i, 1, &cl->state.sets[i].descriptor_set, 0, nullptr); cl->state.sets[i].bound = true; } @@ -8567,7 +8647,7 @@ void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) { memdelete(compute_list); compute_list = nullptr; - // compute_list is no longer active + // Compute_list is no longer active. _THREAD_SAFE_UNLOCK_ } @@ -8660,7 +8740,7 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame "Draw list index (" + itos(i) + ") is created with a framebuffer format incompatible with this render pass."); if (dl->validation.active) { - //needs to be closed, so close it. + // Needs to be closed, so close it. vkEndCommandBuffer(dl->command_buffer); dl->validation.active = false; } @@ -8677,7 +8757,15 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame #endif void RenderingDeviceVulkan::_free_internal(RID p_id) { - //push everything so it's disposed of next time this frame index is processed (means, it's safe to do it) +#ifdef DEV_ENABLED + String resource_name; + if (resource_names.has(p_id)) { + resource_name = resource_names[p_id]; + resource_names.erase(p_id); + } +#endif + + // Push everything so it's disposed of next time this frame index is processed (means, it's safe to do it). if (texture_owner.owns(p_id)) { Texture *texture = texture_owner.get_or_null(p_id); frames[frame].textures_to_dispose_of.push_back(*texture); @@ -8685,6 +8773,11 @@ void RenderingDeviceVulkan::_free_internal(RID p_id) { } else if (framebuffer_owner.owns(p_id)) { Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id); frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer); + + if (framebuffer->invalidated_callback != nullptr) { + framebuffer->invalidated_callback(framebuffer->invalidated_callback_userdata); + } + framebuffer_owner.free(p_id); } else if (sampler_owner.owns(p_id)) { VkSampler *sampler = sampler_owner.get_or_null(p_id); @@ -8741,30 +8834,34 @@ void RenderingDeviceVulkan::_free_internal(RID p_id) { frames[frame].compute_pipelines_to_dispose_of.push_back(*pipeline); compute_pipeline_owner.free(p_id); } else { +#ifdef DEV_ENABLED + ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()) + " " + resource_name); +#else ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id())); +#endif } } void RenderingDeviceVulkan::free(RID p_id) { _THREAD_SAFE_METHOD_ - _free_dependencies(p_id); //recursively erase dependencies first, to avoid potential API problems + _free_dependencies(p_id); // Recursively erase dependencies first, to avoid potential API problems. _free_internal(p_id); } -// The full list of resources that can be named is in the VkObjectType enum +// The full list of resources that can be named is in the VkObjectType enum. // We just expose the resources that are owned and can be accessed easily. 
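Note: _free_internal() now erases the DEV_ENABLED debug name (so the "Attempted to free invalid ID" error can include it) and, for framebuffers, fires the invalidation callback before the framebuffer is queued for disposal. The callback uses the same shared InvalidationCallback signature as uniform_set_set_invalidation_callback() above, i.e. a plain void (*)(void *p_userdata); the matching registration method for framebuffers is assumed to be added elsewhere in this patch and is not shown in these hunks. Sketch of a conforming callback, with FramebufferCacheEntry as an illustrative user-side type:

    static void _framebuffer_invalidated(void *p_userdata) {
        // Drop any cached RID tied to the freed framebuffer so it gets rebuilt on next use.
        FramebufferCacheEntry *entry = static_cast<FramebufferCacheEntry *>(p_userdata);
        entry->framebuffer = RID();
    }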
void RenderingDeviceVulkan::set_resource_name(RID p_id, const String p_name) { if (texture_owner.owns(p_id)) { Texture *texture = texture_owner.get_or_null(p_id); if (texture->owner.is_null()) { - // Don't set the source texture's name when calling on a texture view + // Don't set the source texture's name when calling on a texture view. context->set_object_name(VK_OBJECT_TYPE_IMAGE, uint64_t(texture->image), p_name); } context->set_object_name(VK_OBJECT_TYPE_IMAGE_VIEW, uint64_t(texture->view), p_name + " View"); } else if (framebuffer_owner.owns(p_id)) { //Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id); - // Not implemented for now as the relationship between Framebuffer and RenderPass is very complex + // Not implemented for now as the relationship between Framebuffer and RenderPass is very complex. } else if (sampler_owner.owns(p_id)) { VkSampler *sampler = sampler_owner.get_or_null(p_id); context->set_object_name(VK_OBJECT_TYPE_SAMPLER, uint64_t(*sampler), p_name); @@ -8803,7 +8900,11 @@ void RenderingDeviceVulkan::set_resource_name(RID p_id, const String p_name) { context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(pipeline->pipeline_layout), p_name + " Layout"); } else { ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id())); + return; } +#ifdef DEV_ENABLED + resource_names[p_id] = p_name; +#endif } void RenderingDeviceVulkan::draw_command_begin_label(String p_label_name, const Color p_color) { @@ -8847,17 +8948,17 @@ void RenderingDeviceVulkan::_finalize_command_bufers() { ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work)."); } - { //complete the setup buffer (that needs to be processed before anything else) + { // Complete the setup buffer (that needs to be processed before anything else). vkEndCommandBuffer(frames[frame].setup_command_buffer); vkEndCommandBuffer(frames[frame].draw_command_buffer); } } void RenderingDeviceVulkan::_begin_frame() { - //erase pending resources + // Erase pending resources. _free_pending_resources(frame); - //create setup command buffer and set as the setup buffer + // Create setup command buffer and set as the setup buffer. { VkCommandBufferBeginInfo cmdbuf_begin; @@ -8876,13 +8977,13 @@ void RenderingDeviceVulkan::_begin_frame() { if (local_device.is_null()) { context->append_command_buffer(frames[frame].draw_command_buffer); - context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else + context->set_setup_buffer(frames[frame].setup_command_buffer); // Append now so it's added before everything else. } } - //advance current frame + // Advance current frame. frames_drawn++; - //advance staging buffer if used + // Advance staging buffer if used. if (staging_buffer_used) { staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size(); staging_buffer_used = false; @@ -8900,6 +9001,25 @@ void RenderingDeviceVulkan::_begin_frame() { frames[frame].index = Engine::get_singleton()->get_frames_drawn(); } +VkSampleCountFlagBits RenderingDeviceVulkan::_ensure_supported_sample_count(TextureSamples p_requested_sample_count) const { + VkSampleCountFlags sample_count_flags = limits.framebufferColorSampleCounts & limits.framebufferDepthSampleCounts; + + if (sample_count_flags & rasterization_sample_count[p_requested_sample_count]) { + // The requested sample count is supported. 
+ return rasterization_sample_count[p_requested_sample_count]; + } else { + // Find the closest lower supported sample count. + VkSampleCountFlagBits sample_count = rasterization_sample_count[p_requested_sample_count]; + while (sample_count > VK_SAMPLE_COUNT_1_BIT) { + if (sample_count_flags & rasterization_sample_count[sample_count]) { + return sample_count; + } + sample_count = (VkSampleCountFlagBits)(sample_count >> 1); + } + } + return VK_SAMPLE_COUNT_1_BIT; +} + void RenderingDeviceVulkan::swap_buffers() { ERR_FAIL_COND_MSG(local_device.is_valid(), "Local devices can't swap buffers."); _THREAD_SAFE_METHOD_ @@ -8907,7 +9027,7 @@ void RenderingDeviceVulkan::swap_buffers() { _finalize_command_bufers(); screen_prepared = false; - //swap buffers + // Swap buffers. context->swap_buffers(); frame = (frame + 1) % frame_count; @@ -8953,15 +9073,15 @@ VmaPool RenderingDeviceVulkan::_find_or_create_small_allocs_pool(uint32_t p_mem_ pci.pMemoryAllocateNext = nullptr; VmaPool pool = VK_NULL_HANDLE; VkResult res = vmaCreatePool(allocator, &pci, &pool); - small_allocs_pools[p_mem_type_index] = pool; // Don't try to create it again if failed the first time + small_allocs_pools[p_mem_type_index] = pool; // Don't try to create it again if failed the first time. ERR_FAIL_COND_V_MSG(res, pool, "vmaCreatePool failed with error " + itos(res) + "."); return pool; } void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { - //free in dependency usage order, so nothing weird happens - //pipelines + // Free in dependency usage order, so nothing weird happens. + // Pipelines. while (frames[p_frame].render_pipelines_to_dispose_of.front()) { RenderPipeline *pipeline = &frames[p_frame].render_pipelines_to_dispose_of.front()->get(); @@ -8978,7 +9098,7 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].compute_pipelines_to_dispose_of.pop_front(); } - //uniform sets + // Uniform sets. while (frames[p_frame].uniform_sets_to_dispose_of.front()) { UniformSet *uniform_set = &frames[p_frame].uniform_sets_to_dispose_of.front()->get(); @@ -8988,7 +9108,7 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].uniform_sets_to_dispose_of.pop_front(); } - //buffer views + // Buffer views. while (frames[p_frame].buffer_views_to_dispose_of.front()) { VkBufferView buffer_view = frames[p_frame].buffer_views_to_dispose_of.front()->get(); @@ -8997,19 +9117,19 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].buffer_views_to_dispose_of.pop_front(); } - //shaders + // Shaders. while (frames[p_frame].shaders_to_dispose_of.front()) { Shader *shader = &frames[p_frame].shaders_to_dispose_of.front()->get(); - //descriptor set layout for each set + // Descriptor set layout for each set. for (int i = 0; i < shader->sets.size(); i++) { vkDestroyDescriptorSetLayout(device, shader->sets[i].descriptor_set_layout, nullptr); } - //pipeline layout + // Pipeline layout. vkDestroyPipelineLayout(device, shader->pipeline_layout, nullptr); - //shaders themselves + // Shaders themselves. for (int i = 0; i < shader->pipeline_stages.size(); i++) { vkDestroyShaderModule(device, shader->pipeline_stages[i].module, nullptr); } @@ -9017,7 +9137,7 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].shaders_to_dispose_of.pop_front(); } - //samplers + // Samplers. 
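Note: the _ensure_supported_sample_count() helper added above (and now used for rasterizationSamples in render_pipeline_create()) intersects the device's color and depth framebuffer sample-count caps and falls back to the closest lower supported MSAA level instead of handing Vulkan an unsupported value. A minimal standalone sketch of that fallback logic under an assumed 4x device cap; it tests the flag bit itself in the loop condition, which sidesteps indexing the rasterization_sample_count table with a VkSampleCountFlagBits value:

    // Assume the combined caps are 1x | 2x | 4x: requesting 8x falls back to 4x, requesting 2x is kept.
    VkSampleCountFlags caps = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
    VkSampleCountFlagBits requested = VK_SAMPLE_COUNT_8_BIT;
    while (requested > VK_SAMPLE_COUNT_1_BIT && !(caps & requested)) {
        requested = (VkSampleCountFlagBits)(requested >> 1); // Halve until a supported bit is found.
    }
    // requested == VK_SAMPLE_COUNT_4_BIT at this point.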
while (frames[p_frame].samplers_to_dispose_of.front()) { VkSampler sampler = frames[p_frame].samplers_to_dispose_of.front()->get(); @@ -9026,12 +9146,12 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].samplers_to_dispose_of.pop_front(); } - //framebuffers + // Framebuffers. while (frames[p_frame].framebuffers_to_dispose_of.front()) { Framebuffer *framebuffer = &frames[p_frame].framebuffers_to_dispose_of.front()->get(); for (const KeyValue<Framebuffer::VersionKey, Framebuffer::Version> &E : framebuffer->framebuffers) { - //first framebuffer, then render pass because it depends on it + // First framebuffer, then render pass because it depends on it. vkDestroyFramebuffer(device, E.value.framebuffer, nullptr); vkDestroyRenderPass(device, E.value.render_pass, nullptr); } @@ -9039,7 +9159,7 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { frames[p_frame].framebuffers_to_dispose_of.pop_front(); } - //textures + // Textures. while (frames[p_frame].textures_to_dispose_of.front()) { Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get(); @@ -9048,14 +9168,14 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { } vkDestroyImageView(device, texture->view, nullptr); if (texture->owner.is_null()) { - //actually owns the image and the allocation too + // Actually owns the image and the allocation too. image_memory -= texture->allocation_info.size; vmaDestroyImage(allocator, texture->image, texture->allocation); } frames[p_frame].textures_to_dispose_of.pop_front(); } - //buffers + // Buffers. while (frames[p_frame].buffers_to_dispose_of.front()) { _buffer_free(&frames[p_frame].buffers_to_dispose_of.front()->get()); @@ -9087,9 +9207,9 @@ uint64_t RenderingDeviceVulkan::get_memory_usage(MemoryType p_type) const { void RenderingDeviceVulkan::_flush(bool p_current_frame) { if (local_device.is_valid() && !p_current_frame) { - return; //flushing previous frames has no effect with local device + return; // Flushing previous frames has no effect with local device. } - //not doing this crashes RADV (undefined behavior) + // Not doing this crashes RADV (undefined behavior). if (p_current_frame) { vkEndCommandBuffer(frames[frame].setup_command_buffer); vkEndCommandBuffer(frames[frame].draw_command_buffer); @@ -9113,7 +9233,7 @@ void RenderingDeviceVulkan::_flush(bool p_current_frame) { } else { context->flush(p_current_frame, p_current_frame); - //re-create the setup command + // Re-create the setup command. if (p_current_frame) { VkCommandBufferBeginInfo cmdbuf_begin; cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; @@ -9123,7 +9243,7 @@ void RenderingDeviceVulkan::_flush(bool p_current_frame) { VkResult err = vkBeginCommandBuffer(frames[frame].setup_command_buffer, &cmdbuf_begin); ERR_FAIL_COND_MSG(err, "vkBeginCommandBuffer failed with error " + itos(err) + "."); - context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else + context->set_setup_buffer(frames[frame].setup_command_buffer); // Append now so it's added before everything else. } if (p_current_frame) { @@ -9141,7 +9261,7 @@ void RenderingDeviceVulkan::_flush(bool p_current_frame) { } void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_device) { - // get our device capabilities + // Get our device capabilities. 
{ device_capabilities.version_major = p_context->get_vulkan_major(); device_capabilities.version_minor = p_context->get_vulkan_minor(); @@ -9154,12 +9274,12 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de local_device = p_context->local_device_create(); device = p_context->local_device_get_vk_device(local_device); } else { - frame_count = p_context->get_swapchain_image_count() + 1; //always need one extra to ensure it's unused at any time, without having to use a fence for this. + frame_count = p_context->get_swapchain_image_count() + 1; // Always need one extra to ensure it's unused at any time, without having to use a fence for this. } limits = p_context->get_device_limits(); max_timestamp_query_elements = 256; - { //initialize allocator + { // Initialize allocator. VmaAllocatorCreateInfo allocatorInfo; memset(&allocatorInfo, 0, sizeof(VmaAllocatorCreateInfo)); @@ -9171,11 +9291,11 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de frames.resize(frame_count); frame = 0; - //create setup and frame buffers + // Create setup and frame buffers. for (int i = 0; i < frame_count; i++) { frames[i].index = 0; - { //create command pool, one per frame is recommended + { // Create command pool, one per frame is recommended. VkCommandPoolCreateInfo cmd_pool_info; cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_info.pNext = nullptr; @@ -9186,10 +9306,10 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de ERR_FAIL_COND_MSG(res, "vkCreateCommandPool failed with error " + itos(res) + "."); } - { //create command buffers + { // Create command buffers. VkCommandBufferAllocateInfo cmdbuf; - //no command buffer exists, create it. + // No command buffer exists, create it. cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdbuf.pNext = nullptr; cmdbuf.commandPool = frames[i].command_pool; @@ -9204,7 +9324,7 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de } { - //create query pool + // Create query pool. VkQueryPoolCreateInfo query_pool_create_info; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.flags = 0; @@ -9226,8 +9346,8 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de } { - //begin the first command buffer for the first frame, so - //setting up things can be done in the meantime until swap_buffers(), which is called before advance. + // Begin the first command buffer for the first frame, so + // setting up things can be done in the meantime until swap_buffers(), which is called before advance. VkCommandBufferBeginInfo cmdbuf_begin; cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmdbuf_begin.pNext = nullptr; @@ -9240,42 +9360,42 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de err = vkBeginCommandBuffer(frames[0].draw_command_buffer, &cmdbuf_begin); ERR_FAIL_COND_MSG(err, "vkBeginCommandBuffer failed with error " + itos(err) + "."); if (local_device.is_null()) { - context->set_setup_buffer(frames[0].setup_command_buffer); //append now so it's added before everything else + context->set_setup_buffer(frames[0].setup_command_buffer); // Append now so it's added before everything else. 
context->append_command_buffer(frames[0].draw_command_buffer); } } - // Note: If adding new project settings here, also duplicate their definition in + // NOTE: If adding new project settings here, also duplicate their definition in // rendering_server.cpp for headless doctool. staging_buffer_block_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/block_size_kb", 256); staging_buffer_block_size = MAX(4u, staging_buffer_block_size); - staging_buffer_block_size *= 1024; //kb -> bytes + staging_buffer_block_size *= 1024; // Kb -> bytes. staging_buffer_max_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/max_size_mb", 128); staging_buffer_max_size = MAX(1u, staging_buffer_max_size); staging_buffer_max_size *= 1024 * 1024; if (staging_buffer_max_size < staging_buffer_block_size * 4) { - //validate enough blocks + // Validate enough blocks. staging_buffer_max_size = staging_buffer_block_size * 4; } texture_upload_region_size_px = GLOBAL_DEF("rendering/vulkan/staging_buffer/texture_upload_region_size_px", 64); texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px); - frames_drawn = frame_count; //start from frame count, so everything else is immediately old + frames_drawn = frame_count; // Start from frame count, so everything else is immediately old. - //ensure current staging block is valid and at least one per frame exists + // Ensure current staging block is valid and at least one per frame exists. staging_buffer_current = 0; staging_buffer_used = false; for (int i = 0; i < frame_count; i++) { - //staging was never used, create a block + // Staging was never used, create a block. Error err = _insert_staging_block(); ERR_CONTINUE(err != OK); } max_descriptors_per_pool = GLOBAL_DEF("rendering/vulkan/descriptor_pools/max_descriptors_per_pool", 64); - //check to make sure DescriptorPoolKey is good + // Check to make sure DescriptorPoolKey is good. static_assert(sizeof(uint64_t) * 3 >= UNIFORM_TYPE_MAX * sizeof(uint16_t)); draw_list = nullptr; @@ -9296,6 +9416,11 @@ void RenderingDeviceVulkan::_free_rids(T &p_owner, const char *p_type) { WARN_PRINT(vformat("%d RIDs of type \"%s\" were leaked.", owned.size(), p_type)); } for (const RID &E : owned) { +#ifdef DEV_ENABLED + if (resource_names.has(E)) { + print_line(String(" - ") + resource_names[E]); + } +#endif free(E); } } @@ -9305,7 +9430,7 @@ void RenderingDeviceVulkan::capture_timestamp(const String &p_name) { ERR_FAIL_COND_MSG(draw_list != nullptr, "Capturing timestamps during draw list creation is not allowed. Offending timestamp was: " + p_name); ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements); - //this should be optional for profiling, else it will slow things down + // This should be optional for profiling, else it will slow things down. { VkMemoryBarrier memoryBarrier; @@ -9431,7 +9556,7 @@ uint64_t RenderingDeviceVulkan::get_driver_resource(DriverResource p_resource, R return uint64_t(render_pipeline->pipeline); } break; default: { - // not supported for this driver + // Not supported for this driver. return 0; } break; } @@ -9468,9 +9593,9 @@ static void mult64to128(uint64_t u, uint64_t v, uint64_t &h, uint64_t &l) { uint64_t RenderingDeviceVulkan::get_captured_timestamp_gpu_time(uint32_t p_index) const { ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0); - // this sucks because timestampPeriod multiplier is a float, while the timestamp is 64 bits nanosecs. 
- // so, in cases like nvidia which give you enormous numbers and 1 as multiplier, multiplying is next to impossible - // need to do 128 bits fixed point multiplication to get the right value + // This sucks because timestampPeriod multiplier is a float, while the timestamp is 64 bits nanosecs. + // So, in cases like nvidia which give you enormous numbers and 1 as multiplier, multiplying is next to impossible. + // Need to do 128 bits fixed point multiplication to get the right value. uint64_t shift_bits = 16; @@ -9583,7 +9708,7 @@ uint64_t RenderingDeviceVulkan::limit_get(Limit p_limit) const { } void RenderingDeviceVulkan::finalize() { - //free all resources + // Free all resources. _flush(false); @@ -9601,7 +9726,7 @@ void RenderingDeviceVulkan::finalize() { _free_rids(framebuffer_owner, "Framebuffer"); _free_rids(sampler_owner, "Sampler"); { - //for textures it's a bit more difficult because they may be shared + // For textures it's a bit more difficult because they may be shared. List<RID> owned; texture_owner.get_owned_list(&owned); if (owned.size()) { @@ -9610,23 +9735,33 @@ void RenderingDeviceVulkan::finalize() { } else { WARN_PRINT(vformat("%d RIDs of type \"Texture\" were leaked.", owned.size())); } - //free shared first + // Free shared first. for (List<RID>::Element *E = owned.front(); E;) { List<RID>::Element *N = E->next(); if (texture_is_shared(E->get())) { +#ifdef DEV_ENABLED + if (resource_names.has(E->get())) { + print_line(String(" - ") + resource_names[E->get()]); + } +#endif free(E->get()); owned.erase(E); } E = N; } - //free non shared second, this will avoid an error trying to free unexisting textures due to dependencies. + // Free non shared second, this will avoid an error trying to free unexisting textures due to dependencies. for (const RID &E : owned) { +#ifdef DEV_ENABLED + if (resource_names.has(E)) { + print_line(String(" - ") + resource_names[E]); + } +#endif free(E); } } } - //free everything pending + // Free everything pending. for (int i = 0; i < frame_count; i++) { int f = (frame + i) % frame_count; _free_pending_resources(f); @@ -9662,7 +9797,7 @@ void RenderingDeviceVulkan::finalize() { } framebuffer_formats.clear(); - //all these should be clear at this point + // All these should be clear at this point. ERR_FAIL_COND(descriptor_pools.size()); ERR_FAIL_COND(dependency_map.size()); ERR_FAIL_COND(reverse_dependency_map.size()); diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h index 7c8021251f..abec1b0e1b 100644 --- a/drivers/vulkan/rendering_device_vulkan.h +++ b/drivers/vulkan/rendering_device_vulkan.h @@ -96,13 +96,13 @@ class RenderingDeviceVulkan : public RenderingDevice { ID_TYPE_SPLIT_DRAW_LIST, ID_TYPE_COMPUTE_LIST, ID_TYPE_MAX, - ID_BASE_SHIFT = 58 //5 bits for ID types + ID_BASE_SHIFT = 58 // 5 bits for ID types. }; VkDevice device = VK_NULL_HANDLE; - HashMap<RID, HashSet<RID>> dependency_map; //IDs to IDs that depend on it - HashMap<RID, HashSet<RID>> reverse_dependency_map; //same as above, but in reverse + HashMap<RID, HashSet<RID>> dependency_map; // IDs to IDs that depend on it. + HashMap<RID, HashSet<RID>> reverse_dependency_map; // Same as above, but in reverse. 
void _add_dependency(RID p_id, RID p_depends_on); void _free_dependencies(RID p_id); @@ -150,9 +150,11 @@ class RenderingDeviceVulkan : public RenderingDevice { bool used_in_raster = false; bool used_in_compute = false; + bool is_resolve_buffer = false; + uint32_t read_aspect_mask = 0; uint32_t barrier_aspect_mask = 0; - bool bound = false; //bound to framebffer + bool bound = false; // Bound to framebuffer. RID owner; }; @@ -214,7 +216,7 @@ class RenderingDeviceVulkan : public RenderingDevice { uint32_t usage = 0; VkBuffer buffer = VK_NULL_HANDLE; VmaAllocation allocation = nullptr; - VkDescriptorBufferInfo buffer_info; //used for binding + VkDescriptorBufferInfo buffer_info; // Used for binding. Buffer() { } }; @@ -256,7 +258,7 @@ class RenderingDeviceVulkan : public RenderingDevice { const FramebufferPass *key_pass_ptr = p_key.passes.ptr(); for (uint32_t i = 0; i < pass_size; i++) { - { //compare color attachments + { // Compare color attachments. uint32_t attachment_size = pass_ptr[i].color_attachments.size(); uint32_t key_attachment_size = key_pass_ptr[i].color_attachments.size(); if (attachment_size != key_attachment_size) { @@ -271,7 +273,7 @@ class RenderingDeviceVulkan : public RenderingDevice { } } } - { //compare input attachments + { // Compare input attachments. uint32_t attachment_size = pass_ptr[i].input_attachments.size(); uint32_t key_attachment_size = key_pass_ptr[i].input_attachments.size(); if (attachment_size != key_attachment_size) { @@ -286,7 +288,7 @@ class RenderingDeviceVulkan : public RenderingDevice { } } } - { //compare resolve attachments + { // Compare resolve attachments. uint32_t attachment_size = pass_ptr[i].resolve_attachments.size(); uint32_t key_attachment_size = key_pass_ptr[i].resolve_attachments.size(); if (attachment_size != key_attachment_size) { @@ -301,7 +303,7 @@ class RenderingDeviceVulkan : public RenderingDevice { } } } - { //compare preserve attachments + { // Compare preserve attachments. uint32_t attachment_size = pass_ptr[i].preserve_attachments.size(); uint32_t key_attachment_size = key_pass_ptr[i].preserve_attachments.size(); if (attachment_size != key_attachment_size) { @@ -343,7 +345,7 @@ class RenderingDeviceVulkan : public RenderingDevice { } } - return false; //equal + return false; // Equal. } }; @@ -353,9 +355,9 @@ class RenderingDeviceVulkan : public RenderingDevice { RBMap<FramebufferFormatKey, FramebufferFormatID> framebuffer_format_cache; struct FramebufferFormat { const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E; - VkRenderPass render_pass = VK_NULL_HANDLE; //here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec) + VkRenderPass render_pass = VK_NULL_HANDLE; // Here for constructing shaders, never used; see section 7.2 (Render Pass Compatibility) of the Vulkan spec. Vector<TextureSamples> pass_samples; - uint32_t view_count = 1; // number of views + uint32_t view_count = 1; // Number of views. }; HashMap<FramebufferFormatID, FramebufferFormat> framebuffer_formats; @@ -392,10 +394,12 @@ class RenderingDeviceVulkan : public RenderingDevice { uint32_t storage_mask = 0; Vector<RID> texture_ids; + InvalidationCallback invalidated_callback = nullptr; + void *invalidated_callback_userdata = nullptr; struct Version { VkFramebuffer framebuffer = VK_NULL_HANDLE; - VkRenderPass render_pass = VK_NULL_HANDLE; //this one is owned + VkRenderPass render_pass = VK_NULL_HANDLE; // This one is owned.
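The Framebuffer struct above gains an invalidated_callback / invalidated_callback_userdata pair, the same C-style callback-plus-userdata pattern UniformSet already used. Below is a small self-contained sketch of how the owner of such an object can use that pair to drop stale cached state; the InvalidationCallback alias and every name in the snippet are illustrative stand-ins, not the engine's headers.

#include <cstdio>

// Illustrative stand-ins for the engine types.
using InvalidationCallback = void (*)(void *p_userdata);

struct Framebuffer {
	InvalidationCallback invalidated_callback = nullptr;
	void *invalidated_callback_userdata = nullptr;
};

struct RenderPassCache {
	bool dirty = false;
};

// The owner marks its cached state stale instead of keeping a dangling handle.
static void _framebuffer_invalidated(void *p_userdata) {
	static_cast<RenderPassCache *>(p_userdata)->dirty = true;
}

int main() {
	RenderPassCache cache;
	Framebuffer fb;
	fb.invalidated_callback = _framebuffer_invalidated;
	fb.invalidated_callback_userdata = &cache;

	// Whoever frees or resizes the framebuffer fires the callback exactly once.
	if (fb.invalidated_callback) {
		fb.invalidated_callback(fb.invalidated_callback_userdata);
	}
	printf("cache dirty: %d\n", cache.dirty);
	return 0;
}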
uint32_t subpass_count = 1; }; @@ -452,7 +456,7 @@ class RenderingDeviceVulkan : public RenderingDevice { return false; } } - return true; //they are equal + return true; // They are equal. } } @@ -497,14 +501,14 @@ class RenderingDeviceVulkan : public RenderingDevice { int vertex_count = 0; uint32_t max_instances_allowed = 0; - Vector<VkBuffer> buffers; //not owned, just referenced + Vector<VkBuffer> buffers; // Not owned, just referenced. Vector<VkDeviceSize> offsets; }; RID_Owner<VertexArray, true> vertex_array_owner; struct IndexBuffer : public Buffer { - uint32_t max_index = 0; //used for validation + uint32_t max_index = 0; // Used for validation. uint32_t index_count = 0; VkIndexType index_type = VK_INDEX_TYPE_NONE_NV; bool supports_restart_indices = false; @@ -513,8 +517,8 @@ class RenderingDeviceVulkan : public RenderingDevice { RID_Owner<IndexBuffer, true> index_buffer_owner; struct IndexArray { - uint32_t max_index = 0; //remember the maximum index here too, for validation - VkBuffer buffer; //not owned, inherited from index buffer + uint32_t max_index = 0; // Remember the maximum index here too, for validation. + VkBuffer buffer; // Not owned, inherited from index buffer. uint32_t offset = 0; uint32_t indices = 0; VkIndexType index_type = VK_INDEX_TYPE_NONE_NV; @@ -548,7 +552,7 @@ class RenderingDeviceVulkan : public RenderingDevice { bool writable = false; int binding = 0; uint32_t stages = 0; - int length = 0; //size of arrays (in total elements), or ubos (in bytes * total elements) + int length = 0; // Size of arrays (in total elements), or ubos (in bytes * total elements). bool operator!=(const UniformInfo &p_info) const { return (binding != p_info.binding || type != p_info.type || writable != p_info.writable || stages != p_info.stages || length != p_info.length); @@ -620,7 +624,7 @@ class RenderingDeviceVulkan : public RenderingDevice { VkDescriptorSetLayout descriptor_set_layout = VK_NULL_HANDLE; }; - uint32_t vertex_input_mask = 0; //inputs used, this is mostly for validation + uint32_t vertex_input_mask = 0; // Inputs used, this is mostly for validation. uint32_t fragment_output_mask = 0; struct PushConstant { @@ -643,7 +647,7 @@ class RenderingDeviceVulkan : public RenderingDevice { Vector<VkPipelineShaderStageCreateInfo> pipeline_stages; Vector<SpecializationConstant> specialization_constants; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; - String name; //used for debug + String name; // Used for debug. }; String _shader_uniform_debug(RID p_shader, int p_set = -1); @@ -715,7 +719,7 @@ class RenderingDeviceVulkan : public RenderingDevice { RID_Owner<Buffer, true> uniform_buffer_owner; RID_Owner<Buffer, true> storage_buffer_owner; - //texture buffer needs a view + // Texture buffer needs a view. struct TextureBuffer { Buffer buffer; VkBufferView view = VK_NULL_HANDLE; @@ -738,16 +742,16 @@ class RenderingDeviceVulkan : public RenderingDevice { DescriptorPool *pool = nullptr; DescriptorPoolKey pool_key; VkDescriptorSet descriptor_set = VK_NULL_HANDLE; - //VkPipelineLayout pipeline_layout; //not owned, inherited from shader + //VkPipelineLayout pipeline_layout; // Not owned, inherited from shader. 
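The TextureBuffer struct a few hunks up notes that a texture buffer needs a VkBufferView on top of the underlying buffer. For reference, creating a view that spans the whole buffer looks roughly like the sketch below in plain Vulkan; the helper name and parameters are placeholders, and error handling is reduced to a single check.

#include <vulkan/vulkan.h>

// Create a buffer view covering the whole buffer, as needed for texel buffers.
static VkBufferView create_texel_buffer_view(VkDevice p_device, VkBuffer p_buffer, VkFormat p_format) {
	VkBufferViewCreateInfo view_create_info = {};
	view_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
	view_create_info.buffer = p_buffer;
	view_create_info.format = p_format; // Must be a format with uniform/storage texel buffer support.
	view_create_info.offset = 0;
	view_create_info.range = VK_WHOLE_SIZE;

	VkBufferView view = VK_NULL_HANDLE;
	if (vkCreateBufferView(p_device, &view_create_info, nullptr, &view) != VK_SUCCESS) {
		return VK_NULL_HANDLE;
	}
	return view;
}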
struct AttachableTexture { uint32_t bind; RID texture; }; - LocalVector<AttachableTexture> attachable_textures; //used for validation - Vector<Texture *> mutable_sampled_textures; //used for layout change - Vector<Texture *> mutable_storage_textures; //used for layout change - UniformSetInvalidatedCallback invalidated_callback = nullptr; + LocalVector<AttachableTexture> attachable_textures; // Used for validation. + Vector<Texture *> mutable_sampled_textures; // Used for layout change. + Vector<Texture *> mutable_storage_textures; // Used for layout change. + InvalidationCallback invalidated_callback = nullptr; void *invalidated_callback_userdata = nullptr; }; @@ -769,7 +773,7 @@ class RenderingDeviceVulkan : public RenderingDevice { // was not supplied as intended. struct RenderPipeline { - //Cached values for validation + // Cached values for validation. #ifdef DEBUG_ENABLED struct Validation { FramebufferFormatID framebuffer_format = 0; @@ -781,10 +785,10 @@ class RenderingDeviceVulkan : public RenderingDevice { uint32_t primitive_divisor = 0; } validation; #endif - //Actual pipeline + // Actual pipeline. RID shader; Vector<uint32_t> set_formats; - VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // not owned, needed for push constants + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // Not owned, needed for push constants. VkPipeline pipeline = VK_NULL_HANDLE; uint32_t push_constant_size = 0; uint32_t push_constant_stages = 0; @@ -795,7 +799,7 @@ class RenderingDeviceVulkan : public RenderingDevice { struct ComputePipeline { RID shader; Vector<uint32_t> set_formats; - VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // not owned, needed for push constants + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // Not owned, needed for push constants. VkPipeline pipeline = VK_NULL_HANDLE; uint32_t push_constant_size = 0; uint32_t push_constant_stages = 0; @@ -821,7 +825,7 @@ class RenderingDeviceVulkan : public RenderingDevice { struct SplitDrawListAllocator { VkCommandPool command_pool = VK_NULL_HANDLE; - Vector<VkCommandBuffer> command_buffers; //one for each frame + Vector<VkCommandBuffer> command_buffers; // One for each frame. }; Vector<SplitDrawListAllocator> split_draw_list_allocators; @@ -973,7 +977,7 @@ class RenderingDeviceVulkan : public RenderingDevice { // when the frame is cycled. struct Frame { - //list in usage order, from last to free to first to free + // List in usage order, from last to free to first to free. List<Buffer> buffers_to_dispose_of; List<Texture> textures_to_dispose_of; List<Framebuffer> framebuffers_to_dispose_of; @@ -985,8 +989,8 @@ class RenderingDeviceVulkan : public RenderingDevice { List<ComputePipeline> compute_pipelines_to_dispose_of; VkCommandPool command_pool = VK_NULL_HANDLE; - VkCommandBuffer setup_command_buffer = VK_NULL_HANDLE; //used at the beginning of every frame for set-up - VkCommandBuffer draw_command_buffer = VK_NULL_HANDLE; //used at the beginning of every frame for set-up + VkCommandBuffer setup_command_buffer = VK_NULL_HANDLE; // Used at the beginning of every frame for set-up. + VkCommandBuffer draw_command_buffer = VK_NULL_HANDLE; // Used at the beginning of every frame for set-up. 
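The Frame struct above keeps per-frame *_to_dispose_of lists so that a resource freed while its frame may still be in flight is only destroyed once that frame index cycles back around. The toy model below illustrates that deferral with std::list standing in for the engine containers and plain ints standing in for Vulkan handles; it is a sketch of the idea, not the engine code.

#include <cstdio>
#include <list>
#include <vector>

struct Frame {
	std::list<int> buffers_to_dispose_of; // Stand-in for buffers, textures, pipelines, ...
};

struct Device {
	std::vector<Frame> frames = std::vector<Frame>(3); // Main device usually cycles 3 frames.
	int frame = 0;

	void free_buffer(int p_buffer) {
		// Deferred: queued on the frame currently being recorded.
		frames[frame].buffers_to_dispose_of.push_back(p_buffer);
	}

	void advance_frame() {
		frame = (frame + 1) % int(frames.size());
		// Everything queued the last time this index was used can no longer be in
		// flight on the GPU, so it is finally safe to destroy.
		for (int buffer : frames[frame].buffers_to_dispose_of) {
			printf("destroying buffer %d\n", buffer);
		}
		frames[frame].buffers_to_dispose_of.clear();
	}
};

int main() {
	Device device;
	device.free_buffer(42); // Freed by the caller, kept alive internally.
	device.advance_frame();
	device.advance_frame();
	device.advance_frame(); // Index 0 cycles back: buffer 42 is destroyed here.
	return 0;
}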
struct Timestamp { String description; @@ -1007,9 +1011,9 @@ class RenderingDeviceVulkan : public RenderingDevice { uint32_t max_timestamp_query_elements = 0; - TightLocalVector<Frame> frames; //frames available, for main device they are cycled (usually 3), for local devices only 1 - int frame = 0; //current frame - int frame_count = 0; //total amount of frames + TightLocalVector<Frame> frames; // Frames available; for the main device they are cycled (usually 3), for local devices only 1. + int frame = 0; // Current frame. + int frame_count = 0; // Total amount of frames. uint64_t frames_drawn = 0; RID local_device; bool local_device_processing = false; @@ -1036,6 +1040,12 @@ class RenderingDeviceVulkan : public RenderingDevice { void _finalize_command_bufers(); void _begin_frame(); +#ifdef DEV_ENABLED + HashMap<RID, String> resource_names; +#endif + + VkSampleCountFlagBits _ensure_supported_sample_count(TextureSamples p_requested_sample_count) const; + public: virtual RID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data = Vector<Vector<uint8_t>>()); virtual RID texture_create_shared(const TextureView &p_view, RID p_with_texture); @@ -1059,13 +1069,15 @@ public: /*********************/ virtual FramebufferFormatID framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count = 1); - virtual FramebufferFormatID framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, Vector<FramebufferPass> &p_passes, uint32_t p_view_count = 1); + virtual FramebufferFormatID framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count = 1); virtual FramebufferFormatID framebuffer_format_create_empty(TextureSamples p_samples = TEXTURE_SAMPLES_1); virtual TextureSamples framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass = 0); virtual RID framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1); - virtual RID framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1); + virtual RID framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1); virtual RID framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples = TEXTURE_SAMPLES_1, FramebufferFormatID p_format_check = INVALID_ID); + virtual bool framebuffer_is_valid(RID p_framebuffer) const; + virtual void framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata); virtual FramebufferFormatID framebuffer_get_format(RID p_framebuffer); @@ -1081,7 +1093,7 @@ public: virtual RID vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>(), bool p_use_as_storage = false); - // Internally reference counted, this ID is warranted to be unique for the same description, but needs to be freed as many times as it was allocated + // Internally reference counted; this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated.
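A private _ensure_supported_sample_count() helper is declared above, but its body is not part of this diff. As a hedged sketch only, clamping a requested MSAA count against the device limits could look like the following; the function name, parameters, and the choice to intersect the color and depth sample-count masks are assumptions for the example.

#include <vulkan/vulkan.h>

// Walk down from the requested sample count until a bit supported for both color
// and depth attachments is found; 1x sampling is always available.
static VkSampleCountFlagBits clamp_sample_count(const VkPhysicalDeviceLimits &p_limits, VkSampleCountFlagBits p_requested) {
	const VkSampleCountFlags supported =
			p_limits.framebufferColorSampleCounts & p_limits.framebufferDepthSampleCounts;
	for (int bits = p_requested; bits > VK_SAMPLE_COUNT_1_BIT; bits >>= 1) {
		if (supported & bits) {
			return VkSampleCountFlagBits(bits);
		}
	}
	return VK_SAMPLE_COUNT_1_BIT;
}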
virtual VertexFormatID vertex_format_create(const Vector<VertexAttribute> &p_vertex_formats); virtual RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers); @@ -1110,9 +1122,9 @@ public: virtual RID uniform_set_create(const Vector<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set); virtual bool uniform_set_is_valid(RID p_uniform_set); - virtual void uniform_set_set_invalidation_callback(RID p_uniform_set, UniformSetInvalidatedCallback p_callback, void *p_userdata); + virtual void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata); - virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); //works for any buffer + virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); // Works for any buffer. virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier = BARRIER_MASK_ALL); virtual Vector<uint8_t> buffer_get_data(RID p_buffer); @@ -1147,6 +1159,7 @@ public: virtual DrawListID draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const Vector<RID> &p_storage_textures = Vector<RID>()); virtual Error draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const Vector<RID> &p_storage_textures = Vector<RID>()); + virtual void draw_list_set_blend_constants(DrawListID p_list, const Color &p_color); virtual void draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline); virtual void draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index); virtual void draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array); @@ -1210,10 +1223,10 @@ public: void initialize(VulkanContext *p_context, bool p_local_device = false); void finalize(); - virtual void swap_buffers(); //for main device + virtual void swap_buffers(); // For main device. - virtual void submit(); //for local device - virtual void sync(); //for local device + virtual void submit(); // For local device. + virtual void sync(); // For local device. virtual uint32_t get_frame_delay() const; diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp index 0d8a3310fd..b52179b4f3 100644 --- a/drivers/vulkan/vulkan_context.cpp +++ b/drivers/vulkan/vulkan_context.cpp @@ -237,7 +237,7 @@ Error VulkanContext::_get_preferred_validation_layers(uint32_t *count, const cha { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_parameter_validation", "VK_LAYER_LUNARG_object_tracker", "VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects" } }; - // Clear out-arguments + // Clear out-arguments. 
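Among the new public entry points above is draw_list_set_blend_constants(). Its implementation is not shown in this diff; at the Vulkan level, setting blend constants on a command buffer amounts to the call sketched below, where the wrapper and its parameters are illustrative and only vkCmdSetBlendConstants() itself is real API. Note the bound pipeline must have VK_DYNAMIC_STATE_BLEND_CONSTANTS enabled for the call to take effect.

#include <vulkan/vulkan.h>

// Set the constant blend color used by pipelines whose blend factors reference
// VK_BLEND_FACTOR_CONSTANT_COLOR / VK_BLEND_FACTOR_CONSTANT_ALPHA.
static void set_blend_constants(VkCommandBuffer p_command_buffer, float p_r, float p_g, float p_b, float p_a) {
	const float blend_constants[4] = { p_r, p_g, p_b, p_a };
	vkCmdSetBlendConstants(p_command_buffer, blend_constants);
}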
*count = 0; if (names != nullptr) { *names = nullptr; @@ -441,7 +441,7 @@ String VulkanContext::SubgroupCapabilities::supported_stages_desc() const { res += ", STAGE_MESH_NV"; } - return res.substr(2); // Remove first ", " + return res.substr(2); // Remove first ", ". } uint32_t VulkanContext::SubgroupCapabilities::supported_operations_flags_rd() const { @@ -506,7 +506,7 @@ String VulkanContext::SubgroupCapabilities::supported_operations_desc() const { res += ", FEATURE_PARTITIONED_NV"; } - return res.substr(2); // Remove first ", " + return res.substr(2); // Remove first ", ". } Error VulkanContext::_check_capabilities() { @@ -641,8 +641,8 @@ Error VulkanContext::_check_capabilities() { subgroup_capabilities.supportedStages = subgroupProperties.supportedStages; subgroup_capabilities.supportedOperations = subgroupProperties.supportedOperations; // Note: quadOperationsInAllStages will be true if: - // - supportedStages has VK_SHADER_STAGE_ALL_GRAPHICS + VK_SHADER_STAGE_COMPUTE_BIT - // - supportedOperations has VK_SUBGROUP_FEATURE_QUAD_BIT + // - supportedStages has VK_SHADER_STAGE_ALL_GRAPHICS + VK_SHADER_STAGE_COMPUTE_BIT. + // - supportedOperations has VK_SUBGROUP_FEATURE_QUAD_BIT. subgroup_capabilities.quadOperationsInAllStages = subgroupProperties.quadOperationsInAllStages; if (vrs_capabilities.pipeline_vrs_supported || vrs_capabilities.primitive_vrs_supported || vrs_capabilities.attachment_vrs_supported) { @@ -654,7 +654,7 @@ Error VulkanContext::_check_capabilities() { print_verbose(" Primitive fragment shading rate"); } if (vrs_capabilities.attachment_vrs_supported) { - // TODO expose these somehow to the end user + // TODO expose these somehow to the end user. vrs_capabilities.min_texel_size.x = vrsProperties.minFragmentShadingRateAttachmentTexelSize.width; vrs_capabilities.min_texel_size.y = vrsProperties.minFragmentShadingRateAttachmentTexelSize.height; vrs_capabilities.max_texel_size.x = vrsProperties.maxFragmentShadingRateAttachmentTexelSize.width; @@ -731,7 +731,7 @@ Error VulkanContext::_create_instance() { VkDebugUtilsMessengerCreateInfoEXT dbg_messenger_create_info; VkDebugReportCallbackCreateInfoEXT dbg_report_callback_create_info{}; if (enabled_debug_utils) { - // VK_EXT_debug_utils style + // VK_EXT_debug_utils style. dbg_messenger_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; dbg_messenger_create_info.pNext = nullptr; dbg_messenger_create_info.flags = 0; @@ -902,8 +902,8 @@ Error VulkanContext::_create_physical_device(VkSurfaceKHR p_surface) { } } else { // TODO: At least on Linux Laptops integrated GPUs fail with Vulkan in many instances. - // The device should really be a preference, but for now choosing a discrete GPU over the - // integrated one is better than the default. + // The device should really be a preference, but for now choosing a discrete GPU over the + // integrated one is better than the default. int type_selected = -1; print_verbose("Vulkan devices:"); @@ -1175,7 +1175,7 @@ Error VulkanContext::_create_device() { VkPhysicalDeviceFragmentShadingRateFeaturesKHR vrs_features; if (vrs_capabilities.pipeline_vrs_supported || vrs_capabilities.primitive_vrs_supported || vrs_capabilities.attachment_vrs_supported) { - // insert into our chain to enable these features if they are available + // Insert into our chain to enable these features if they are available. 
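The hunk that continues below fills in VkPhysicalDeviceFragmentShadingRateFeaturesKHR and links it into the pNext chain handed to device creation. As a self-contained illustration of that chaining pattern, a helper along these lines could be used; the function and parameter names are made up for the example, and only features detected earlier should ever be enabled.

#include <vulkan/vulkan.h>

// Link the VRS feature struct into an existing pNext chain only when the
// corresponding capabilities were detected, and return the new chain head.
static void *chain_vrs_features(void *p_next_chain,
		VkPhysicalDeviceFragmentShadingRateFeaturesKHR *p_vrs_features,
		bool p_pipeline_vrs, bool p_primitive_vrs, bool p_attachment_vrs) {
	if (!(p_pipeline_vrs || p_primitive_vrs || p_attachment_vrs)) {
		return p_next_chain; // Leave the chain untouched when VRS is unsupported.
	}
	*p_vrs_features = {};
	p_vrs_features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR;
	p_vrs_features->pNext = p_next_chain;
	p_vrs_features->pipelineFragmentShadingRate = p_pipeline_vrs;
	p_vrs_features->primitiveFragmentShadingRate = p_primitive_vrs;
	p_vrs_features->attachmentFragmentShadingRate = p_attachment_vrs;
	return p_vrs_features; // New head of the chain, passed to VkDeviceCreateInfo::pNext.
}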
vrs_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR; vrs_features.pNext = nextptr; vrs_features.pipelineFragmentShadingRate = vrs_capabilities.pipeline_vrs_supported; @@ -1436,6 +1436,24 @@ bool VulkanContext::_use_validation_layers() { return Engine::get_singleton()->is_validation_layers_enabled(); } +VkExtent2D VulkanContext::_compute_swapchain_extent(const VkSurfaceCapabilitiesKHR &p_surf_capabilities, int *p_window_width, int *p_window_height) const { + // Width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF. + if (p_surf_capabilities.currentExtent.width == 0xFFFFFFFF) { + // If the surface size is undefined, the size is set to the size + // of the images requested, which must fit within the minimum and + // maximum values. + VkExtent2D extent = {}; + extent.width = CLAMP((uint32_t)(*p_window_width), p_surf_capabilities.minImageExtent.width, p_surf_capabilities.maxImageExtent.width); + extent.height = CLAMP((uint32_t)(*p_window_height), p_surf_capabilities.minImageExtent.height, p_surf_capabilities.maxImageExtent.height); + return extent; + } else { + // If the surface size is defined, the swap chain size must match. + *p_window_width = p_surf_capabilities.currentExtent.width; + *p_window_height = p_surf_capabilities.currentExtent.height; + return p_surf_capabilities.currentExtent; + } +} + Error VulkanContext::_window_create(DisplayServer::WindowID p_window_id, DisplayServer::VSyncMode p_vsync_mode, VkSurfaceKHR p_surface, int p_width, int p_height) { ERR_FAIL_COND_V(windows.has(p_window_id), ERR_INVALID_PARAMETER); @@ -1576,32 +1594,7 @@ Error VulkanContext::_update_swap_chain(Window *window) { ERR_FAIL_V(ERR_CANT_CREATE); } - VkExtent2D swapchainExtent; - // Width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF. - if (surfCapabilities.currentExtent.width == 0xFFFFFFFF) { - // If the surface size is undefined, the size is set to the size - // of the images requested, which must fit within the minimum and - // maximum values. - swapchainExtent.width = window->width; - swapchainExtent.height = window->height; - - if (swapchainExtent.width < surfCapabilities.minImageExtent.width) { - swapchainExtent.width = surfCapabilities.minImageExtent.width; - } else if (swapchainExtent.width > surfCapabilities.maxImageExtent.width) { - swapchainExtent.width = surfCapabilities.maxImageExtent.width; - } - - if (swapchainExtent.height < surfCapabilities.minImageExtent.height) { - swapchainExtent.height = surfCapabilities.minImageExtent.height; - } else if (swapchainExtent.height > surfCapabilities.maxImageExtent.height) { - swapchainExtent.height = surfCapabilities.maxImageExtent.height; - } - } else { - // If the surface size is defined, the swap chain size must match. - swapchainExtent = surfCapabilities.currentExtent; - window->width = surfCapabilities.currentExtent.width; - window->height = surfCapabilities.currentExtent.height; - } + VkExtent2D swapchainExtent = _compute_swapchain_extent(surfCapabilities, &window->width, &window->height); if (window->width == 0 || window->height == 0) { free(presentModes); @@ -1611,17 +1604,17 @@ Error VulkanContext::_update_swap_chain(Window *window) { // The FIFO present mode is guaranteed by the spec to be supported // and to have no tearing. It's a great default present mode to use. - // There are times when you may wish to use another present mode. The - // following code shows how to select them, and the comments provide some - // reasons you may wish to use them. 
+ // There are times when you may wish to use another present mode. The + // following code shows how to select them, and the comments provide some + // reasons you may wish to use them. // // It should be noted that Vulkan 1.0 doesn't provide a method for - // synchronizing rendering with the presentation engine's display. There + // synchronizing rendering with the presentation engine's display. There // is a method provided for throttling rendering with the display, but // there are some presentation engines for which this method will not work. // If an application doesn't throttle its rendering, and if it renders much // faster than the refresh rate of the display, this can waste power on - // mobile devices. That is because power is being spent rendering images + // mobile devices. That is because power is being spent rendering images // that may never be seen. // VK_PRESENT_MODE_IMMEDIATE_KHR is for applications that don't care about @@ -1666,7 +1659,22 @@ Error VulkanContext::_update_swap_chain(Window *window) { if (present_mode_available) { window->presentMode = requested_present_mode; } else { - WARN_PRINT("Requested VSync mode is not available!"); + String present_mode_string; + switch (window->vsync_mode) { + case DisplayServer::VSYNC_MAILBOX: + present_mode_string = "Mailbox"; + break; + case DisplayServer::VSYNC_ADAPTIVE: + present_mode_string = "Adaptive"; + break; + case DisplayServer::VSYNC_ENABLED: + present_mode_string = "Enabled"; + break; + case DisplayServer::VSYNC_DISABLED: + present_mode_string = "Disabled"; + break; + } + WARN_PRINT(vformat("The requested V-Sync mode %s is not available. Falling back to V-Sync mode Enabled.", present_mode_string)); window->vsync_mode = DisplayServer::VSYNC_ENABLED; // Set to default. } @@ -1684,7 +1692,7 @@ Error VulkanContext::_update_swap_chain(Window *window) { // If maxImageCount is 0, we can ask for as many images as we want; // otherwise we're limited to maxImageCount. if ((surfCapabilities.maxImageCount > 0) && (desiredNumOfSwapchainImages > surfCapabilities.maxImageCount)) { - // Application must settle for fewer images than desired: + // Application must settle for fewer images than desired. desiredNumOfSwapchainImages = surfCapabilities.maxImageCount; } @@ -2028,14 +2036,14 @@ Error VulkanContext::prepare_buffers() { } do { - // Get the index of the next available swapchain image: + // Get the index of the next available swapchain image. err = fpAcquireNextImageKHR(device, w->swapchain, UINT64_MAX, w->image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &w->current_buffer); if (err == VK_ERROR_OUT_OF_DATE_KHR) { // Swapchain is out of date (e.g. the window was resized) and - // must be recreated: + // must be recreated. print_verbose("Vulkan: Early out of date swapchain, recreating."); // resize_notify(); _update_swap_chain(w); @@ -2068,7 +2076,7 @@ Error VulkanContext::swap_buffers() { #if 0 if (VK_GOOGLE_display_timing_enabled) { // Look at what happened to previous presents, and make appropriate - // adjustments in timing: + // adjustments in timing. DemoUpdateTargetIPD(demo); // Note: a real application would position its geometry to that it's in @@ -2231,7 +2239,7 @@ Error VulkanContext::swap_buffers() { uint64_t curtime = getTimeInNanoseconds(); if (curtime == 0) { // Since we didn't find out the current time, don't give a - // desiredPresentTime: + // desiredPresentTime. 
ptime.desiredPresentTime = 0; } else { ptime.desiredPresentTime = curtime + (target_IPD >> 1); @@ -2263,7 +2271,7 @@ Error VulkanContext::swap_buffers() { if (err == VK_ERROR_OUT_OF_DATE_KHR) { // Swapchain is out of date (e.g. the window was resized) and - // must be recreated: + // must be recreated. print_verbose("Vulkan: Swapchain is out of date, recreating."); resize_notify(); } else if (err == VK_SUBOPTIMAL_KHR) { @@ -2471,12 +2479,12 @@ String VulkanContext::get_device_pipeline_cache_uuid() const { } DisplayServer::VSyncMode VulkanContext::get_vsync_mode(DisplayServer::WindowID p_window) const { - ERR_FAIL_COND_V_MSG(!windows.has(p_window), DisplayServer::VSYNC_ENABLED, "Could not get VSync mode for window with WindowID " + itos(p_window) + " because it does not exist."); + ERR_FAIL_COND_V_MSG(!windows.has(p_window), DisplayServer::VSYNC_ENABLED, "Could not get V-Sync mode for window with WindowID " + itos(p_window) + " because it does not exist."); return windows[p_window].vsync_mode; } void VulkanContext::set_vsync_mode(DisplayServer::WindowID p_window, DisplayServer::VSyncMode p_mode) { - ERR_FAIL_COND_MSG(!windows.has(p_window), "Could not set VSync mode for window with WindowID " + itos(p_window) + " because it does not exist."); + ERR_FAIL_COND_MSG(!windows.has(p_window), "Could not set V-Sync mode for window with WindowID " + itos(p_window) + " because it does not exist."); windows[p_window].vsync_mode = p_mode; _update_swap_chain(&windows[p_window]); } diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h index 35e7ce7db8..9889cf336b 100644 --- a/drivers/vulkan/vulkan_context.h +++ b/drivers/vulkan/vulkan_context.h @@ -70,9 +70,9 @@ public: }; struct VRSCapabilities { - bool pipeline_vrs_supported; // We can specify our fragment rate on a pipeline level - bool primitive_vrs_supported; // We can specify our fragment rate on each drawcall - bool attachment_vrs_supported; // We can provide a density map attachment on our framebuffer + bool pipeline_vrs_supported; // We can specify our fragment rate on a pipeline level. + bool primitive_vrs_supported; // We can specify our fragment rate on each drawcall. + bool attachment_vrs_supported; // We can provide a density map attachment on our framebuffer. Size2i min_texel_size; Size2i max_texel_size; @@ -107,7 +107,7 @@ private: bool device_initialized = false; bool inst_initialized = false; - // Vulkan 1.0 doesn't return version info so we assume this by default until we know otherwise + // Vulkan 1.0 doesn't return version info so we assume this by default until we know otherwise. uint32_t vulkan_major = 1; uint32_t vulkan_minor = 0; uint32_t vulkan_patch = 0; @@ -266,8 +266,10 @@ protected: Error _get_preferred_validation_layers(uint32_t *count, const char *const **names); + virtual VkExtent2D _compute_swapchain_extent(const VkSurfaceCapabilitiesKHR &p_surf_capabilities, int *p_window_width, int *p_window_height) const; + public: - // Extension calls + // Extension calls. 
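The reworked V-Sync warning a few hunks up names the four DisplayServer modes (Mailbox, Adaptive, Enabled, Disabled). For context, the conventional mapping from those modes onto Vulkan present modes is sketched below as a standalone function; the enum is redeclared locally so the snippet compiles on its own, and the mapping reflects the usual convention rather than quoting this file's code.

#include <vulkan/vulkan.h>

// Local stand-in for DisplayServer::VSyncMode so the snippet is self-contained.
enum VSyncMode {
	VSYNC_DISABLED,
	VSYNC_ENABLED,
	VSYNC_ADAPTIVE,
	VSYNC_MAILBOX,
};

static VkPresentModeKHR vsync_mode_to_present_mode(VSyncMode p_mode) {
	switch (p_mode) {
		case VSYNC_MAILBOX:
			return VK_PRESENT_MODE_MAILBOX_KHR; // Low latency, no tearing, needs extra images.
		case VSYNC_ADAPTIVE:
			return VK_PRESENT_MODE_FIFO_RELAXED_KHR; // Tears only when running below refresh rate.
		case VSYNC_DISABLED:
			return VK_PRESENT_MODE_IMMEDIATE_KHR; // May tear, lowest latency.
		case VSYNC_ENABLED:
		default:
			return VK_PRESENT_MODE_FIFO_KHR; // Guaranteed by the spec to be available.
	}
}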
VkResult vkCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass); uint32_t get_vulkan_major() const { return vulkan_major; }; diff --git a/drivers/wasapi/audio_driver_wasapi.cpp b/drivers/wasapi/audio_driver_wasapi.cpp index 3a62850339..fb90b776cf 100644 --- a/drivers/wasapi/audio_driver_wasapi.cpp +++ b/drivers/wasapi/audio_driver_wasapi.cpp @@ -501,11 +501,11 @@ Error AudioDriverWASAPI::init_capture_device(bool reinit) { } Error AudioDriverWASAPI::audio_device_finish(AudioDeviceWASAPI *p_device) { - if (p_device->active) { + if (p_device->active.is_set()) { if (p_device->audio_client) { p_device->audio_client->Stop(); } - p_device->active = false; + p_device->active.clear(); } SAFE_RELEASE(p_device->audio_client) @@ -533,8 +533,7 @@ Error AudioDriverWASAPI::init() { ERR_PRINT("WASAPI: init_render_device error"); } - exit_thread = false; - thread_exited = false; + exit_thread.clear(); thread.start(thread_func, this); @@ -553,8 +552,8 @@ AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const { return get_speaker_mode_by_total_channels(channels); } -Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) { - Array list; +PackedStringArray AudioDriverWASAPI::audio_device_get_list(bool p_capture) { + PackedStringArray list; IMMDeviceCollection *devices = nullptr; IMMDeviceEnumerator *enumerator = nullptr; @@ -563,14 +562,14 @@ Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) { CoInitialize(nullptr); HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator); - ERR_FAIL_COND_V(hr != S_OK, Array()); + ERR_FAIL_COND_V(hr != S_OK, PackedStringArray()); hr = enumerator->EnumAudioEndpoints(p_capture ? 
eCapture : eRender, DEVICE_STATE_ACTIVE, &devices); - ERR_FAIL_COND_V(hr != S_OK, Array()); + ERR_FAIL_COND_V(hr != S_OK, PackedStringArray()); UINT count = 0; hr = devices->GetCount(&count); - ERR_FAIL_COND_V(hr != S_OK, Array()); + ERR_FAIL_COND_V(hr != S_OK, PackedStringArray()); for (ULONG i = 0; i < count; i++) { IMMDevice *device = nullptr; @@ -600,7 +599,7 @@ Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) { return list; } -Array AudioDriverWASAPI::get_device_list() { +PackedStringArray AudioDriverWASAPI::get_device_list() { return audio_device_get_list(false); } @@ -684,7 +683,7 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { uint32_t avail_frames = 0; uint32_t write_ofs = 0; - while (!ad->exit_thread) { + while (!ad->exit_thread.is_set()) { uint32_t read_frames = 0; uint32_t written_frames = 0; @@ -692,7 +691,7 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { ad->lock(); ad->start_counting_ticks(); - if (ad->audio_output.active) { + if (ad->audio_output.active.is_set()) { ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw()); } else { for (int i = 0; i < ad->samples_in.size(); i++) { @@ -758,7 +757,7 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { } } else { ERR_PRINT("WASAPI: Get buffer error"); - ad->exit_thread = true; + ad->exit_thread.set(); } } } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) { @@ -807,7 +806,7 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { write_ofs = 0; } - if (ad->audio_input.active) { + if (ad->audio_input.active.is_set()) { UINT32 packet_length = 0; BYTE *data; UINT32 num_frames_available; @@ -886,8 +885,6 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { OS::get_singleton()->delay_usec(1000); } } - - ad->thread_exited = true; } void AudioDriverWASAPI::start() { @@ -896,7 +893,7 @@ void AudioDriverWASAPI::start() { if (hr != S_OK) { ERR_PRINT("WASAPI: Start failed"); } else { - audio_output.active = true; + audio_output.active.set(); } } } @@ -910,7 +907,7 @@ void AudioDriverWASAPI::unlock() { } void AudioDriverWASAPI::finish() { - exit_thread = true; + exit_thread.set(); thread.wait_to_finish(); finish_capture_device(); @@ -924,19 +921,19 @@ Error AudioDriverWASAPI::capture_start() { return err; } - if (audio_input.active) { + if (audio_input.active.is_set()) { return FAILED; } audio_input.audio_client->Start(); - audio_input.active = true; + audio_input.active.set(); return OK; } Error AudioDriverWASAPI::capture_stop() { - if (audio_input.active) { + if (audio_input.active.is_set()) { audio_input.audio_client->Stop(); - audio_input.active = false; + audio_input.active.clear(); return OK; } @@ -950,7 +947,7 @@ void AudioDriverWASAPI::capture_set_device(const String &p_name) { unlock(); } -Array AudioDriverWASAPI::capture_get_device_list() { +PackedStringArray AudioDriverWASAPI::capture_get_device_list() { return audio_device_get_list(true); } diff --git a/drivers/wasapi/audio_driver_wasapi.h b/drivers/wasapi/audio_driver_wasapi.h index 9058077a1f..c30a54c042 100644 --- a/drivers/wasapi/audio_driver_wasapi.h +++ b/drivers/wasapi/audio_driver_wasapi.h @@ -35,6 +35,7 @@ #include "core/os/mutex.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" #include "servers/audio_server.h" #include <audioclient.h> @@ -48,7 +49,7 @@ class AudioDriverWASAPI : public AudioDriver { IAudioClient *audio_client = nullptr; IAudioRenderClient *render_client = nullptr; IAudioCaptureClient *capture_client = nullptr; - bool active = false; + SafeFlag active; WORD format_tag = 0; WORD 
bits_per_sample = 0; @@ -76,8 +77,7 @@ class AudioDriverWASAPI : public AudioDriver { float real_latency = 0.0; bool using_audio_client_3 = false; - bool thread_exited = false; - mutable bool exit_thread = false; + SafeFlag exit_thread; static _FORCE_INLINE_ void write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample); static _FORCE_INLINE_ int32_t read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i); @@ -91,7 +91,7 @@ class AudioDriverWASAPI : public AudioDriver { Error audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit); Error audio_device_finish(AudioDeviceWASAPI *p_device); - Array audio_device_get_list(bool p_capture); + PackedStringArray audio_device_get_list(bool p_capture); public: virtual const char *get_name() const { @@ -103,7 +103,7 @@ public: virtual int get_mix_rate() const; virtual float get_latency(); virtual SpeakerMode get_speaker_mode() const; - virtual Array get_device_list(); + virtual PackedStringArray get_device_list(); virtual String get_device(); virtual void set_device(String device); virtual void lock(); @@ -112,7 +112,7 @@ public: virtual Error capture_start(); virtual Error capture_stop(); - virtual Array capture_get_device_list(); + virtual PackedStringArray capture_get_device_list(); virtual void capture_set_device(const String &p_name); virtual String capture_get_device(); diff --git a/drivers/windows/dir_access_windows.cpp b/drivers/windows/dir_access_windows.cpp index 881575d245..11fd29c8f5 100644 --- a/drivers/windows/dir_access_windows.cpp +++ b/drivers/windows/dir_access_windows.cpp @@ -157,7 +157,7 @@ Error DirAccessWindows::make_dir(String p_dir) { p_dir = fix_path(p_dir); if (p_dir.is_relative_path()) { - p_dir = current_dir.plus_file(p_dir); + p_dir = current_dir.path_join(p_dir); } p_dir = p_dir.replace("/", "\\"); @@ -213,7 +213,7 @@ bool DirAccessWindows::file_exists(String p_file) { GLOBAL_LOCK_FUNCTION if (!p_file.is_absolute_path()) { - p_file = get_current_dir().plus_file(p_file); + p_file = get_current_dir().path_join(p_file); } p_file = fix_path(p_file); @@ -232,7 +232,7 @@ bool DirAccessWindows::dir_exists(String p_dir) { GLOBAL_LOCK_FUNCTION if (p_dir.is_relative_path()) { - p_dir = get_current_dir().plus_file(p_dir); + p_dir = get_current_dir().path_join(p_dir); } p_dir = fix_path(p_dir); @@ -247,13 +247,13 @@ bool DirAccessWindows::dir_exists(String p_dir) { Error DirAccessWindows::rename(String p_path, String p_new_path) { if (p_path.is_relative_path()) { - p_path = get_current_dir().plus_file(p_path); + p_path = get_current_dir().path_join(p_path); } p_path = fix_path(p_path); if (p_new_path.is_relative_path()) { - p_new_path = get_current_dir().plus_file(p_new_path); + p_new_path = get_current_dir().path_join(p_new_path); } p_new_path = fix_path(p_new_path); @@ -291,7 +291,7 @@ Error DirAccessWindows::rename(String p_path, String p_new_path) { Error DirAccessWindows::remove(String p_path) { if (p_path.is_relative_path()) { - p_path = get_current_dir().plus_file(p_path); + p_path = get_current_dir().path_join(p_path); } p_path = fix_path(p_path); @@ -402,6 +402,8 @@ DirAccessWindows::DirAccessWindows() { } DirAccessWindows::~DirAccessWindows() { + list_dir_end(); + memdelete(p); } diff --git a/drivers/windows/dir_access_windows.h b/drivers/windows/dir_access_windows.h index fbb07ddef8..c2835b3347 100644 --- a/drivers/windows/dir_access_windows.h +++ b/drivers/windows/dir_access_windows.h @@ -54,33 +54,33 @@ class DirAccessWindows : public DirAccess { bool 
_cishidden = false; public: - virtual Error list_dir_begin(); ///< This starts dir listing - virtual String get_next(); - virtual bool current_is_dir() const; - virtual bool current_is_hidden() const; - virtual void list_dir_end(); ///< + virtual Error list_dir_begin() override; ///< This starts dir listing + virtual String get_next() override; + virtual bool current_is_dir() const override; + virtual bool current_is_hidden() const override; + virtual void list_dir_end() override; ///< - virtual int get_drive_count(); - virtual String get_drive(int p_drive); + virtual int get_drive_count() override; + virtual String get_drive(int p_drive) override; - virtual Error change_dir(String p_dir); ///< can be relative or absolute, return false on success - virtual String get_current_dir(bool p_include_drive = true) const; ///< return current dir location + virtual Error change_dir(String p_dir) override; ///< can be relative or absolute, return false on success + virtual String get_current_dir(bool p_include_drive = true) const override; ///< return current dir location - virtual bool file_exists(String p_file); - virtual bool dir_exists(String p_dir); + virtual bool file_exists(String p_file) override; + virtual bool dir_exists(String p_dir) override; - virtual Error make_dir(String p_dir); + virtual Error make_dir(String p_dir) override; - virtual Error rename(String p_path, String p_new_path); - virtual Error remove(String p_path); + virtual Error rename(String p_path, String p_new_path) override; + virtual Error remove(String p_path) override; - virtual bool is_link(String p_file) { return false; }; - virtual String read_link(String p_file) { return p_file; }; - virtual Error create_link(String p_source, String p_target) { return FAILED; }; + virtual bool is_link(String p_file) override { return false; }; + virtual String read_link(String p_file) override { return p_file; }; + virtual Error create_link(String p_source, String p_target) override { return FAILED; }; - uint64_t get_space_left(); + uint64_t get_space_left() override; - virtual String get_filesystem_type() const; + virtual String get_filesystem_type() const override; DirAccessWindows(); ~DirAccessWindows(); diff --git a/drivers/windows/file_access_windows.h b/drivers/windows/file_access_windows.h index 5d67b6ca4f..8629bb936b 100644 --- a/drivers/windows/file_access_windows.h +++ b/drivers/windows/file_access_windows.h @@ -51,33 +51,33 @@ class FileAccessWindows : public FileAccess { void _close(); public: - virtual Error _open(const String &p_path, int p_mode_flags); ///< open a file - virtual bool is_open() const; ///< true when file is open + virtual Error _open(const String &p_path, int p_mode_flags) override; ///< open a file + virtual bool is_open() const override; ///< true when file is open - virtual String get_path() const; /// returns the path for the current open file - virtual String get_path_absolute() const; /// returns the absolute path for the current open file + virtual String get_path() const override; /// returns the path for the current open file + virtual String get_path_absolute() const override; /// returns the absolute path for the current open file - virtual void seek(uint64_t p_position); ///< seek to a given position - virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file - virtual uint64_t get_position() const; ///< get position in the file - virtual uint64_t get_length() const; ///< get size of the file + virtual void seek(uint64_t p_position) override; ///< seek to a given 
position + virtual void seek_end(int64_t p_position = 0) override; ///< seek from the end of file + virtual uint64_t get_position() const override; ///< get position in the file + virtual uint64_t get_length() const override; ///< get size of the file - virtual bool eof_reached() const; ///< reading passed EOF + virtual bool eof_reached() const override; ///< reading past EOF - virtual uint8_t get_8() const; ///< get a byte - virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const; + virtual uint8_t get_8() const override; ///< get a byte + virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const override; - virtual Error get_error() const; ///< get last error + virtual Error get_error() const override; ///< get last error - virtual void flush(); - virtual void store_8(uint8_t p_dest); ///< store a byte - virtual void store_buffer(const uint8_t *p_src, uint64_t p_length); ///< store an array of bytes + virtual void flush() override; + virtual void store_8(uint8_t p_dest) override; ///< store a byte + virtual void store_buffer(const uint8_t *p_src, uint64_t p_length) override; ///< store an array of bytes - virtual bool file_exists(const String &p_name); ///< return true if a file exists + virtual bool file_exists(const String &p_name) override; ///< return true if a file exists - uint64_t _get_modified_time(const String &p_file); - virtual uint32_t _get_unix_permissions(const String &p_file); - virtual Error _set_unix_permissions(const String &p_file, uint32_t p_permissions); + uint64_t _get_modified_time(const String &p_file) override; + virtual uint32_t _get_unix_permissions(const String &p_file) override; + virtual Error _set_unix_permissions(const String &p_file, uint32_t p_permissions) override; FileAccessWindows() {} virtual ~FileAccessWindows(); diff --git a/drivers/xaudio2/audio_driver_xaudio2.cpp b/drivers/xaudio2/audio_driver_xaudio2.cpp index c32c7cf1e5..6c48c1a844 100644 --- a/drivers/xaudio2/audio_driver_xaudio2.cpp +++ b/drivers/xaudio2/audio_driver_xaudio2.cpp @@ -38,9 +38,8 @@ const char *AudioDriverXAudio2::get_name() const { } Error AudioDriverXAudio2::init() { - active = false; - thread_exited = false; - exit_thread = false; + active.clear(); + exit_thread.clear(); pcm_open = false; samples_in = nullptr; @@ -86,17 +85,19 @@ Error AudioDriverXAudio2::init() { void AudioDriverXAudio2::thread_func(void *p_udata) { AudioDriverXAudio2 *ad = static_cast<AudioDriverXAudio2 *>(p_udata); - while (!ad->exit_thread) { - if (!ad->active) { + while (!ad->exit_thread.is_set()) { + if (!ad->active.is_set()) { for (int i = 0; i < AUDIO_BUFFERS; i++) { ad->xaudio_buffer[i].Flags = XAUDIO2_END_OF_STREAM; } } else { ad->lock(); + ad->start_counting_ticks(); ad->audio_server_process(ad->buffer_size, ad->samples_in); + ad->stop_counting_ticks(); ad->unlock(); for (unsigned int i = 0; i < ad->buffer_size * ad->channels; i++) { @@ -117,12 +118,10 @@ void AudioDriverXAudio2::thread_func(void *p_udata) { } } } - - ad->thread_exited = true; } void AudioDriverXAudio2::start() { - active = true; + active.set(); HRESULT hr = source_voice->Start(0); ERR_FAIL_COND_MSG(hr != S_OK, "Error starting XAudio2 driver.
Error code: " + itos(hr) + "."); } @@ -154,7 +153,7 @@ void AudioDriverXAudio2::unlock() { } void AudioDriverXAudio2::finish() { - exit_thread = true; + exit_thread.set(); thread.wait_to_finish(); if (source_voice) { diff --git a/drivers/xaudio2/audio_driver_xaudio2.h b/drivers/xaudio2/audio_driver_xaudio2.h index 81432ceb8e..0f64d54a1f 100644 --- a/drivers/xaudio2/audio_driver_xaudio2.h +++ b/drivers/xaudio2/audio_driver_xaudio2.h @@ -33,6 +33,7 @@ #include "core/os/mutex.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" #include "servers/audio_server.h" #include <mmsystem.h> @@ -77,9 +78,8 @@ class AudioDriverXAudio2 : public AudioDriver { int channels = 0; - bool active = false; - bool thread_exited = false; - mutable bool exit_thread = false; + SafeFlag active; + SafeFlag exit_thread; bool pcm_open = false; WAVEFORMATEX wave_format = { 0 };
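Across the ALSA, WASAPI, and XAudio2 drivers this change replaces plain bool active / exit_thread members with SafeFlag and its set() / clear() / is_set() calls, so the audio thread and the main thread never race on unsynchronized bools. The standalone model below uses std::atomic<bool> as a stand-in for the engine's SafeFlag (which lives in core/templates/safe_refcount.h); the class shown here is illustrative, not the engine implementation.

#include <atomic>
#include <cstdio>
#include <thread>

// Minimal stand-in for SafeFlag: an atomic boolean with set()/clear()/is_set().
class SafeFlag {
	std::atomic<bool> flag{ false };

public:
	void set() { flag.store(true, std::memory_order_release); }
	void clear() { flag.store(false, std::memory_order_release); }
	bool is_set() const { return flag.load(std::memory_order_acquire); }
};

int main() {
	SafeFlag exit_thread;
	std::thread audio([&]() {
		while (!exit_thread.is_set()) {
			// Mix audio here.
		}
	});
	exit_thread.set(); // Equivalent of the drivers' finish() signaling the thread.
	audio.join();
	printf("audio thread stopped cleanly\n");
	return 0;
}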