78 files changed, 11543 insertions, 6822 deletions
diff --git a/doc/classes/Animation.xml b/doc/classes/Animation.xml index d9a1f896f1..c0626dcfe4 100644 --- a/doc/classes/Animation.xml +++ b/doc/classes/Animation.xml @@ -305,9 +305,9 @@ <return type="int" /> <param index="0" name="track_idx" type="int" /> <param index="1" name="time" type="float" /> - <param index="2" name="exact" type="bool" default="false" /> + <param index="2" name="find_mode" type="int" enum="Animation.FindMode" default="0" /> <description> - Finds the key index by time in a given track. Optionally, only find it if the exact time is given. + Finds the key index by time in a given track. Depending on [param find_mode], it may only return a key whose time approximately or exactly matches the given time. </description> </method> <method name="track_get_interpolation_loop_wrap" qualifiers="const"> @@ -622,5 +622,14 @@ <constant name="LOOPED_FLAG_START" value="2" enum="LoopedFlag"> This flag indicates that the animation has reached the start of the animation and just after loop processed. </constant> + <constant name="FIND_MODE_NEAREST" value="0" enum="FindMode"> + Finds the key nearest to the given time. + </constant> + <constant name="FIND_MODE_APPROX" value="1" enum="FindMode"> + Finds only the key whose time approximately matches the given time. + </constant> + <constant name="FIND_MODE_EXACT" value="2" enum="FindMode"> + Finds only the key whose time exactly matches the given time. + </constant> </constants> </class> diff --git a/doc/classes/AnimationNodeStateMachineTransition.xml b/doc/classes/AnimationNodeStateMachineTransition.xml index 4c2a30030b..814b2d0052 100644 --- a/doc/classes/AnimationNodeStateMachineTransition.xml +++ b/doc/classes/AnimationNodeStateMachineTransition.xml @@ -22,14 +22,11 @@ <member name="advance_expression" type="String" setter="set_advance_expression" getter="get_advance_expression" default=""""> Use an expression as a condition for state machine transitions. It is possible to create complex animation advance conditions for switching between states and gives much greater flexibility for creating complex state machines by directly interfacing with the script code. </member> - <member name="auto_advance" type="bool" setter="set_auto_advance" getter="has_auto_advance" default="false"> - Turn on the transition automatically when this state is reached. This works best with [constant SWITCH_MODE_AT_END]. - </member> - <member name="disabled" type="bool" setter="set_disabled" getter="is_disabled" default="false"> - Don't use this transition during [method AnimationNodeStateMachinePlayback.travel] or [member auto_advance]. + <member name="advance_mode" type="int" setter="set_advance_mode" getter="get_advance_mode" enum="AnimationNodeStateMachineTransition.AdvanceMode" default="1"> + Determines whether the transition should be disabled, enabled only when using [method AnimationNodeStateMachinePlayback.travel], or traversed automatically if the [member advance_condition] and [member advance_expression] checks are true (if assigned). </member> <member name="priority" type="int" setter="set_priority" getter="get_priority" default="1"> - Lower priority transitions are preferred when travelling through the tree via [method AnimationNodeStateMachinePlayback.travel] or [member auto_advance]. + Lower priority transitions are preferred when travelling through the tree via [method AnimationNodeStateMachinePlayback.travel] or when [member advance_mode] is set to [constant ADVANCE_MODE_AUTO]. </member> <member name="switch_mode" type="int" setter="set_switch_mode" getter="get_switch_mode" enum="AnimationNodeStateMachineTransition.SwitchMode" default="0"> The transition type. 
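The Animation.FindMode enum introduced above replaces the old boolean exact argument of track_find_key(); in the editor hunks further below, calls that previously passed true now pass Animation::FIND_MODE_APPROX. A minimal C++ sketch of the three modes (illustrative only, not part of the patch; the helper name is hypothetical and an already populated Ref<Animation> and valid track index are assumed):

    #include "scene/resources/animation.h" // Animation, Ref<Animation>

    // Hypothetical helper showing the three Animation::FindMode values.
    int example_find_key(const Ref<Animation> &animation, int track, double time) {
        // Key closest to `time`.
        int nearest = animation->track_find_key(track, time, Animation::FIND_MODE_NEAREST);
        // Key whose time approximately matches `time`, or -1 if there is none
        // (the mode the editor code below substitutes for the old `exact = true`).
        int approx = animation->track_find_key(track, time, Animation::FIND_MODE_APPROX);
        // Key whose time exactly matches `time`, or -1 if there is none.
        int exact = animation->track_find_key(track, time, Animation::FIND_MODE_EXACT);
        return exact != -1 ? exact : (approx != -1 ? approx : nearest);
    }
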
@@ -58,5 +55,14 @@ <constant name="SWITCH_MODE_AT_END" value="2" enum="SwitchMode"> Wait for the current state playback to end, then switch to the beginning of the next state animation. </constant> + <constant name="ADVANCE_MODE_DISABLED" value="0" enum="AdvanceMode"> + Don't use this transition. + </constant> + <constant name="ADVANCE_MODE_ENABLED" value="1" enum="AdvanceMode"> + Only use this transition during [method AnimationNodeStateMachinePlayback.travel]. + </constant> + <constant name="ADVANCE_MODE_AUTO" value="2" enum="AdvanceMode"> + Automatically use this transition if the [member advance_condition] and [member advance_expression] checks are true (if assigned). + </constant> </constants> </class> diff --git a/doc/classes/AnimationTree.xml b/doc/classes/AnimationTree.xml index 21f4b37741..a17a727d7e 100644 --- a/doc/classes/AnimationTree.xml +++ b/doc/classes/AnimationTree.xml @@ -111,11 +111,25 @@ </member> </members> <signals> + <signal name="animation_finished"> + <param index="0" name="anim_name" type="StringName" /> + <description> + Emitted when an animation finishes playing. + [b]Note:[/b] This signal is not emitted if an animation is looping or aborted. Also be aware that unseen playback may occur due to sync and xfade. + </description> + </signal> <signal name="animation_player_changed"> <description> Emitted when the [member anim_player] is changed. </description> </signal> + <signal name="animation_started"> + <param index="0" name="anim_name" type="StringName" /> + <description> + Emitted when an animation starts playing. + [b]Note:[/b] This signal is not emitted if an animation is looping or is played back from the middle. Also be aware that unseen playback may occur due to sync and xfade. + </description> + </signal> </signals> <constants> <constant name="ANIMATION_PROCESS_PHYSICS" value="0" enum="AnimationProcessCallback"> diff --git a/doc/classes/EditorSpinSlider.xml b/doc/classes/EditorSpinSlider.xml index de105b32e1..d270d32df7 100644 --- a/doc/classes/EditorSpinSlider.xml +++ b/doc/classes/EditorSpinSlider.xml @@ -28,4 +28,26 @@ The suffix to display after the value (in a faded color). This should generally be a plural word. You may have to use an abbreviation if the suffix is too long to be displayed. </member> </members> + <signals> + <signal name="grabbed"> + <description> + Emitted when the spinner/slider is grabbed. + </description> + </signal> + <signal name="ungrabbed"> + <description> + Emitted when the spinner/slider is ungrabbed. + </description> + </signal> + <signal name="value_focus_entered"> + <description> + Emitted when the value form gains focus. + </description> + </signal> + <signal name="value_focus_exited"> + <description> + Emitted when the value form loses focus. 
+ </description> + </signal> + </signals> </class> diff --git a/editor/animation_bezier_editor.cpp b/editor/animation_bezier_editor.cpp index 530708f3e5..7b5a7b7046 100644 --- a/editor/animation_bezier_editor.cpp +++ b/editor/animation_bezier_editor.cpp @@ -575,7 +575,7 @@ void AnimationBezierTrackEdit::_notification(int p_what) { ep.point_rect.size = bezier_icon->get_size(); if (selection.has(IntPair(i, j))) { draw_texture(selected_icon, ep.point_rect.position); - draw_string(font, ep.point_rect.position + Vector2(8, -font->get_height(font_size) - 8), TTR("Time:") + " " + TS->format_number(rtos(Math::snapped(offset, 0.001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent); + draw_string(font, ep.point_rect.position + Vector2(8, -font->get_height(font_size) - 8), TTR("Time:") + " " + TS->format_number(rtos(Math::snapped(offset, 0.0001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent); draw_string(font, ep.point_rect.position + Vector2(8, -8), TTR("Value:") + " " + TS->format_number(rtos(Math::snapped(value, 0.001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent); } else { Color track_color = Color(1, 1, 1, 1); @@ -812,7 +812,7 @@ void AnimationBezierTrackEdit::_select_at_anim(const Ref<Animation> &p_anim, int return; } - int idx = animation->track_find_key(p_track, p_pos, true); + int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX); ERR_FAIL_COND(idx < 0); selection.insert(IntPair(p_track, idx)); @@ -1168,8 +1168,8 @@ void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) { new_point[4] = 0; real_t time = ((mb->get_position().x - limit) / timeline->get_zoom_scale()) + timeline->get_value(); - while (animation->track_find_key(selected_track, time, true) != -1) { - time += 0.001; + while (animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX) != -1) { + time += 0.0001; } Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); @@ -1179,7 +1179,7 @@ void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) { undo_redo->commit_action(); //then attempt to move - int index = animation->track_find_key(selected_track, time, true); + int index = animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX); ERR_FAIL_COND(index == -1); _clear_selection(); selection.insert(IntPair(selected_track, index)); @@ -1283,7 +1283,7 @@ void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) { for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { real_t newtime = editor->snap_time(animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x); - int idx = animation->track_find_key(E->get().first, newtime, true); + int idx = animation->track_find_key(E->get().first, newtime, Animation::FIND_MODE_APPROX); if (idx == -1) { continue; } @@ -1539,7 +1539,7 @@ void AnimationBezierTrackEdit::_menu_selected(int p_index) { real_t time = ((menu_insert_key.x - limit) / timeline->get_zoom_scale()) + timeline->get_value(); - while (animation->track_find_key(selected_track, time, true) != -1) { + while (animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX) != -1) { time += 0.001; } @@ -1599,7 +1599,7 @@ void AnimationBezierTrackEdit::duplicate_selection() { for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { real_t t = animation->track_get_key_time(E->get().first, E->get().second); real_t dst_time = t + (timeline->get_play_position() - top_time); - int existing_idx = 
animation->track_find_key(E->get().first, dst_time, true); + int existing_idx = animation->track_find_key(E->get().first, dst_time, Animation::FIND_MODE_APPROX); undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second)); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time); @@ -1623,7 +1623,7 @@ void AnimationBezierTrackEdit::duplicate_selection() { int track = E.first; real_t time = E.second; - int existing_idx = animation->track_find_key(track, time, true); + int existing_idx = animation->track_find_key(track, time, Animation::FIND_MODE_APPROX); if (existing_idx == -1) { continue; diff --git a/editor/animation_track_editor.cpp b/editor/animation_track_editor.cpp index 2ee06a0dbd..74e0db967d 100644 --- a/editor/animation_track_editor.cpp +++ b/editor/animation_track_editor.cpp @@ -48,1356 +48,1182 @@ #include "scene/scene_string_names.h" #include "servers/audio/audio_stream.h" -class AnimationTrackKeyEdit : public Object { - GDCLASS(AnimationTrackKeyEdit, Object); - -public: - bool setting = false; - bool animation_read_only = false; +void AnimationTrackKeyEdit::_bind_methods() { + ClassDB::bind_method(D_METHOD("_update_obj"), &AnimationTrackKeyEdit::_update_obj); + ClassDB::bind_method(D_METHOD("_key_ofs_changed"), &AnimationTrackKeyEdit::_key_ofs_changed); + ClassDB::bind_method(D_METHOD("_hide_script_from_inspector"), &AnimationTrackKeyEdit::_hide_script_from_inspector); + ClassDB::bind_method(D_METHOD("_hide_metadata_from_inspector"), &AnimationTrackKeyEdit::_hide_metadata_from_inspector); + ClassDB::bind_method(D_METHOD("get_root_path"), &AnimationTrackKeyEdit::get_root_path); + ClassDB::bind_method(D_METHOD("_dont_undo_redo"), &AnimationTrackKeyEdit::_dont_undo_redo); + ClassDB::bind_method(D_METHOD("_is_read_only"), &AnimationTrackKeyEdit::_is_read_only); +} - bool _hide_script_from_inspector() { return true; } - bool _hide_metadata_from_inspector() { return true; } - bool _dont_undo_redo() { return true; } +void AnimationTrackKeyEdit::_fix_node_path(Variant &value) { + NodePath np = value; - bool _is_read_only() { - return animation_read_only; + if (np == NodePath()) { + return; } - static void _bind_methods() { - ClassDB::bind_method(D_METHOD("_update_obj"), &AnimationTrackKeyEdit::_update_obj); - ClassDB::bind_method(D_METHOD("_key_ofs_changed"), &AnimationTrackKeyEdit::_key_ofs_changed); - ClassDB::bind_method(D_METHOD("_hide_script_from_inspector"), &AnimationTrackKeyEdit::_hide_script_from_inspector); - ClassDB::bind_method(D_METHOD("_hide_metadata_from_inspector"), &AnimationTrackKeyEdit::_hide_metadata_from_inspector); - ClassDB::bind_method(D_METHOD("get_root_path"), &AnimationTrackKeyEdit::get_root_path); - ClassDB::bind_method(D_METHOD("_dont_undo_redo"), &AnimationTrackKeyEdit::_dont_undo_redo); - ClassDB::bind_method(D_METHOD("_is_read_only"), &AnimationTrackKeyEdit::_is_read_only); - } + Node *root = EditorNode::get_singleton()->get_tree()->get_root(); - void _fix_node_path(Variant &value) { - NodePath np = value; + Node *np_node = root->get_node(np); + ERR_FAIL_COND(!np_node); - if (np == NodePath()) { - return; - } + Node *edited_node = root->get_node(base); + ERR_FAIL_COND(!edited_node); - Node *root = EditorNode::get_singleton()->get_tree()->get_root(); + value = edited_node->get_path_to(np_node); +} - Node *np_node = root->get_node(np); - 
ERR_FAIL_COND(!np_node); +void AnimationTrackKeyEdit::_update_obj(const Ref<Animation> &p_anim) { + if (setting || animation != p_anim) { + return; + } - Node *edited_node = root->get_node(base); - ERR_FAIL_COND(!edited_node); + notify_change(); +} - value = edited_node->get_path_to(np_node); +void AnimationTrackKeyEdit::_key_ofs_changed(const Ref<Animation> &p_anim, float from, float to) { + if (animation != p_anim || from != key_ofs) { + return; } - void _update_obj(const Ref<Animation> &p_anim) { - if (setting || animation != p_anim) { - return; - } + key_ofs = to; - notify_change(); + if (setting) { + return; } - void _key_ofs_changed(const Ref<Animation> &p_anim, float from, float to) { - if (animation != p_anim || from != key_ofs) { - return; - } + notify_change(); +} - key_ofs = to; +bool AnimationTrackKeyEdit::_set(const StringName &p_name, const Variant &p_value) { + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + ERR_FAIL_COND_V(key == -1, false); - if (setting) { - return; - } + String name = p_name; + if (name == "easing") { + float val = p_value; + float prev_val = animation->track_get_key_transition(track, key); + setting = true; + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + undo_redo->create_action(TTR("Animation Change Transition"), UndoRedo::MERGE_ENDS); + undo_redo->add_do_method(animation.ptr(), "track_set_key_transition", track, key, val); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_transition", track, key, prev_val); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - notify_change(); + setting = false; + return true; } - bool _set(const StringName &p_name, const Variant &p_value) { - int key = animation->track_find_key(track, key_ofs, true); - ERR_FAIL_COND_V(key == -1, false); - - String name = p_name; - if (name == "time" || name == "frame") { - float new_time = p_value; - - if (name == "frame") { - float fps = animation->get_step(); - if (fps > 0) { - fps = 1.0 / fps; + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + case Animation::TYPE_ROTATION_3D: + case Animation::TYPE_SCALE_3D: { + if (name == "position" || name == "rotation" || name == "scale") { + Variant old = animation->track_get_key_value(track, key); + setting = true; + String chan; + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + chan = "Position3D"; + break; + case Animation::TYPE_ROTATION_3D: + chan = "Rotation3D"; + break; + case Animation::TYPE_SCALE_3D: + chan = "Scale3D"; + break; + default: { + } } - new_time /= fps; - } - if (new_time == key_ofs) { + undo_redo->create_action(vformat(TTR("Animation Change %s"), chan)); + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, p_value); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, old); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); + + setting = false; return true; } - int existing = animation->track_find_key(track, new_time, true); + } break; + case Animation::TYPE_BLEND_SHAPE: + case Animation::TYPE_VALUE: { + if (name == "value") { + Variant value = p_value; - setting = true; - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - 
undo_redo->create_action(TTR("Animation Change Keyframe Time"), UndoRedo::MERGE_ENDS); + if (value.get_type() == Variant::NODE_PATH) { + _fix_node_path(value); + } - Variant val = animation->track_get_key_value(track, key); - float trans = animation->track_get_key_transition(track, key); + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + Variant prev = animation->track_get_key_value(track, key); + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - undo_redo->add_do_method(animation.ptr(), "track_remove_key", track, key); - undo_redo->add_do_method(animation.ptr(), "track_insert_key", track, new_time, val, trans); - undo_redo->add_do_method(this, "_key_ofs_changed", animation, key_ofs, new_time); - undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", track, new_time); - undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, key_ofs, val, trans); - undo_redo->add_undo_method(this, "_key_ofs_changed", animation, new_time, key_ofs); + setting = false; + return true; + } + } break; + case Animation::TYPE_METHOD: { + Dictionary d_old = animation->track_get_key_value(track, key); + Dictionary d_new = d_old.duplicate(); + + bool change_notify_deserved = false; + bool mergeable = false; + + if (name == "name") { + d_new["method"] = p_value; + } else if (name == "arg_count") { + Vector<Variant> args = d_old["args"]; + args.resize(p_value); + d_new["args"] = args; + change_notify_deserved = true; + } else if (name.begins_with("args/")) { + Vector<Variant> args = d_old["args"]; + int idx = name.get_slice("/", 1).to_int(); + ERR_FAIL_INDEX_V(idx, args.size(), false); + + String what = name.get_slice("/", 2); + if (what == "type") { + Variant::Type t = Variant::Type(int(p_value)); + + if (t != args[idx].get_type()) { + Callable::CallError err; + if (Variant::can_convert(args[idx].get_type(), t)) { + Variant old = args[idx]; + Variant *ptrs[1] = { &old }; + Variant::construct(t, args.write[idx], (const Variant **)ptrs, 1, err); + } else { + Variant::construct(t, args.write[idx], nullptr, 0, err); + } + change_notify_deserved = true; + d_new["args"] = args; + } + } else if (what == "value") { + Variant value = p_value; + if (value.get_type() == Variant::NODE_PATH) { + _fix_node_path(value); + } - if (existing != -1) { - Variant v = animation->track_get_key_value(track, existing); - trans = animation->track_get_key_transition(track, existing); - undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, new_time, v, trans); + args.write[idx] = value; + d_new["args"] = args; + mergeable = true; + } } - undo_redo->commit_action(); - setting = false; - return true; - } + if (mergeable) { + undo_redo->create_action(TTR("Animation Change Call"), UndoRedo::MERGE_ENDS); + } else { + undo_redo->create_action(TTR("Animation Change Call")); + } - if (name == "easing") { - float val = p_value; - float prev_val = animation->track_get_key_transition(track, key); setting = true; - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - undo_redo->create_action(TTR("Animation Change Transition"), UndoRedo::MERGE_ENDS); - undo_redo->add_do_method(animation.ptr(), "track_set_key_transition", track, key, val); - 
undo_redo->add_undo_method(animation.ptr(), "track_set_key_transition", track, key, prev_val); + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, d_new); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, d_old); undo_redo->add_do_method(this, "_update_obj", animation); undo_redo->add_undo_method(this, "_update_obj", animation); undo_redo->commit_action(); setting = false; + if (change_notify_deserved) { + notify_change(); + } return true; - } - - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - case Animation::TYPE_ROTATION_3D: - case Animation::TYPE_SCALE_3D: { - if (name == "position" || name == "rotation" || name == "scale") { - Variant old = animation->track_get_key_value(track, key); - setting = true; - String chan; - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - chan = "Position3D"; - break; - case Animation::TYPE_ROTATION_3D: - chan = "Rotation3D"; - break; - case Animation::TYPE_SCALE_3D: - chan = "Scale3D"; - break; - default: { - } - } - - undo_redo->create_action(vformat(TTR("Anim Change %s"), chan)); - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, p_value); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, old); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); + } break; + case Animation::TYPE_BEZIER: { + if (name == "value") { + const Variant &value = p_value; - setting = false; - return true; - } + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + float prev = animation->bezier_track_get_key_value(track, key); + undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_value", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_value", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - } break; - case Animation::TYPE_BLEND_SHAPE: - case Animation::TYPE_VALUE: { - if (name == "value") { - Variant value = p_value; + setting = false; + return true; + } - if (value.get_type() == Variant::NODE_PATH) { - _fix_node_path(value); - } + if (name == "in_handle") { + const Variant &value = p_value; - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - Variant prev = animation->track_get_key_value(track, key); - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + Vector2 prev = animation->bezier_track_get_key_in_handle(track, key); + undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - setting 
= false; - return true; - } - } break; - case Animation::TYPE_METHOD: { - Dictionary d_old = animation->track_get_key_value(track, key); - Dictionary d_new = d_old.duplicate(); + setting = false; + return true; + } - bool change_notify_deserved = false; - bool mergeable = false; + if (name == "out_handle") { + const Variant &value = p_value; - if (name == "name") { - d_new["method"] = p_value; - } else if (name == "arg_count") { - Vector<Variant> args = d_old["args"]; - args.resize(p_value); - d_new["args"] = args; - change_notify_deserved = true; - } else if (name.begins_with("args/")) { - Vector<Variant> args = d_old["args"]; - int idx = name.get_slice("/", 1).to_int(); - ERR_FAIL_INDEX_V(idx, args.size(), false); - - String what = name.get_slice("/", 2); - if (what == "type") { - Variant::Type t = Variant::Type(int(p_value)); - - if (t != args[idx].get_type()) { - Callable::CallError err; - if (Variant::can_convert(args[idx].get_type(), t)) { - Variant old = args[idx]; - Variant *ptrs[1] = { &old }; - Variant::construct(t, args.write[idx], (const Variant **)ptrs, 1, err); - } else { - Variant::construct(t, args.write[idx], nullptr, 0, err); - } - change_notify_deserved = true; - d_new["args"] = args; - } - } else if (what == "value") { - Variant value = p_value; - if (value.get_type() == Variant::NODE_PATH) { - _fix_node_path(value); - } + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + Vector2 prev = animation->bezier_track_get_key_out_handle(track, key); + undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - args.write[idx] = value; - d_new["args"] = args; - mergeable = true; - } - } + setting = false; + return true; + } - if (mergeable) { - undo_redo->create_action(TTR("Animation Change Call"), UndoRedo::MERGE_ENDS); - } else { - undo_redo->create_action(TTR("Animation Change Call")); - } + if (name == "handle_mode") { + const Variant &value = p_value; setting = true; - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, d_new); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, d_old); + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + int prev = animation->bezier_track_get_key_handle_mode(track, key); + undo_redo->add_do_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, value); + undo_redo->add_undo_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, prev); undo_redo->add_do_method(this, "_update_obj", animation); undo_redo->add_undo_method(this, "_update_obj", animation); undo_redo->commit_action(); setting = false; - if (change_notify_deserved) { - notify_change(); - } return true; - } break; - case Animation::TYPE_BEZIER: { - if (name == "value") { - const Variant &value = p_value; - - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - float prev = animation->bezier_track_get_key_value(track, key); - undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_value", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_value", track, key, prev); - undo_redo->add_do_method(this, 
"_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } + } + } break; + case Animation::TYPE_AUDIO: { + if (name == "stream") { + Ref<AudioStream> stream = p_value; - if (name == "in_handle") { - const Variant &value = p_value; + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + Ref<Resource> prev = animation->audio_track_get_key_stream(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_stream", track, key, stream); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_stream", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - Vector2 prev = animation->bezier_track_get_key_in_handle(track, key); - undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } + setting = false; + return true; + } - if (name == "out_handle") { - const Variant &value = p_value; + if (name == "start_offset") { + float value = p_value; - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - Vector2 prev = animation->bezier_track_get_key_out_handle(track, key); - undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + float prev = animation->audio_track_get_key_start_offset(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - if (name == "handle_mode") { - const Variant &value = p_value; + setting = false; + return true; + } - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - int prev = animation->bezier_track_get_key_handle_mode(track, key); - undo_redo->add_do_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, value); - undo_redo->add_undo_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } - } break; - case Animation::TYPE_AUDIO: { - if (name == "stream") { - Ref<AudioStream> stream = p_value; + if (name == "end_offset") { + float value = p_value; - setting = 
true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - Ref<Resource> prev = animation->audio_track_get_key_stream(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_stream", track, key, stream); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_stream", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + float prev = animation->audio_track_get_key_end_offset(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - if (name == "start_offset") { - float value = p_value; + setting = false; + return true; + } + } break; + case Animation::TYPE_ANIMATION: { + if (name == "animation") { + StringName anim_name = p_value; - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - float prev = animation->audio_track_get_key_start_offset(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } + setting = true; + undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); + StringName prev = animation->animation_track_get_key_animation(track, key); + undo_redo->add_do_method(animation.ptr(), "animation_track_set_key_animation", track, key, anim_name); + undo_redo->add_undo_method(animation.ptr(), "animation_track_set_key_animation", track, key, prev); + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + undo_redo->commit_action(); - if (name == "end_offset") { - float value = p_value; + setting = false; + return true; + } + } break; + } - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - float prev = animation->audio_track_get_key_end_offset(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } - } break; - case Animation::TYPE_ANIMATION: { - if (name == "animation") { - StringName anim_name = p_value; + return false; +} - setting = true; - undo_redo->create_action(TTR("Animation Change Keyframe Value"), UndoRedo::MERGE_ENDS); - StringName prev = animation->animation_track_get_key_animation(track, key); - undo_redo->add_do_method(animation.ptr(), "animation_track_set_key_animation", track, key, anim_name); - undo_redo->add_undo_method(animation.ptr(), 
"animation_track_set_key_animation", track, key, prev); - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - undo_redo->commit_action(); - - setting = false; - return true; - } - } break; - } +bool AnimationTrackKeyEdit::_get(const StringName &p_name, Variant &r_ret) const { + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + ERR_FAIL_COND_V(key == -1, false); - return false; + String name = p_name; + if (name == "easing") { + r_ret = animation->track_get_key_transition(track, key); + return true; } - bool _get(const StringName &p_name, Variant &r_ret) const { - int key = animation->track_find_key(track, key_ofs, true); - ERR_FAIL_COND_V(key == -1, false); - - String name = p_name; - if (name == "time") { - r_ret = key_ofs; - return true; - } - - if (name == "frame") { - float fps = animation->get_step(); - if (fps > 0) { - fps = 1.0 / fps; + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + case Animation::TYPE_ROTATION_3D: + case Animation::TYPE_SCALE_3D: { + if (name == "position" || name == "rotation" || name == "scale") { + r_ret = animation->track_get_key_value(track, key); + return true; + } + } break; + case Animation::TYPE_BLEND_SHAPE: + case Animation::TYPE_VALUE: { + if (name == "value") { + r_ret = animation->track_get_key_value(track, key); + return true; } - r_ret = key_ofs * fps; - return true; - } - if (name == "easing") { - r_ret = animation->track_get_key_transition(track, key); - return true; - } + } break; + case Animation::TYPE_METHOD: { + Dictionary d = animation->track_get_key_value(track, key); - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - case Animation::TYPE_ROTATION_3D: - case Animation::TYPE_SCALE_3D: { - if (name == "position" || name == "rotation" || name == "scale") { - r_ret = animation->track_get_key_value(track, key); - return true; - } - } break; - case Animation::TYPE_BLEND_SHAPE: - case Animation::TYPE_VALUE: { - if (name == "value") { - r_ret = animation->track_get_key_value(track, key); - return true; - } + if (name == "name") { + ERR_FAIL_COND_V(!d.has("method"), false); + r_ret = d["method"]; + return true; + } - } break; - case Animation::TYPE_METHOD: { - Dictionary d = animation->track_get_key_value(track, key); + ERR_FAIL_COND_V(!d.has("args"), false); - if (name == "name") { - ERR_FAIL_COND_V(!d.has("method"), false); - r_ret = d["method"]; - return true; - } + Vector<Variant> args = d["args"]; - ERR_FAIL_COND_V(!d.has("args"), false); + if (name == "arg_count") { + r_ret = args.size(); + return true; + } - Vector<Variant> args = d["args"]; + if (name.begins_with("args/")) { + int idx = name.get_slice("/", 1).to_int(); + ERR_FAIL_INDEX_V(idx, args.size(), false); - if (name == "arg_count") { - r_ret = args.size(); + String what = name.get_slice("/", 2); + if (what == "type") { + r_ret = args[idx].get_type(); return true; } - if (name.begins_with("args/")) { - int idx = name.get_slice("/", 1).to_int(); - ERR_FAIL_INDEX_V(idx, args.size(), false); - - String what = name.get_slice("/", 2); - if (what == "type") { - r_ret = args[idx].get_type(); - return true; - } - - if (what == "value") { - r_ret = args[idx]; - return true; - } - } - - } break; - case Animation::TYPE_BEZIER: { - if (name == "value") { - r_ret = animation->bezier_track_get_key_value(track, key); + if (what == "value") { + r_ret = args[idx]; return true; } + } - if (name == "in_handle") { - r_ret = 
animation->bezier_track_get_key_in_handle(track, key); - return true; - } + } break; + case Animation::TYPE_BEZIER: { + if (name == "value") { + r_ret = animation->bezier_track_get_key_value(track, key); + return true; + } - if (name == "out_handle") { - r_ret = animation->bezier_track_get_key_out_handle(track, key); - return true; - } + if (name == "in_handle") { + r_ret = animation->bezier_track_get_key_in_handle(track, key); + return true; + } - if (name == "handle_mode") { - r_ret = animation->bezier_track_get_key_handle_mode(track, key); - return true; - } + if (name == "out_handle") { + r_ret = animation->bezier_track_get_key_out_handle(track, key); + return true; + } - } break; - case Animation::TYPE_AUDIO: { - if (name == "stream") { - r_ret = animation->audio_track_get_key_stream(track, key); - return true; - } + if (name == "handle_mode") { + r_ret = animation->bezier_track_get_key_handle_mode(track, key); + return true; + } - if (name == "start_offset") { - r_ret = animation->audio_track_get_key_start_offset(track, key); - return true; - } + } break; + case Animation::TYPE_AUDIO: { + if (name == "stream") { + r_ret = animation->audio_track_get_key_stream(track, key); + return true; + } - if (name == "end_offset") { - r_ret = animation->audio_track_get_key_end_offset(track, key); - return true; - } + if (name == "start_offset") { + r_ret = animation->audio_track_get_key_start_offset(track, key); + return true; + } - } break; - case Animation::TYPE_ANIMATION: { - if (name == "animation") { - r_ret = animation->animation_track_get_key_animation(track, key); - return true; - } + if (name == "end_offset") { + r_ret = animation->audio_track_get_key_end_offset(track, key); + return true; + } - } break; - } + } break; + case Animation::TYPE_ANIMATION: { + if (name == "animation") { + r_ret = animation->animation_track_get_key_animation(track, key); + return true; + } - return false; + } break; } - void _get_property_list(List<PropertyInfo> *p_list) const { - if (animation.is_null()) { - return; - } - ERR_FAIL_INDEX(track, animation->get_track_count()); - int key = animation->track_find_key(track, key_ofs, true); - ERR_FAIL_COND(key == -1); + return false; +} - if (use_fps && animation->get_step() > 0) { - float max_frame = animation->get_length() / animation->get_step(); - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("frame"), PROPERTY_HINT_RANGE, "0," + rtos(max_frame) + ",1")); - } else { - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("time"), PROPERTY_HINT_RANGE, "0," + rtos(animation->get_length()) + ",0.01")); - } +void AnimationTrackKeyEdit::_get_property_list(List<PropertyInfo> *p_list) const { + if (animation.is_null()) { + return; + } - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: { - p_list->push_back(PropertyInfo(Variant::VECTOR3, PNAME("position"))); - } break; - case Animation::TYPE_ROTATION_3D: { - p_list->push_back(PropertyInfo(Variant::QUATERNION, PNAME("rotation"))); - } break; - case Animation::TYPE_SCALE_3D: { - p_list->push_back(PropertyInfo(Variant::VECTOR3, PNAME("scale"))); - } break; - case Animation::TYPE_BLEND_SHAPE: { - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("value"))); - } break; - case Animation::TYPE_VALUE: { - Variant v = animation->track_get_key_value(track, key); + ERR_FAIL_INDEX(track, animation->get_track_count()); + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + ERR_FAIL_COND(key == -1); - if (hint.type != Variant::NIL) { - PropertyInfo pi = hint; - 
pi.name = PNAME("value"); - p_list->push_back(pi); - } else { - PropertyHint val_hint = PROPERTY_HINT_NONE; - String val_hint_string; - - if (v.get_type() == Variant::OBJECT) { - // Could actually check the object property if exists..? Yes I will! - Ref<Resource> res = v; - if (res.is_valid()) { - val_hint = PROPERTY_HINT_RESOURCE_TYPE; - val_hint_string = res->get_class(); - } - } + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: { + p_list->push_back(PropertyInfo(Variant::VECTOR3, PNAME("position"))); + } break; + case Animation::TYPE_ROTATION_3D: { + p_list->push_back(PropertyInfo(Variant::QUATERNION, PNAME("rotation"))); + } break; + case Animation::TYPE_SCALE_3D: { + p_list->push_back(PropertyInfo(Variant::VECTOR3, PNAME("scale"))); + } break; + case Animation::TYPE_BLEND_SHAPE: { + p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("value"))); + } break; + case Animation::TYPE_VALUE: { + Variant v = animation->track_get_key_value(track, key); - if (v.get_type() != Variant::NIL) { - p_list->push_back(PropertyInfo(v.get_type(), PNAME("value"), val_hint, val_hint_string)); + if (hint.type != Variant::NIL) { + PropertyInfo pi = hint; + pi.name = PNAME("value"); + p_list->push_back(pi); + } else { + PropertyHint val_hint = PROPERTY_HINT_NONE; + String val_hint_string; + + if (v.get_type() == Variant::OBJECT) { + // Could actually check the object property if exists..? Yes I will! + Ref<Resource> res = v; + if (res.is_valid()) { + val_hint = PROPERTY_HINT_RESOURCE_TYPE; + val_hint_string = res->get_class(); } } - } break; - case Animation::TYPE_METHOD: { - p_list->push_back(PropertyInfo(Variant::STRING_NAME, PNAME("name"))); - p_list->push_back(PropertyInfo(Variant::INT, PNAME("arg_count"), PROPERTY_HINT_RANGE, "0,32,1,or_greater")); - - Dictionary d = animation->track_get_key_value(track, key); - ERR_FAIL_COND(!d.has("args")); - Vector<Variant> args = d["args"]; - String vtypes; - for (int i = 0; i < Variant::VARIANT_MAX; i++) { - if (i > 0) { - vtypes += ","; - } - vtypes += Variant::get_type_name(Variant::Type(i)); + if (v.get_type() != Variant::NIL) { + p_list->push_back(PropertyInfo(v.get_type(), PNAME("value"), val_hint, val_hint_string)); } + } - for (int i = 0; i < args.size(); i++) { - p_list->push_back(PropertyInfo(Variant::INT, vformat("%s/%d/%s", PNAME("args"), i, PNAME("type")), PROPERTY_HINT_ENUM, vtypes)); - if (args[i].get_type() != Variant::NIL) { - p_list->push_back(PropertyInfo(args[i].get_type(), vformat("%s/%d/%s", PNAME("args"), i, PNAME("value")))); - } + } break; + case Animation::TYPE_METHOD: { + p_list->push_back(PropertyInfo(Variant::STRING_NAME, PNAME("name"))); + p_list->push_back(PropertyInfo(Variant::INT, PNAME("arg_count"), PROPERTY_HINT_RANGE, "0,32,1,or_greater")); + + Dictionary d = animation->track_get_key_value(track, key); + ERR_FAIL_COND(!d.has("args")); + Vector<Variant> args = d["args"]; + String vtypes; + for (int i = 0; i < Variant::VARIANT_MAX; i++) { + if (i > 0) { + vtypes += ","; } + vtypes += Variant::get_type_name(Variant::Type(i)); + } - } break; - case Animation::TYPE_BEZIER: { - Animation::HandleMode hm = animation->bezier_track_get_key_handle_mode(track, key); - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("value"))); - if (hm == Animation::HANDLE_MODE_LINEAR) { - p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("in_handle"), PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_READ_ONLY)); - p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("out_handle"), 
PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_READ_ONLY)); - } else { - p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("in_handle"))); - p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("out_handle"))); + for (int i = 0; i < args.size(); i++) { + p_list->push_back(PropertyInfo(Variant::INT, vformat("%s/%d/%s", PNAME("args"), i, PNAME("type")), PROPERTY_HINT_ENUM, vtypes)); + if (args[i].get_type() != Variant::NIL) { + p_list->push_back(PropertyInfo(args[i].get_type(), vformat("%s/%d/%s", PNAME("args"), i, PNAME("value")))); } - p_list->push_back(PropertyInfo(Variant::INT, PNAME("handle_mode"), PROPERTY_HINT_ENUM, "Free,Linear,Balanced,Mirrored")); - - } break; - case Animation::TYPE_AUDIO: { - p_list->push_back(PropertyInfo(Variant::OBJECT, PNAME("stream"), PROPERTY_HINT_RESOURCE_TYPE, "AudioStream")); - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("start_offset"), PROPERTY_HINT_RANGE, "0,3600,0.01,or_greater")); - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("end_offset"), PROPERTY_HINT_RANGE, "0,3600,0.01,or_greater")); + } - } break; - case Animation::TYPE_ANIMATION: { - String animations; + } break; + case Animation::TYPE_BEZIER: { + Animation::HandleMode hm = animation->bezier_track_get_key_handle_mode(track, key); + p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("value"))); + if (hm == Animation::HANDLE_MODE_LINEAR) { + p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("in_handle"), PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_READ_ONLY)); + p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("out_handle"), PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_READ_ONLY)); + } else { + p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("in_handle"))); + p_list->push_back(PropertyInfo(Variant::VECTOR2, PNAME("out_handle"))); + } + p_list->push_back(PropertyInfo(Variant::INT, PNAME("handle_mode"), PROPERTY_HINT_ENUM, "Free,Linear,Balanced,Mirrored")); - if (root_path && root_path->has_node(animation->track_get_path(track))) { - AnimationPlayer *ap = Object::cast_to<AnimationPlayer>(root_path->get_node(animation->track_get_path(track))); - if (ap) { - List<StringName> anims; - ap->get_animation_list(&anims); - for (const StringName &E : anims) { - if (!animations.is_empty()) { - animations += ","; - } + } break; + case Animation::TYPE_AUDIO: { + p_list->push_back(PropertyInfo(Variant::OBJECT, PNAME("stream"), PROPERTY_HINT_RESOURCE_TYPE, "AudioStream")); + p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("start_offset"), PROPERTY_HINT_RANGE, "0,3600,0.0001,or_greater")); + p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("end_offset"), PROPERTY_HINT_RANGE, "0,3600,0.0001,or_greater")); - animations += String(E); + } break; + case Animation::TYPE_ANIMATION: { + String animations; + + if (root_path && root_path->has_node(animation->track_get_path(track))) { + AnimationPlayer *ap = Object::cast_to<AnimationPlayer>(root_path->get_node(animation->track_get_path(track))); + if (ap) { + List<StringName> anims; + ap->get_animation_list(&anims); + for (const StringName &E : anims) { + if (!animations.is_empty()) { + animations += ","; } + + animations += String(E); } } + } - if (!animations.is_empty()) { - animations += ","; - } - animations += "[stop]"; + if (!animations.is_empty()) { + animations += ","; + } + animations += "[stop]"; - p_list->push_back(PropertyInfo(Variant::STRING_NAME, PNAME("animation"), PROPERTY_HINT_ENUM, animations)); + 
p_list->push_back(PropertyInfo(Variant::STRING_NAME, PNAME("animation"), PROPERTY_HINT_ENUM, animations)); - } break; - } + } break; + } - if (animation->track_get_type(track) == Animation::TYPE_VALUE) { - p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("easing"), PROPERTY_HINT_EXP_EASING)); - } + if (animation->track_get_type(track) == Animation::TYPE_VALUE) { + p_list->push_back(PropertyInfo(Variant::FLOAT, PNAME("easing"), PROPERTY_HINT_EXP_EASING)); } +} - Ref<Animation> animation; - int track = -1; - float key_ofs = 0; - Node *root_path = nullptr; +void AnimationTrackKeyEdit::notify_change() { + notify_property_list_changed(); +} - PropertyInfo hint; - NodePath base; - bool use_fps = false; +Node *AnimationTrackKeyEdit::get_root_path() { + return root_path; +} - void notify_change() { - notify_property_list_changed(); - } +void AnimationTrackKeyEdit::set_use_fps(bool p_enable) { + use_fps = p_enable; + notify_property_list_changed(); +} - Node *get_root_path() { - return root_path; - } +void AnimationMultiTrackKeyEdit::_bind_methods() { + ClassDB::bind_method(D_METHOD("_update_obj"), &AnimationMultiTrackKeyEdit::_update_obj); + ClassDB::bind_method(D_METHOD("_key_ofs_changed"), &AnimationMultiTrackKeyEdit::_key_ofs_changed); + ClassDB::bind_method(D_METHOD("_hide_script_from_inspector"), &AnimationMultiTrackKeyEdit::_hide_script_from_inspector); + ClassDB::bind_method(D_METHOD("_hide_metadata_from_inspector"), &AnimationMultiTrackKeyEdit::_hide_metadata_from_inspector); + ClassDB::bind_method(D_METHOD("get_root_path"), &AnimationMultiTrackKeyEdit::get_root_path); + ClassDB::bind_method(D_METHOD("_dont_undo_redo"), &AnimationMultiTrackKeyEdit::_dont_undo_redo); + ClassDB::bind_method(D_METHOD("_is_read_only"), &AnimationMultiTrackKeyEdit::_is_read_only); +} - void set_use_fps(bool p_enable) { - use_fps = p_enable; - notify_property_list_changed(); +void AnimationMultiTrackKeyEdit::_fix_node_path(Variant &value, NodePath &base) { + NodePath np = value; + + if (np == NodePath()) { + return; } -}; -class AnimationMultiTrackKeyEdit : public Object { - GDCLASS(AnimationMultiTrackKeyEdit, Object); + Node *root = EditorNode::get_singleton()->get_tree()->get_root(); -public: - bool setting = false; - bool animation_read_only = false; + Node *np_node = root->get_node(np); + ERR_FAIL_COND(!np_node); - bool _hide_script_from_inspector() { return true; } - bool _hide_metadata_from_inspector() { return true; } - bool _dont_undo_redo() { return true; } + Node *edited_node = root->get_node(base); + ERR_FAIL_COND(!edited_node); - bool _is_read_only() { - return animation_read_only; - } + value = edited_node->get_path_to(np_node); +} - static void _bind_methods() { - ClassDB::bind_method(D_METHOD("_update_obj"), &AnimationMultiTrackKeyEdit::_update_obj); - ClassDB::bind_method(D_METHOD("_key_ofs_changed"), &AnimationMultiTrackKeyEdit::_key_ofs_changed); - ClassDB::bind_method(D_METHOD("_hide_script_from_inspector"), &AnimationMultiTrackKeyEdit::_hide_script_from_inspector); - ClassDB::bind_method(D_METHOD("_hide_metadata_from_inspector"), &AnimationMultiTrackKeyEdit::_hide_metadata_from_inspector); - ClassDB::bind_method(D_METHOD("get_root_path"), &AnimationMultiTrackKeyEdit::get_root_path); - ClassDB::bind_method(D_METHOD("_dont_undo_redo"), &AnimationMultiTrackKeyEdit::_dont_undo_redo); - ClassDB::bind_method(D_METHOD("_is_read_only"), &AnimationMultiTrackKeyEdit::_is_read_only); +void AnimationMultiTrackKeyEdit::_update_obj(const Ref<Animation> &p_anim) { + if (setting || animation != p_anim) { 
+ return; } - void _fix_node_path(Variant &value, NodePath &base) { - NodePath np = value; + notify_change(); +} - if (np == NodePath()) { - return; - } +void AnimationMultiTrackKeyEdit::_key_ofs_changed(const Ref<Animation> &p_anim, float from, float to) { + if (animation != p_anim) { + return; + } - Node *root = EditorNode::get_singleton()->get_tree()->get_root(); + for (const KeyValue<int, List<float>> &E : key_ofs_map) { + int key = 0; + for (const float &key_ofs : E.value) { + if (from != key_ofs) { + key++; + continue; + } - Node *np_node = root->get_node(np); - ERR_FAIL_COND(!np_node); + int track = E.key; + key_ofs_map[track][key] = to; - Node *edited_node = root->get_node(base); - ERR_FAIL_COND(!edited_node); + if (setting) { + return; + } - value = edited_node->get_path_to(np_node); - } + notify_change(); - void _update_obj(const Ref<Animation> &p_anim) { - if (setting || animation != p_anim) { return; } - - notify_change(); } +} - void _key_ofs_changed(const Ref<Animation> &p_anim, float from, float to) { - if (animation != p_anim) { - return; - } - - for (const KeyValue<int, List<float>> &E : key_ofs_map) { - int key = 0; - for (const float &key_ofs : E.value) { - if (from != key_ofs) { - key++; - continue; - } +bool AnimationMultiTrackKeyEdit::_set(const StringName &p_name, const Variant &p_value) { + bool update_obj = false; + bool change_notify_deserved = false; + for (const KeyValue<int, List<float>> &E : key_ofs_map) { + int track = E.key; + for (const float &key_ofs : E.value) { + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + ERR_FAIL_COND_V(key == -1, false); - int track = E.key; - key_ofs_map[track][key] = to; + String name = p_name; + if (name == "easing") { + float val = p_value; + float prev_val = animation->track_get_key_transition(track, key); - if (setting) { - return; + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Transition"), UndoRedo::MERGE_ENDS); } - - notify_change(); - - return; + undo_redo->add_do_method(animation.ptr(), "track_set_key_transition", track, key, val); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_transition", track, key, prev_val); + update_obj = true; } - } - } - bool _set(const StringName &p_name, const Variant &p_value) { - bool update_obj = false; - bool change_notify_deserved = false; - for (const KeyValue<int, List<float>> &E : key_ofs_map) { - int track = E.key; - for (const float &key_ofs : E.value) { - int key = animation->track_find_key(track, key_ofs, true); - ERR_FAIL_COND_V(key == -1, false); - - String name = p_name; - if (name == "time" || name == "frame") { - float new_time = p_value; - - if (name == "frame") { - float fps = animation->get_step(); - if (fps > 0) { - fps = 1.0 / fps; + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + case Animation::TYPE_ROTATION_3D: + case Animation::TYPE_SCALE_3D: { + Variant old = animation->track_get_key_value(track, key); + if (!setting) { + String chan; + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + chan = "Position3D"; + break; + case Animation::TYPE_ROTATION_3D: + chan = "Rotation3D"; + break; + case Animation::TYPE_SCALE_3D: + chan = "Scale3D"; + break; + default: { + } } - new_time /= fps; - } - - int existing = animation->track_find_key(track, new_time, true); - 
Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - if (!setting) { setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Time"), UndoRedo::MERGE_ENDS); + undo_redo->create_action(vformat(TTR("Animation Multi Change %s"), chan)); } + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, p_value); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, old); + update_obj = true; + } break; + case Animation::TYPE_BLEND_SHAPE: + case Animation::TYPE_VALUE: { + if (name == "value") { + Variant value = p_value; - Variant val = animation->track_get_key_value(track, key); - float trans = animation->track_get_key_transition(track, key); - - undo_redo->add_do_method(animation.ptr(), "track_remove_key", track, key); - undo_redo->add_do_method(animation.ptr(), "track_insert_key", track, new_time, val, trans); - undo_redo->add_do_method(this, "_key_ofs_changed", animation, key_ofs, new_time); - undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", track, new_time); - undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, key_ofs, val, trans); - undo_redo->add_undo_method(this, "_key_ofs_changed", animation, new_time, key_ofs); + if (value.get_type() == Variant::NODE_PATH) { + _fix_node_path(value, base_map[track]); + } - if (existing != -1) { - Variant v = animation->track_get_key_value(track, existing); - trans = animation->track_get_key_transition(track, existing); - undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, new_time, v, trans); + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); + } + Variant prev = animation->track_get_key_value(track, key); + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, prev); + update_obj = true; } - } else if (name == "easing") { - float val = p_value; - float prev_val = animation->track_get_key_transition(track, key); + } break; + case Animation::TYPE_METHOD: { + Dictionary d_old = animation->track_get_key_value(track, key); + Dictionary d_new = d_old.duplicate(); - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Transition"), UndoRedo::MERGE_ENDS); - } - undo_redo->add_do_method(animation.ptr(), "track_set_key_transition", track, key, val); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_transition", track, key, prev_val); - update_obj = true; - } + bool mergeable = false; - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - case Animation::TYPE_ROTATION_3D: - case Animation::TYPE_SCALE_3D: { - Variant old = animation->track_get_key_value(track, key); - if (!setting) { - String chan; - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - chan = "Position3D"; - break; - case Animation::TYPE_ROTATION_3D: - chan = "Rotation3D"; - break; - case Animation::TYPE_SCALE_3D: - chan = "Scale3D"; - break; - default: { + if (name == "name") { + d_new["method"] = p_value; + } else if (name == "arg_count") { + Vector<Variant> args = d_old["args"]; + args.resize(p_value); + d_new["args"] = args; + change_notify_deserved = true; + } else if (name.begins_with("args/")) { + Vector<Variant> 
args = d_old["args"]; + int idx = name.get_slice("/", 1).to_int(); + ERR_FAIL_INDEX_V(idx, args.size(), false); + + String what = name.get_slice("/", 2); + if (what == "type") { + Variant::Type t = Variant::Type(int(p_value)); + + if (t != args[idx].get_type()) { + Callable::CallError err; + if (Variant::can_convert(args[idx].get_type(), t)) { + Variant old = args[idx]; + Variant *ptrs[1] = { &old }; + Variant::construct(t, args.write[idx], (const Variant **)ptrs, 1, err); + } else { + Variant::construct(t, args.write[idx], nullptr, 0, err); } + change_notify_deserved = true; + d_new["args"] = args; } - - setting = true; - undo_redo->create_action(vformat(TTR("Animation Multi Change %s"), chan)); - } - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, p_value); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, old); - update_obj = true; - } break; - case Animation::TYPE_BLEND_SHAPE: - case Animation::TYPE_VALUE: { - if (name == "value") { + } else if (what == "value") { Variant value = p_value; - if (value.get_type() == Variant::NODE_PATH) { _fix_node_path(value, base_map[track]); } - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - Variant prev = animation->track_get_key_value(track, key); - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, prev); - update_obj = true; + args.write[idx] = value; + d_new["args"] = args; + mergeable = true; } - } break; - case Animation::TYPE_METHOD: { - Dictionary d_old = animation->track_get_key_value(track, key); - Dictionary d_new = d_old.duplicate(); + } - bool mergeable = false; + Variant prev = animation->track_get_key_value(track, key); - if (name == "name") { - d_new["method"] = p_value; - } else if (name == "arg_count") { - Vector<Variant> args = d_old["args"]; - args.resize(p_value); - d_new["args"] = args; - change_notify_deserved = true; - } else if (name.begins_with("args/")) { - Vector<Variant> args = d_old["args"]; - int idx = name.get_slice("/", 1).to_int(); - ERR_FAIL_INDEX_V(idx, args.size(), false); - - String what = name.get_slice("/", 2); - if (what == "type") { - Variant::Type t = Variant::Type(int(p_value)); - - if (t != args[idx].get_type()) { - Callable::CallError err; - if (Variant::can_convert(args[idx].get_type(), t)) { - Variant old = args[idx]; - Variant *ptrs[1] = { &old }; - Variant::construct(t, args.write[idx], (const Variant **)ptrs, 1, err); - } else { - Variant::construct(t, args.write[idx], nullptr, 0, err); - } - change_notify_deserved = true; - d_new["args"] = args; - } - } else if (what == "value") { - Variant value = p_value; - if (value.get_type() == Variant::NODE_PATH) { - _fix_node_path(value, base_map[track]); - } + if (!setting) { + if (mergeable) { + undo_redo->create_action(TTR("Animation Multi Change Call"), UndoRedo::MERGE_ENDS); + } else { + undo_redo->create_action(TTR("Animation Multi Change Call")); + } - args.write[idx] = value; - d_new["args"] = args; - mergeable = true; - } + setting = true; + } + + undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, d_new); + undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, d_old); + update_obj = true; + } break; + case Animation::TYPE_BEZIER: { + if (name == "value") { + const Variant &value = p_value; + + if (!setting) { + setting = true; + 
undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); } + float prev = animation->bezier_track_get_key_value(track, key); + undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_value", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_value", track, key, prev); + update_obj = true; + } else if (name == "in_handle") { + const Variant &value = p_value; - Variant prev = animation->track_get_key_value(track, key); + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); + } + Vector2 prev = animation->bezier_track_get_key_in_handle(track, key); + undo_redo->add_do_method(this, "_bezier_track_set_key_in_handle", track, key, value); + undo_redo->add_undo_method(this, "_bezier_track_set_key_in_handle", track, key, prev); + update_obj = true; + } else if (name == "out_handle") { + const Variant &value = p_value; if (!setting) { - if (mergeable) { - undo_redo->create_action(TTR("Animation Multi Change Call"), UndoRedo::MERGE_ENDS); - } else { - undo_redo->create_action(TTR("Animation Multi Change Call")); - } + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); + } + Vector2 prev = animation->bezier_track_get_key_out_handle(track, key); + undo_redo->add_do_method(this, "_bezier_track_set_key_out_handle", track, key, value); + undo_redo->add_undo_method(this, "_bezier_track_set_key_out_handle", track, key, prev); + update_obj = true; + } else if (name == "handle_mode") { + const Variant &value = p_value; + if (!setting) { setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); } + int prev = animation->bezier_track_get_key_handle_mode(track, key); + undo_redo->add_do_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, value); + undo_redo->add_undo_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, prev); + update_obj = true; + } + } break; + case Animation::TYPE_AUDIO: { + if (name == "stream") { + Ref<AudioStream> stream = p_value; - undo_redo->add_do_method(animation.ptr(), "track_set_key_value", track, key, d_new); - undo_redo->add_undo_method(animation.ptr(), "track_set_key_value", track, key, d_old); + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); + } + Ref<Resource> prev = animation->audio_track_get_key_stream(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_stream", track, key, stream); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_stream", track, key, prev); update_obj = true; - } break; - case Animation::TYPE_BEZIER: { - if (name == "value") { - const Variant &value = p_value; + } else if (name == "start_offset") { + float value = p_value; - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - float prev = animation->bezier_track_get_key_value(track, key); - undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_value", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_value", track, key, prev); - update_obj = true; - } else if (name == "in_handle") { - const Variant &value = p_value; - - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), 
UndoRedo::MERGE_ENDS); - } - Vector2 prev = animation->bezier_track_get_key_in_handle(track, key); - undo_redo->add_do_method(this, "_bezier_track_set_key_in_handle", track, key, value); - undo_redo->add_undo_method(this, "_bezier_track_set_key_in_handle", track, key, prev); - update_obj = true; - } else if (name == "out_handle") { - const Variant &value = p_value; - - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - Vector2 prev = animation->bezier_track_get_key_out_handle(track, key); - undo_redo->add_do_method(this, "_bezier_track_set_key_out_handle", track, key, value); - undo_redo->add_undo_method(this, "_bezier_track_set_key_out_handle", track, key, prev); - update_obj = true; - } else if (name == "handle_mode") { - const Variant &value = p_value; - - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - int prev = animation->bezier_track_get_key_handle_mode(track, key); - undo_redo->add_do_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, value); - undo_redo->add_undo_method(this, "_bezier_track_set_key_handle_mode", animation.ptr(), track, key, prev); - update_obj = true; + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); } - } break; - case Animation::TYPE_AUDIO: { - if (name == "stream") { - Ref<AudioStream> stream = p_value; + float prev = animation->audio_track_get_key_start_offset(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, prev); + update_obj = true; + } else if (name == "end_offset") { + float value = p_value; - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - Ref<Resource> prev = animation->audio_track_get_key_stream(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_stream", track, key, stream); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_stream", track, key, prev); - update_obj = true; - } else if (name == "start_offset") { - float value = p_value; - - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - float prev = animation->audio_track_get_key_start_offset(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_start_offset", track, key, prev); - update_obj = true; - } else if (name == "end_offset") { - float value = p_value; - - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - float prev = animation->audio_track_get_key_end_offset(track, key); - undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, value); - undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, prev); - update_obj = true; + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); } - } break; - case Animation::TYPE_ANIMATION: { - if (name == "animation") { - StringName anim_name = p_value; + float prev = 
animation->audio_track_get_key_end_offset(track, key); + undo_redo->add_do_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, value); + undo_redo->add_undo_method(animation.ptr(), "audio_track_set_key_end_offset", track, key, prev); + update_obj = true; + } + } break; + case Animation::TYPE_ANIMATION: { + if (name == "animation") { + StringName anim_name = p_value; - if (!setting) { - setting = true; - undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); - } - StringName prev = animation->animation_track_get_key_animation(track, key); - undo_redo->add_do_method(animation.ptr(), "animation_track_set_key_animation", track, key, anim_name); - undo_redo->add_undo_method(animation.ptr(), "animation_track_set_key_animation", track, key, prev); - update_obj = true; + if (!setting) { + setting = true; + undo_redo->create_action(TTR("Animation Multi Change Keyframe Value"), UndoRedo::MERGE_ENDS); } - } break; - } + StringName prev = animation->animation_track_get_key_animation(track, key); + undo_redo->add_do_method(animation.ptr(), "animation_track_set_key_animation", track, key, anim_name); + undo_redo->add_undo_method(animation.ptr(), "animation_track_set_key_animation", track, key, prev); + update_obj = true; + } + } break; } } + } - Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); - if (setting) { - if (update_obj) { - undo_redo->add_do_method(this, "_update_obj", animation); - undo_redo->add_undo_method(this, "_update_obj", animation); - } - - undo_redo->commit_action(); - setting = false; + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + if (setting) { + if (update_obj) { + undo_redo->add_do_method(this, "_update_obj", animation); + undo_redo->add_undo_method(this, "_update_obj", animation); + } - if (change_notify_deserved) { - notify_change(); - } + undo_redo->commit_action(); + setting = false; - return true; + if (change_notify_deserved) { + notify_change(); } - return false; + return true; } - bool _get(const StringName &p_name, Variant &r_ret) const { - for (const KeyValue<int, List<float>> &E : key_ofs_map) { - int track = E.key; - for (const float &key_ofs : E.value) { - int key = animation->track_find_key(track, key_ofs, true); - ERR_CONTINUE(key == -1); + return false; +} - String name = p_name; - if (name == "time") { - r_ret = key_ofs; - return true; - } +bool AnimationMultiTrackKeyEdit::_get(const StringName &p_name, Variant &r_ret) const { + for (const KeyValue<int, List<float>> &E : key_ofs_map) { + int track = E.key; + for (const float &key_ofs : E.value) { + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + ERR_CONTINUE(key == -1); - if (name == "frame") { - float fps = animation->get_step(); - if (fps > 0) { - fps = 1.0 / fps; + String name = p_name; + if (name == "easing") { + r_ret = animation->track_get_key_transition(track, key); + return true; + } + + switch (animation->track_get_type(track)) { + case Animation::TYPE_POSITION_3D: + case Animation::TYPE_ROTATION_3D: + case Animation::TYPE_SCALE_3D: { + if (name == "position" || name == "rotation" || name == "scale") { + r_ret = animation->track_get_key_value(track, key); + return true; } - r_ret = key_ofs * fps; - return true; - } - if (name == "easing") { - r_ret = animation->track_get_key_transition(track, key); - return true; - } + } break; + case Animation::TYPE_BLEND_SHAPE: + case Animation::TYPE_VALUE: { + if (name == "value") { + r_ret = animation->track_get_key_value(track, 
key); + return true; + } - switch (animation->track_get_type(track)) { - case Animation::TYPE_POSITION_3D: - case Animation::TYPE_ROTATION_3D: - case Animation::TYPE_SCALE_3D: { - if (name == "position" || name == "rotation" || name == "scale") { - r_ret = animation->track_get_key_value(track, key); - return true; - } + } break; + case Animation::TYPE_METHOD: { + Dictionary d = animation->track_get_key_value(track, key); - } break; - case Animation::TYPE_BLEND_SHAPE: - case Animation::TYPE_VALUE: { - if (name == "value") { - r_ret = animation->track_get_key_value(track, key); - return true; - } + if (name == "name") { + ERR_FAIL_COND_V(!d.has("method"), false); + r_ret = d["method"]; + return true; + } - } break; - case Animation::TYPE_METHOD: { - Dictionary d = animation->track_get_key_value(track, key); + ERR_FAIL_COND_V(!d.has("args"), false); - if (name == "name") { - ERR_FAIL_COND_V(!d.has("method"), false); - r_ret = d["method"]; - return true; - } + Vector<Variant> args = d["args"]; - ERR_FAIL_COND_V(!d.has("args"), false); + if (name == "arg_count") { + r_ret = args.size(); + return true; + } - Vector<Variant> args = d["args"]; + if (name.begins_with("args/")) { + int idx = name.get_slice("/", 1).to_int(); + ERR_FAIL_INDEX_V(idx, args.size(), false); - if (name == "arg_count") { - r_ret = args.size(); + String what = name.get_slice("/", 2); + if (what == "type") { + r_ret = args[idx].get_type(); return true; } - if (name.begins_with("args/")) { - int idx = name.get_slice("/", 1).to_int(); - ERR_FAIL_INDEX_V(idx, args.size(), false); - - String what = name.get_slice("/", 2); - if (what == "type") { - r_ret = args[idx].get_type(); - return true; - } - - if (what == "value") { - r_ret = args[idx]; - return true; - } - } - - } break; - case Animation::TYPE_BEZIER: { - if (name == "value") { - r_ret = animation->bezier_track_get_key_value(track, key); + if (what == "value") { + r_ret = args[idx]; return true; } + } - if (name == "in_handle") { - r_ret = animation->bezier_track_get_key_in_handle(track, key); - return true; - } + } break; + case Animation::TYPE_BEZIER: { + if (name == "value") { + r_ret = animation->bezier_track_get_key_value(track, key); + return true; + } - if (name == "out_handle") { - r_ret = animation->bezier_track_get_key_out_handle(track, key); - return true; - } + if (name == "in_handle") { + r_ret = animation->bezier_track_get_key_in_handle(track, key); + return true; + } - if (name == "handle_mode") { - r_ret = animation->bezier_track_get_key_handle_mode(track, key); - return true; - } + if (name == "out_handle") { + r_ret = animation->bezier_track_get_key_out_handle(track, key); + return true; + } - } break; - case Animation::TYPE_AUDIO: { - if (name == "stream") { - r_ret = animation->audio_track_get_key_stream(track, key); - return true; - } + if (name == "handle_mode") { + r_ret = animation->bezier_track_get_key_handle_mode(track, key); + return true; + } - if (name == "start_offset") { - r_ret = animation->audio_track_get_key_start_offset(track, key); - return true; - } + } break; + case Animation::TYPE_AUDIO: { + if (name == "stream") { + r_ret = animation->audio_track_get_key_stream(track, key); + return true; + } - if (name == "end_offset") { - r_ret = animation->audio_track_get_key_end_offset(track, key); - return true; - } + if (name == "start_offset") { + r_ret = animation->audio_track_get_key_start_offset(track, key); + return true; + } - } break; - case Animation::TYPE_ANIMATION: { - if (name == "animation") { - r_ret = 
animation->animation_track_get_key_animation(track, key); - return true; - } + if (name == "end_offset") { + r_ret = animation->audio_track_get_key_end_offset(track, key); + return true; + } - } break; - } + } break; + case Animation::TYPE_ANIMATION: { + if (name == "animation") { + r_ret = animation->animation_track_get_key_animation(track, key); + return true; + } + + } break; } } + } - return false; + return false; +} + +void AnimationMultiTrackKeyEdit::_get_property_list(List<PropertyInfo> *p_list) const { + if (animation.is_null()) { + return; } - void _get_property_list(List<PropertyInfo> *p_list) const { - if (animation.is_null()) { - return; - } - int first_track = -1; - float first_key = -1.0; + int first_track = -1; + float first_key = -1.0; - bool show_time = true; - bool same_track_type = true; - bool same_key_type = true; - for (const KeyValue<int, List<float>> &E : key_ofs_map) { - int track = E.key; - ERR_FAIL_INDEX(track, animation->get_track_count()); + bool same_track_type = true; + bool same_key_type = true; + for (const KeyValue<int, List<float>> &E : key_ofs_map) { + int track = E.key; + ERR_FAIL_INDEX(track, animation->get_track_count()); - if (first_track < 0) { - first_track = track; - } + if (first_track < 0) { + first_track = track; + } - if (show_time && E.value.size() > 1) { - show_time = false; + if (same_track_type) { + if (animation->track_get_type(first_track) != animation->track_get_type(track)) { + same_track_type = false; + same_key_type = false; } - if (same_track_type) { - if (animation->track_get_type(first_track) != animation->track_get_type(track)) { - same_track_type = false; - same_key_type = false; + for (const float &F : E.value) { + int key = animation->track_find_key(track, F, Animation::FIND_MODE_APPROX); + ERR_FAIL_COND(key == -1); + if (first_key < 0) { + first_key = key; } - for (const float &F : E.value) { - int key = animation->track_find_key(track, F, true); - ERR_FAIL_COND(key == -1); - if (first_key < 0) { - first_key = key; - } - - if (animation->track_get_key_value(first_track, first_key).get_type() != animation->track_get_key_value(track, key).get_type()) { - same_key_type = false; - } + if (animation->track_get_key_value(first_track, first_key).get_type() != animation->track_get_key_value(track, key).get_type()) { + same_key_type = false; } } } + } - if (show_time) { - if (use_fps && animation->get_step() > 0) { - float max_frame = animation->get_length() / animation->get_step(); - p_list->push_back(PropertyInfo(Variant::FLOAT, "frame", PROPERTY_HINT_RANGE, "0," + rtos(max_frame) + ",1")); - } else { - p_list->push_back(PropertyInfo(Variant::FLOAT, "time", PROPERTY_HINT_RANGE, "0," + rtos(animation->get_length()) + ",0.01")); - } - } - - if (same_track_type) { - switch (animation->track_get_type(first_track)) { - case Animation::TYPE_POSITION_3D: { - p_list->push_back(PropertyInfo(Variant::VECTOR3, "position")); - } break; - case Animation::TYPE_ROTATION_3D: { - p_list->push_back(PropertyInfo(Variant::QUATERNION, "scale")); - } break; - case Animation::TYPE_SCALE_3D: { - p_list->push_back(PropertyInfo(Variant::VECTOR3, "scale")); - } break; - case Animation::TYPE_BLEND_SHAPE: { - p_list->push_back(PropertyInfo(Variant::FLOAT, "value")); - } break; - case Animation::TYPE_VALUE: { - if (same_key_type) { - Variant v = animation->track_get_key_value(first_track, first_key); + if (same_track_type) { + switch (animation->track_get_type(first_track)) { + case Animation::TYPE_POSITION_3D: { + 
p_list->push_back(PropertyInfo(Variant::VECTOR3, "position")); + } break; + case Animation::TYPE_ROTATION_3D: { + p_list->push_back(PropertyInfo(Variant::QUATERNION, "scale")); + } break; + case Animation::TYPE_SCALE_3D: { + p_list->push_back(PropertyInfo(Variant::VECTOR3, "scale")); + } break; + case Animation::TYPE_BLEND_SHAPE: { + p_list->push_back(PropertyInfo(Variant::FLOAT, "value")); + } break; + case Animation::TYPE_VALUE: { + if (same_key_type) { + Variant v = animation->track_get_key_value(first_track, first_key); - if (hint.type != Variant::NIL) { - PropertyInfo pi = hint; - pi.name = "value"; - p_list->push_back(pi); - } else { - PropertyHint val_hint = PROPERTY_HINT_NONE; - String val_hint_string; - - if (v.get_type() == Variant::OBJECT) { - // Could actually check the object property if exists..? Yes I will! - Ref<Resource> res = v; - if (res.is_valid()) { - val_hint = PROPERTY_HINT_RESOURCE_TYPE; - val_hint_string = res->get_class(); - } + if (hint.type != Variant::NIL) { + PropertyInfo pi = hint; + pi.name = "value"; + p_list->push_back(pi); + } else { + PropertyHint val_hint = PROPERTY_HINT_NONE; + String val_hint_string; + + if (v.get_type() == Variant::OBJECT) { + // Could actually check the object property if exists..? Yes I will! + Ref<Resource> res = v; + if (res.is_valid()) { + val_hint = PROPERTY_HINT_RESOURCE_TYPE; + val_hint_string = res->get_class(); } + } - if (v.get_type() != Variant::NIL) { - p_list->push_back(PropertyInfo(v.get_type(), "value", val_hint, val_hint_string)); - } + if (v.get_type() != Variant::NIL) { + p_list->push_back(PropertyInfo(v.get_type(), "value", val_hint, val_hint_string)); } } + } - p_list->push_back(PropertyInfo(Variant::FLOAT, "easing", PROPERTY_HINT_EXP_EASING)); - } break; - case Animation::TYPE_METHOD: { - p_list->push_back(PropertyInfo(Variant::STRING_NAME, "name")); + p_list->push_back(PropertyInfo(Variant::FLOAT, "easing", PROPERTY_HINT_EXP_EASING)); + } break; + case Animation::TYPE_METHOD: { + p_list->push_back(PropertyInfo(Variant::STRING_NAME, "name")); - p_list->push_back(PropertyInfo(Variant::INT, "arg_count", PROPERTY_HINT_RANGE, "0,32,1,or_greater")); + p_list->push_back(PropertyInfo(Variant::INT, "arg_count", PROPERTY_HINT_RANGE, "0,32,1,or_greater")); - Dictionary d = animation->track_get_key_value(first_track, first_key); - ERR_FAIL_COND(!d.has("args")); - Vector<Variant> args = d["args"]; - String vtypes; - for (int i = 0; i < Variant::VARIANT_MAX; i++) { - if (i > 0) { - vtypes += ","; - } - vtypes += Variant::get_type_name(Variant::Type(i)); + Dictionary d = animation->track_get_key_value(first_track, first_key); + ERR_FAIL_COND(!d.has("args")); + Vector<Variant> args = d["args"]; + String vtypes; + for (int i = 0; i < Variant::VARIANT_MAX; i++) { + if (i > 0) { + vtypes += ","; } + vtypes += Variant::get_type_name(Variant::Type(i)); + } - for (int i = 0; i < args.size(); i++) { - p_list->push_back(PropertyInfo(Variant::INT, "args/" + itos(i) + "/type", PROPERTY_HINT_ENUM, vtypes)); - if (args[i].get_type() != Variant::NIL) { - p_list->push_back(PropertyInfo(args[i].get_type(), "args/" + itos(i) + "/value")); - } - } - } break; - case Animation::TYPE_BEZIER: { - p_list->push_back(PropertyInfo(Variant::FLOAT, "value")); - p_list->push_back(PropertyInfo(Variant::VECTOR2, "in_handle")); - p_list->push_back(PropertyInfo(Variant::VECTOR2, "out_handle")); - p_list->push_back(PropertyInfo(Variant::INT, "handle_mode", PROPERTY_HINT_ENUM, "Free,Linear,Balanced,Mirrored")); - } break; - case Animation::TYPE_AUDIO: { - 
p_list->push_back(PropertyInfo(Variant::OBJECT, "stream", PROPERTY_HINT_RESOURCE_TYPE, "AudioStream")); - p_list->push_back(PropertyInfo(Variant::FLOAT, "start_offset", PROPERTY_HINT_RANGE, "0,3600,0.01,or_greater")); - p_list->push_back(PropertyInfo(Variant::FLOAT, "end_offset", PROPERTY_HINT_RANGE, "0,3600,0.01,or_greater")); - } break; - case Animation::TYPE_ANIMATION: { - if (key_ofs_map.size() > 1) { - break; + for (int i = 0; i < args.size(); i++) { + p_list->push_back(PropertyInfo(Variant::INT, "args/" + itos(i) + "/type", PROPERTY_HINT_ENUM, vtypes)); + if (args[i].get_type() != Variant::NIL) { + p_list->push_back(PropertyInfo(args[i].get_type(), "args/" + itos(i) + "/value")); } + } + } break; + case Animation::TYPE_BEZIER: { + p_list->push_back(PropertyInfo(Variant::FLOAT, "value")); + p_list->push_back(PropertyInfo(Variant::VECTOR2, "in_handle")); + p_list->push_back(PropertyInfo(Variant::VECTOR2, "out_handle")); + p_list->push_back(PropertyInfo(Variant::INT, "handle_mode", PROPERTY_HINT_ENUM, "Free,Linear,Balanced,Mirrored")); + } break; + case Animation::TYPE_AUDIO: { + p_list->push_back(PropertyInfo(Variant::OBJECT, "stream", PROPERTY_HINT_RESOURCE_TYPE, "AudioStream")); + p_list->push_back(PropertyInfo(Variant::FLOAT, "start_offset", PROPERTY_HINT_RANGE, "0,3600,0.0001,or_greater")); + p_list->push_back(PropertyInfo(Variant::FLOAT, "end_offset", PROPERTY_HINT_RANGE, "0,3600,0.0001,or_greater")); + } break; + case Animation::TYPE_ANIMATION: { + if (key_ofs_map.size() > 1) { + break; + } - String animations; - - if (root_path && root_path->has_node(animation->track_get_path(first_track))) { - AnimationPlayer *ap = Object::cast_to<AnimationPlayer>(root_path->get_node(animation->track_get_path(first_track))); - if (ap) { - List<StringName> anims; - ap->get_animation_list(&anims); - for (List<StringName>::Element *G = anims.front(); G; G = G->next()) { - if (!animations.is_empty()) { - animations += ","; - } + String animations; - animations += String(G->get()); + if (root_path && root_path->has_node(animation->track_get_path(first_track))) { + AnimationPlayer *ap = Object::cast_to<AnimationPlayer>(root_path->get_node(animation->track_get_path(first_track))); + if (ap) { + List<StringName> anims; + ap->get_animation_list(&anims); + for (List<StringName>::Element *G = anims.front(); G; G = G->next()) { + if (!animations.is_empty()) { + animations += ","; } + + animations += String(G->get()); } } + } - if (!animations.is_empty()) { - animations += ","; - } - animations += "[stop]"; + if (!animations.is_empty()) { + animations += ","; + } + animations += "[stop]"; - p_list->push_back(PropertyInfo(Variant::STRING_NAME, "animation", PROPERTY_HINT_ENUM, animations)); - } break; - } + p_list->push_back(PropertyInfo(Variant::STRING_NAME, "animation", PROPERTY_HINT_ENUM, animations)); + } break; } } +} - Ref<Animation> animation; - - RBMap<int, List<float>> key_ofs_map; - RBMap<int, NodePath> base_map; - PropertyInfo hint; - - Node *root_path = nullptr; - - bool use_fps = false; - - void notify_change() { - notify_property_list_changed(); - } +void AnimationMultiTrackKeyEdit::notify_change() { + notify_property_list_changed(); +} - Node *get_root_path() { - return root_path; - } +Node *AnimationMultiTrackKeyEdit::get_root_path() { + return root_path; +} - void set_use_fps(bool p_enable) { - use_fps = p_enable; - notify_property_list_changed(); - } -}; +void AnimationMultiTrackKeyEdit::set_use_fps(bool p_enable) { + use_fps = p_enable; + notify_property_list_changed(); +} void 
AnimationTimelineEdit::_zoom_changed(double) { queue_redraw(); @@ -1420,7 +1246,7 @@ void AnimationTimelineEdit::_anim_length_changed(double p_new_len) { return; } - p_new_len = MAX(0.001, p_new_len); + p_new_len = MAX(0.0001, p_new_len); if (use_fps && animation->get_step() > 0) { p_new_len *= animation->get_step(); } @@ -1539,7 +1365,7 @@ void AnimationTimelineEdit::_notification(int p_what) { float l = animation->get_length(); if (l <= 0) { - l = 0.001; // Avoid crashor. + l = 0.0001; // Avoid crashor. } Ref<Texture2D> hsize_icon = get_theme_icon(SNAME("Hsize"), SNAME("EditorIcons")); @@ -1600,7 +1426,7 @@ void AnimationTimelineEdit::_notification(int p_what) { end_px = zoomw; } - draw_rect(Rect2(Point2(get_name_limit() + begin_px, 0), Point2(end_px - begin_px - 1, h)), timecolor); + draw_rect(Rect2(Point2(get_name_limit() + begin_px, 0), Point2(end_px - begin_px, h)), timecolor); } } @@ -1759,7 +1585,7 @@ void AnimationTimelineEdit::update_values() { } } else { length->set_value(animation->get_length()); - length->set_step(0.001); + length->set_step(0.0001); length->set_tooltip_text(TTR("Animation length (seconds)")); time_icon->set_tooltip_text(TTR("Animation length (seconds)")); } @@ -1950,9 +1776,9 @@ AnimationTimelineEdit::AnimationTimelineEdit() { time_icon->set_tooltip_text(TTR("Animation length (seconds)")); len_hb->add_child(time_icon); length = memnew(EditorSpinSlider); - length->set_min(0.001); + length->set_min(0.0001); length->set_max(36000); - length->set_step(0.001); + length->set_step(0.0001); length->set_allow_greater(true); length->set_custom_minimum_size(Vector2(70 * EDSCALE, 0)); length->set_hide_slider(true); @@ -2058,7 +1884,7 @@ void AnimationTrackEdit::_notification(int p_what) { } else if (animation->track_get_type(track) == Animation::TYPE_AUDIO) { text = TTR("Audio Clips:"); } else if (animation->track_get_type(track) == Animation::TYPE_ANIMATION) { - text = TTR("Anim Clips:"); + text = TTR("Animation Clips:"); } else { text += anim_path.get_concatenated_subnames(); } @@ -2670,7 +2496,7 @@ String AnimationTrackEdit::get_tooltip(const Point2 &p_pos) const { } if (key_idx != -1) { - String text = TTR("Time (s):") + " " + rtos(animation->track_get_key_time(track, key_idx)) + "\n"; + String text = TTR("Time (s):") + " " + TS->format_number(rtos(Math::snapped(animation->track_get_key_time(track, key_idx), 0.0001))) + "\n"; switch (animation->track_get_type(track)) { case Animation::TYPE_POSITION_3D: { Vector3 t = animation->track_get_key_value(track, key_idx); @@ -4390,7 +4216,7 @@ AnimationTrackEditor::TrackIndices AnimationTrackEditor::_confirm_insert(InsertD p_next_tracks.normal++; } else { undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", p_id.track_idx, time); - int existing = animation->track_find_key(p_id.track_idx, time, true); + int existing = animation->track_find_key(p_id.track_idx, time, Animation::FIND_MODE_APPROX); if (existing != -1) { Variant v = animation->track_get_key_value(p_id.track_idx, existing); float trans = animation->track_get_key_transition(p_id.track_idx, existing); @@ -5005,8 +4831,8 @@ void AnimationTrackEditor::_insert_key_from_track(float p_ofs, int p_track) { if (snap->is_pressed() && step->get_value() != 0) { p_ofs = snap_time(p_ofs); } - while (animation->track_find_key(p_track, p_ofs, true) != -1) { // Make sure insertion point is valid. - p_ofs += 0.001; + while (animation->track_find_key(p_track, p_ofs, Animation::FIND_MODE_APPROX) != -1) { // Make sure insertion point is valid. 
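// Note on the lookup above: track_find_key() now takes an Animation::FindMode
// value rather than the old boolean "exact" flag. FIND_MODE_APPROX matches a key
// sitting approximately at the requested time, so the insertion offset is nudged
// by the editor's 0.0001 s step until no existing key occupies the slot.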
+ p_ofs += 0.0001; } Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); @@ -5338,7 +5164,7 @@ void AnimationTrackEditor::_select_at_anim(const Ref<Animation> &p_anim, int p_t return; } - int idx = animation->track_find_key(p_track, p_pos, true); + int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX); ERR_FAIL_COND(idx < 0); SelectedKey sk; @@ -5365,7 +5191,7 @@ void AnimationTrackEditor::_move_selection_commit() { // 2 - Remove overlapped keys. for (RBMap<SelectedKey, KeyInfo>::Element *E = selection.back(); E; E = E->prev()) { float newtime = snap_time(E->get().pos + motion); - int idx = animation->track_find_key(E->key().track, newtime, true); + int idx = animation->track_find_key(E->key().track, newtime, Animation::FIND_MODE_APPROX); if (idx == -1) { continue; } @@ -5625,7 +5451,7 @@ void AnimationTrackEditor::_anim_duplicate_keys(bool transpose) { continue; } - int existing_idx = animation->track_find_key(dst_track, dst_time, true); + int existing_idx = animation->track_find_key(dst_track, dst_time, Animation::FIND_MODE_APPROX); undo_redo->add_do_method(animation.ptr(), "track_insert_key", dst_track, dst_time, animation->track_get_key_value(E->key().track, E->key().key), animation->track_get_key_transition(E->key().track, E->key().key)); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", dst_track, dst_time); @@ -5916,7 +5742,7 @@ void AnimationTrackEditor::_edit_menu_pressed(int p_option) { // 2 - Remove overlapped keys. for (RBMap<SelectedKey, KeyInfo>::Element *E = selection.back(); E; E = E->prev()) { float newtime = (E->get().pos - from_t) * s + from_t; - int idx = animation->track_find_key(E->key().track, newtime, true); + int idx = animation->track_find_key(E->key().track, newtime, Animation::FIND_MODE_APPROX); if (idx == -1) { continue; } @@ -6127,7 +5953,7 @@ void AnimationTrackEditor::_edit_menu_pressed(int p_option) { undo_redo->add_do_method(reset.ptr(), "track_set_path", dst_track, path); undo_redo->add_undo_method(reset.ptr(), "remove_track", dst_track); } else { - existing_idx = reset->track_find_key(dst_track, 0, true); + existing_idx = reset->track_find_key(dst_track, 0, Animation::FIND_MODE_APPROX); } undo_redo->add_do_method(reset.ptr(), "track_insert_key", dst_track, 0, animation->track_get_key_value(sk.track, sk.key), animation->track_get_key_transition(sk.track, sk.key)); @@ -6656,7 +6482,7 @@ AnimationTrackEditor::AnimationTrackEditor() { step = memnew(EditorSpinSlider); step->set_min(0); step->set_max(1000000); - step->set_step(0.001); + step->set_step(0.0001); step->set_hide_slider(true); step->set_custom_minimum_size(Size2(100, 0) * EDSCALE); step->set_tooltip_text(TTR("Animation step value.")); @@ -6946,3 +6772,103 @@ AnimationTrackEditor::~AnimationTrackEditor() { memdelete(multi_key_edit); } } + +// AnimationTrackKeyEditEditorPlugin + +void AnimationTrackKeyEditEditor::_time_edit_entered() { + int key = animation->track_find_key(track, key_ofs, Animation::FIND_MODE_APPROX); + if (key == -1) { + return; + } + key_data_cache.time = animation->track_get_key_time(track, key); + key_data_cache.transition = animation->track_get_key_transition(track, key); + key_data_cache.value = animation->track_get_key_value(track, key); +} + +void AnimationTrackKeyEditEditor::_time_edit_exited() { + real_t new_time = spinner->get_value(); + + if (use_fps) { + real_t fps = animation->get_step(); + if (fps > 0) { + fps = 1.0 / fps; + } + new_time /= fps; + } + + if (Math::is_equal_approx(new_time, 
key_data_cache.time)) { + return; // No change. + } + + int existing = animation->track_find_key(track, new_time, Animation::FIND_MODE_APPROX); + Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); + undo_redo->create_action(TTR("Animation Change Keyframe Time"), UndoRedo::MERGE_ENDS); + + if (existing != -1) { + undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", track, animation->track_get_key_time(track, existing)); + } + undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", track, key_data_cache.time); + undo_redo->add_do_method(animation.ptr(), "track_insert_key", track, new_time, key_data_cache.value, key_data_cache.transition); + undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", track, new_time); + undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, key_data_cache.time, key_data_cache.value, key_data_cache.transition); + if (existing != -1) { + undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track, animation->track_get_key_time(track, existing), animation->track_get_key_value(track, existing), animation->track_get_key_transition(track, existing)); + } + + // Reselect key. + AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); + if (ape) { + AnimationTrackEditor *ate = ape->get_track_editor(); + if (ate) { + undo_redo->add_do_method(ate, "_clear_selection_for_anim", animation); + undo_redo->add_undo_method(ate, "_clear_selection_for_anim", animation); + undo_redo->add_do_method(ate, "_select_at_anim", animation, track, new_time); + undo_redo->add_undo_method(ate, "_select_at_anim", animation, track, key_data_cache.time); + } + } + + undo_redo->commit_action(); +} + +AnimationTrackKeyEditEditor::AnimationTrackKeyEditEditor(Ref<Animation> p_animation, int p_track, real_t p_key_ofs, bool p_use_fps) { + if (!p_animation.is_valid()) { + return; + } + + animation = p_animation; + track = p_track; + key_ofs = p_key_ofs; + use_fps = p_use_fps; + + set_label("Time"); + + spinner = memnew(EditorSpinSlider); + spinner->set_focus_mode(Control::FOCUS_CLICK); + spinner->set_min(0); + spinner->set_allow_greater(true); + spinner->set_allow_lesser(true); + + if (use_fps) { + spinner->set_step(1); + spinner->set_hide_slider(true); + real_t fps = animation->get_step(); + if (fps > 0) { + fps = 1.0 / fps; + } + spinner->set_value(key_ofs * fps); + } else { + spinner->set_step(0.0001); + spinner->set_value(key_ofs); + spinner->set_max(animation->get_length()); + } + + add_child(spinner); + + spinner->connect("grabbed", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_entered), CONNECT_DEFERRED); + spinner->connect("ungrabbed", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_exited), CONNECT_DEFERRED); + spinner->connect("value_focus_entered", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_entered), CONNECT_DEFERRED); + spinner->connect("value_focus_exited", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_exited), CONNECT_DEFERRED); +} + +AnimationTrackKeyEditEditor::~AnimationTrackKeyEditEditor() { +} diff --git a/editor/animation_track_editor.h b/editor/animation_track_editor.h index 4b50424f39..5ae826bd5c 100644 --- a/editor/animation_track_editor.h +++ b/editor/animation_track_editor.h @@ -50,9 +50,83 @@ #include "scene/resources/animation.h" #include "scene_tree_editor.h" +class AnimationTrackEditor; class AnimationTrackEdit; class ViewPanner; +class AnimationTrackKeyEdit : public Object { + GDCLASS(AnimationTrackKeyEdit, Object); + 
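// A minimal usage sketch for this proxy (hypothetical caller; in the editor it is
// driven by the inspector): per-key properties written through Object::set() are
// routed to _set(), which records an undo/redo action on the Animation, and are
// read back through Object::get() via _get(). For example:
//
//     AnimationTrackKeyEdit *key_edit = memnew(AnimationTrackKeyEdit);
//     key_edit->animation = animation;   // Ref<Animation> being edited
//     key_edit->track = track_index;     // illustrative variable names
//     key_edit->key_ofs = key_time;      // key time, matched with FIND_MODE_APPROX
//     key_edit->set("easing", 2.0);      // forwarded to _set(), creates an undo step
//     float easing = key_edit->get("easing");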
+public: + bool setting = false; + bool animation_read_only = false; + + Ref<Animation> animation; + int track = -1; + float key_ofs = 0; + Node *root_path = nullptr; + + PropertyInfo hint; + NodePath base; + bool use_fps = false; + + bool _hide_script_from_inspector() { return true; } + bool _hide_metadata_from_inspector() { return true; } + bool _dont_undo_redo() { return true; } + + bool _is_read_only() { return animation_read_only; } + + void notify_change(); + Node *get_root_path(); + void set_use_fps(bool p_enable); + +protected: + static void _bind_methods(); + void _fix_node_path(Variant &value); + void _update_obj(const Ref<Animation> &p_anim); + void _key_ofs_changed(const Ref<Animation> &p_anim, float from, float to); + bool _set(const StringName &p_name, const Variant &p_value); + bool _get(const StringName &p_name, Variant &r_ret) const; + void _get_property_list(List<PropertyInfo> *p_list) const; +}; + +class AnimationMultiTrackKeyEdit : public Object { + GDCLASS(AnimationMultiTrackKeyEdit, Object); + +public: + bool setting = false; + bool animation_read_only = false; + + Ref<Animation> animation; + + RBMap<int, List<float>> key_ofs_map; + RBMap<int, NodePath> base_map; + PropertyInfo hint; + + Node *root_path = nullptr; + + bool use_fps = false; + + bool _hide_script_from_inspector() { return true; } + bool _hide_metadata_from_inspector() { return true; } + bool _dont_undo_redo() { return true; } + + bool _is_read_only() { return animation_read_only; } + + void notify_change(); + Node *get_root_path(); + void set_use_fps(bool p_enable); + +protected: + static void _bind_methods(); + void _fix_node_path(Variant &value, NodePath &base); + void _update_obj(const Ref<Animation> &p_anim); + void _key_ofs_changed(const Ref<Animation> &p_anim, float from, float to); + bool _set(const StringName &p_name, const Variant &p_value); + bool _get(const StringName &p_name, Variant &r_ret) const; + void _get_property_list(List<PropertyInfo> *p_list) const; +}; + class AnimationTimelineEdit : public Range { GDCLASS(AnimationTimelineEdit, Range); @@ -129,8 +203,6 @@ public: AnimationTimelineEdit(); }; -class AnimationTrackEditor; - class AnimationTrackEdit : public Control { GDCLASS(AnimationTrackEdit, Control); friend class AnimationTimelineEdit; @@ -592,4 +664,30 @@ public: ~AnimationTrackEditor(); }; +// AnimationTrackKeyEditEditorPlugin + +class AnimationTrackKeyEditEditor : public EditorProperty { + GDCLASS(AnimationTrackKeyEditEditor, EditorProperty); + + Ref<Animation> animation; + int track = -1; + real_t key_ofs = 0.0; + bool use_fps = false; + + EditorSpinSlider *spinner = nullptr; + + struct KeyDataCache { + real_t time = 0.0; + float transition = 0.0; + Variant value; + } key_data_cache; + + void _time_edit_entered(); + void _time_edit_exited(); + +public: + AnimationTrackKeyEditEditor(Ref<Animation> p_animation, int p_track, real_t p_key_ofs, bool p_use_fps); + ~AnimationTrackKeyEditEditor(); +}; + #endif // ANIMATION_TRACK_EDITOR_H diff --git a/editor/animation_track_editor_plugins.cpp b/editor/animation_track_editor_plugins.cpp index 704935e163..0ad62710eb 100644 --- a/editor/animation_track_editor_plugins.cpp +++ b/editor/animation_track_editor_plugins.cpp @@ -831,8 +831,8 @@ Rect2 AnimationTrackEditTypeAudio::get_key_rect(int p_index, float p_pixels_sec) len -= end_ofs; len -= start_ofs; - if (len <= 0.001) { - len = 0.001; + if (len <= 0.0001) { + len = 0.0001; } if (get_animation()->track_get_key_count(get_track()) > p_index + 1) { @@ -887,8 +887,8 @@ void 
AnimationTrackEditTypeAudio::draw_key(int p_index, float p_pixels_sec, int len -= end_ofs; len -= start_ofs; - if (len <= 0.001) { - len = 0.001; + if (len <= 0.0001) { + len = 0.0001; } int pixel_len = len * p_pixels_sec; @@ -1014,8 +1014,8 @@ void AnimationTrackEditTypeAudio::drop_data(const Point2 &p_point, const Variant ofs = get_editor()->snap_time(ofs); - while (get_animation()->track_find_key(get_track(), ofs, true) != -1) { //make sure insertion point is valid - ofs += 0.001; + while (get_animation()->track_find_key(get_track(), ofs, Animation::FIND_MODE_APPROX) != -1) { //make sure insertion point is valid + ofs += 0.0001; } Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); diff --git a/editor/debugger/editor_profiler.cpp b/editor/debugger/editor_profiler.cpp index 35666a5566..0f8b2f36be 100644 --- a/editor/debugger/editor_profiler.cpp +++ b/editor/debugger/editor_profiler.cpp @@ -106,7 +106,7 @@ void EditorProfiler::clear() { seeking = false; // Ensure button text (start, stop) is correct - _set_button_text(); + _update_button_text(); emit_signal(SNAME("enable_profiling"), activate->is_pressed()); } @@ -376,7 +376,7 @@ void EditorProfiler::_update_frame() { updating_frame = false; } -void EditorProfiler::_set_button_text() { +void EditorProfiler::_update_button_text() { if (activate->is_pressed()) { activate->set_icon(get_theme_icon(SNAME("Stop"), SNAME("EditorIcons"))); activate->set_text(TTR("Stop")); @@ -387,7 +387,7 @@ void EditorProfiler::_set_button_text() { } void EditorProfiler::_activate_pressed() { - _set_button_text(); + _update_button_text(); if (activate->is_pressed()) { _clear_pressed(); @@ -510,13 +510,17 @@ void EditorProfiler::_bind_methods() { } void EditorProfiler::set_enabled(bool p_enable, bool p_clear) { - activate->set_pressed(false); activate->set_disabled(!p_enable); if (p_clear) { clear(); } } +void EditorProfiler::set_pressed(bool p_pressed) { + activate->set_pressed(p_pressed); + _update_button_text(); +} + bool EditorProfiler::is_profiling() { return activate->is_pressed(); } @@ -595,6 +599,7 @@ EditorProfiler::EditorProfiler() { add_child(hb); activate = memnew(Button); activate->set_toggle_mode(true); + activate->set_disabled(true); activate->set_text(TTR("Start")); activate->connect("pressed", callable_mp(this, &EditorProfiler::_activate_pressed)); hb->add_child(activate); diff --git a/editor/debugger/editor_profiler.h b/editor/debugger/editor_profiler.h index e9ecc285ed..d0dd67688b 100644 --- a/editor/debugger/editor_profiler.h +++ b/editor/debugger/editor_profiler.h @@ -122,7 +122,7 @@ private: Timer *frame_delay = nullptr; Timer *plot_delay = nullptr; - void _set_button_text(); + void _update_button_text(); void _update_frame(); void _activate_pressed(); @@ -155,6 +155,7 @@ protected: public: void add_frame_metric(const Metric &p_metric, bool p_final = false); void set_enabled(bool p_enable, bool p_clear = true); + void set_pressed(bool p_pressed); bool is_profiling(); bool is_seeking() { return seeking; } void disable_seeking(); diff --git a/editor/debugger/editor_visual_profiler.cpp b/editor/debugger/editor_visual_profiler.cpp index b8bc712ba6..fe425220c9 100644 --- a/editor/debugger/editor_visual_profiler.cpp +++ b/editor/debugger/editor_visual_profiler.cpp @@ -66,6 +66,7 @@ void EditorVisualProfiler::add_frame_metric(const Metric &p_metric) { } updating_frame = true; + clear_button->set_disabled(false); cursor_metric_edit->set_max(frame_metrics[last_metric].frame_number); 
cursor_metric_edit->set_min(MAX(frame_metrics[last_metric].frame_number - frame_metrics.size(), 0u)); @@ -408,6 +409,7 @@ void EditorVisualProfiler::_activate_pressed() { activate->set_icon(get_theme_icon(SNAME("Stop"), SNAME("EditorIcons"))); activate->set_text(TTR("Stop")); _clear_pressed(); //always clear on start + clear_button->set_disabled(false); } else { activate->set_icon(get_theme_icon(SNAME("Play"), SNAME("EditorIcons"))); activate->set_text(TTR("Start")); @@ -416,6 +418,7 @@ void EditorVisualProfiler::_activate_pressed() { } void EditorVisualProfiler::_clear_pressed() { + clear_button->set_disabled(true); clear(); _update_plot(); } @@ -647,10 +650,25 @@ void EditorVisualProfiler::_bind_methods() { ADD_SIGNAL(MethodInfo("enable_profiling", PropertyInfo(Variant::BOOL, "enable"))); } +void EditorVisualProfiler::_update_button_text() { + if (activate->is_pressed()) { + activate->set_icon(get_theme_icon(SNAME("Stop"), SNAME("EditorIcons"))); + activate->set_text(TTR("Stop")); + } else { + activate->set_icon(get_theme_icon(SNAME("Play"), SNAME("EditorIcons"))); + activate->set_text(TTR("Start")); + } +} + void EditorVisualProfiler::set_enabled(bool p_enable) { activate->set_disabled(!p_enable); } +void EditorVisualProfiler::set_pressed(bool p_pressed) { + activate->set_pressed(p_pressed); + _update_button_text(); +} + bool EditorVisualProfiler::is_profiling() { return activate->is_pressed(); } @@ -714,12 +732,14 @@ EditorVisualProfiler::EditorVisualProfiler() { add_child(hb); activate = memnew(Button); activate->set_toggle_mode(true); + activate->set_disabled(true); activate->set_text(TTR("Start")); activate->connect("pressed", callable_mp(this, &EditorVisualProfiler::_activate_pressed)); hb->add_child(activate); clear_button = memnew(Button); clear_button->set_text(TTR("Clear")); + clear_button->set_disabled(true); clear_button->connect("pressed", callable_mp(this, &EditorVisualProfiler::_clear_pressed)); hb->add_child(clear_button); diff --git a/editor/debugger/editor_visual_profiler.h b/editor/debugger/editor_visual_profiler.h index 8aa9e7b308..8180d354e8 100644 --- a/editor/debugger/editor_visual_profiler.h +++ b/editor/debugger/editor_visual_profiler.h @@ -101,6 +101,8 @@ private: Timer *frame_delay = nullptr; Timer *plot_delay = nullptr; + void _update_button_text(); + void _update_frame(bool p_focus_selected = false); void _activate_pressed(); @@ -133,6 +135,7 @@ protected: public: void add_frame_metric(const Metric &p_metric); void set_enabled(bool p_enable); + void set_pressed(bool p_pressed); bool is_profiling(); bool is_seeking() { return seeking; } void disable_seeking(); diff --git a/editor/debugger/script_editor_debugger.cpp b/editor/debugger/script_editor_debugger.cpp index 5cb7016b35..f6b00b83b0 100644 --- a/editor/debugger/script_editor_debugger.cpp +++ b/editor/debugger/script_editor_debugger.cpp @@ -319,6 +319,7 @@ void ScriptEditorDebugger::_parse_message(const String &p_msg, const Array &p_da tabs->set_current_tab(0); } profiler->set_enabled(false, false); + visual_profiler->set_enabled(false); inspector->clear_cache(); // Take a chance to force remote objects update. 
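// When the debugger enters a break (branch above), both profilers are disabled;
// the "debug_exit" branch below re-enables them, and stop() additionally resets
// their toggle buttons through the new set_pressed() helper, e.g.:
//
//     profiler->set_enabled(false, false);
//     profiler->set_pressed(false);
//     visual_profiler->set_enabled(false);
//     visual_profiler->set_pressed(false);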
} else if (p_msg == "debug_exit") { @@ -328,8 +329,12 @@ void ScriptEditorDebugger::_parse_message(const String &p_msg, const Array &p_da _update_buttons_state(); _set_reason_text(TTR("Execution resumed."), MESSAGE_SUCCESS); emit_signal(SNAME("breaked"), false, false, "", false); + profiler->set_enabled(true, false); profiler->disable_seeking(); + + visual_profiler->set_enabled(true); + } else if (p_msg == "set_pid") { ERR_FAIL_COND(p_data.size() < 1); remote_pid = p_data[0]; @@ -901,6 +906,7 @@ void ScriptEditorDebugger::start(Ref<RemoteDebuggerPeer> p_peer) { stop(); profiler->set_enabled(true, true); + visual_profiler->set_enabled(true); peer = p_peer; ERR_FAIL_COND(p_peer.is_null()); @@ -957,7 +963,11 @@ void ScriptEditorDebugger::stop() { res_path_cache.clear(); profiler_signature.clear(); - profiler->set_enabled(true, false); + profiler->set_enabled(false, false); + profiler->set_pressed(false); + + visual_profiler->set_enabled(false); + visual_profiler->set_pressed(false); inspector->edit(nullptr); _update_buttons_state(); diff --git a/editor/editor_node.cpp b/editor/editor_node.cpp index 0c75a36c7d..01e605880c 100644 --- a/editor/editor_node.cpp +++ b/editor/editor_node.cpp @@ -7262,6 +7262,7 @@ EditorNode::EditorNode() { gui_base->add_child(disk_changed); add_editor_plugin(memnew(AnimationPlayerEditorPlugin)); + add_editor_plugin(memnew(AnimationTrackKeyEditEditorPlugin)); add_editor_plugin(memnew(CanvasItemEditorPlugin)); add_editor_plugin(memnew(Node3DEditorPlugin)); add_editor_plugin(memnew(ScriptEditorPlugin)); diff --git a/editor/editor_properties.cpp b/editor/editor_properties.cpp index fb3bf46c05..4c9b18efe7 100644 --- a/editor/editor_properties.cpp +++ b/editor/editor_properties.cpp @@ -1384,10 +1384,11 @@ void EditorPropertyInteger::update_property() { void EditorPropertyInteger::_bind_methods() { } -void EditorPropertyInteger::setup(int64_t p_min, int64_t p_max, int64_t p_step, bool p_allow_greater, bool p_allow_lesser, const String &p_suffix) { +void EditorPropertyInteger::setup(int64_t p_min, int64_t p_max, int64_t p_step, bool p_hide_slider, bool p_allow_greater, bool p_allow_lesser, const String &p_suffix) { spin->set_min(p_min); spin->set_max(p_max); spin->set_step(p_step); + spin->set_hide_slider(p_hide_slider); spin->set_allow_greater(p_allow_greater); spin->set_allow_lesser(p_allow_lesser); spin->set_suffix(p_suffix); @@ -2242,12 +2243,11 @@ void EditorPropertyVector2i::_notification(int p_what) { } } -void EditorPropertyVector2i::setup(int p_min, int p_max, bool p_hide_slider, bool p_link, const String &p_suffix) { +void EditorPropertyVector2i::setup(int p_min, int p_max, bool p_link, const String &p_suffix) { for (int i = 0; i < 2; i++) { spin[i]->set_min(p_min); spin[i]->set_max(p_max); spin[i]->set_step(1); - spin[i]->set_hide_slider(p_hide_slider); spin[i]->set_allow_greater(true); spin[i]->set_allow_lesser(true); spin[i]->set_suffix(p_suffix); @@ -2352,12 +2352,11 @@ void EditorPropertyRect2i::_notification(int p_what) { void EditorPropertyRect2i::_bind_methods() { } -void EditorPropertyRect2i::setup(int p_min, int p_max, bool p_hide_slider, const String &p_suffix) { +void EditorPropertyRect2i::setup(int p_min, int p_max, const String &p_suffix) { for (int i = 0; i < 4; i++) { spin[i]->set_min(p_min); spin[i]->set_max(p_max); spin[i]->set_step(1); - spin[i]->set_hide_slider(p_hide_slider); spin[i]->set_allow_greater(true); spin[i]->set_allow_lesser(true); spin[i]->set_suffix(p_suffix); @@ -2496,12 +2495,12 @@ void EditorPropertyVector3i::_notification(int 
p_what) { void EditorPropertyVector3i::_bind_methods() { } -void EditorPropertyVector3i::setup(int p_min, int p_max, bool p_hide_slider, bool p_link, const String &p_suffix) { +void EditorPropertyVector3i::setup(int p_min, int p_max, bool p_link, const String &p_suffix) { for (int i = 0; i < 3; i++) { spin[i]->set_min(p_min); spin[i]->set_max(p_max); spin[i]->set_step(1); - spin[i]->set_hide_slider(p_hide_slider); + spin[i]->set_hide_slider(false); spin[i]->set_allow_greater(true); spin[i]->set_allow_lesser(true); spin[i]->set_suffix(p_suffix); @@ -3004,11 +3003,11 @@ void EditorPropertyVector4i::_notification(int p_what) { void EditorPropertyVector4i::_bind_methods() { } -void EditorPropertyVector4i::setup(double p_min, double p_max, bool p_hide_slider, const String &p_suffix) { +void EditorPropertyVector4i::setup(double p_min, double p_max, const String &p_suffix) { for (int i = 0; i < 4; i++) { spin[i]->set_min(p_min); spin[i]->set_max(p_max); - spin[i]->set_hide_slider(p_hide_slider); + spin[i]->set_step(1); spin[i]->set_allow_greater(true); spin[i]->set_allow_lesser(true); spin[i]->set_suffix(p_suffix); @@ -4347,7 +4346,7 @@ EditorProperty *EditorInspectorDefaultPlugin::get_editor_for_property(Object *p_ EditorPropertyInteger *editor = memnew(EditorPropertyInteger); EditorPropertyRangeHint hint = _parse_range_hint(p_hint, p_hint_text, 1); - editor->setup(hint.min, hint.max, hint.step, hint.or_greater, hint.or_less, hint.suffix); + editor->setup(hint.min, hint.max, hint.step, hint.hide_slider, hint.or_greater, hint.or_less, hint.suffix); return editor; } @@ -4475,7 +4474,7 @@ EditorProperty *EditorInspectorDefaultPlugin::get_editor_for_property(Object *p_ case Variant::VECTOR2I: { EditorPropertyVector2i *editor = memnew(EditorPropertyVector2i(p_wide)); EditorPropertyRangeHint hint = _parse_range_hint(p_hint, p_hint_text, 1); - editor->setup(hint.min, hint.max, hint.hide_slider, p_hint == PROPERTY_HINT_LINK, hint.suffix); + editor->setup(hint.min, hint.max, p_hint == PROPERTY_HINT_LINK, hint.suffix); return editor; } break; @@ -4488,7 +4487,7 @@ EditorProperty *EditorInspectorDefaultPlugin::get_editor_for_property(Object *p_ case Variant::RECT2I: { EditorPropertyRect2i *editor = memnew(EditorPropertyRect2i(p_wide)); EditorPropertyRangeHint hint = _parse_range_hint(p_hint, p_hint_text, 1); - editor->setup(hint.min, hint.max, hint.hide_slider, hint.suffix); + editor->setup(hint.min, hint.max, hint.suffix); return editor; } break; @@ -4502,7 +4501,7 @@ EditorProperty *EditorInspectorDefaultPlugin::get_editor_for_property(Object *p_ case Variant::VECTOR3I: { EditorPropertyVector3i *editor = memnew(EditorPropertyVector3i(p_wide)); EditorPropertyRangeHint hint = _parse_range_hint(p_hint, p_hint_text, 1); - editor->setup(hint.min, hint.max, hint.hide_slider, p_hint == PROPERTY_HINT_LINK, hint.suffix); + editor->setup(hint.min, hint.max, p_hint == PROPERTY_HINT_LINK, hint.suffix); return editor; } break; @@ -4516,7 +4515,7 @@ EditorProperty *EditorInspectorDefaultPlugin::get_editor_for_property(Object *p_ case Variant::VECTOR4I: { EditorPropertyVector4i *editor = memnew(EditorPropertyVector4i); EditorPropertyRangeHint hint = _parse_range_hint(p_hint, p_hint_text, 1); - editor->setup(hint.min, hint.max, hint.hide_slider, hint.suffix); + editor->setup(hint.min, hint.max, hint.suffix); return editor; } break; diff --git a/editor/editor_properties.h b/editor/editor_properties.h index f38e33d9e3..042b94130b 100644 --- a/editor/editor_properties.h +++ b/editor/editor_properties.h @@ -378,7 +378,7 @@ 
protected: public: virtual void update_property() override; - void setup(int64_t p_min, int64_t p_max, int64_t p_step, bool p_allow_greater, bool p_allow_lesser, const String &p_suffix = String()); + void setup(int64_t p_min, int64_t p_max, int64_t p_step, bool p_hide_slider, bool p_allow_greater, bool p_allow_lesser, const String &p_suffix = String()); EditorPropertyInteger(); }; @@ -566,7 +566,7 @@ protected: public: virtual void update_property() override; - void setup(int p_min, int p_max, bool p_hide_slider, bool p_link = false, const String &p_suffix = String()); + void setup(int p_min, int p_max, bool p_link = false, const String &p_suffix = String()); EditorPropertyVector2i(bool p_force_wide = false); }; @@ -583,7 +583,7 @@ protected: public: virtual void update_property() override; - void setup(int p_min, int p_max, bool p_hide_slider, const String &p_suffix = String()); + void setup(int p_min, int p_max, const String &p_suffix = String()); EditorPropertyRect2i(bool p_force_wide = false); }; @@ -608,7 +608,7 @@ protected: public: virtual void update_property() override; - void setup(int p_min, int p_max, bool p_hide_slider, bool p_link = false, const String &p_suffix = String()); + void setup(int p_min, int p_max, bool p_link = false, const String &p_suffix = String()); EditorPropertyVector3i(bool p_force_wide = false); }; @@ -693,7 +693,7 @@ protected: public: virtual void update_property() override; - void setup(double p_min, double p_max, bool p_hide_slider, const String &p_suffix = String()); + void setup(double p_min, double p_max, const String &p_suffix = String()); EditorPropertyVector4i(); }; diff --git a/editor/editor_properties_array_dict.cpp b/editor/editor_properties_array_dict.cpp index edda6c5d7b..451cb7cfee 100644 --- a/editor/editor_properties_array_dict.cpp +++ b/editor/editor_properties_array_dict.cpp @@ -919,7 +919,7 @@ void EditorPropertyDictionary::update_property() { } break; case Variant::INT: { EditorPropertyInteger *editor = memnew(EditorPropertyInteger); - editor->setup(-100000, 100000, 1, true, true); + editor->setup(-100000, 100000, 1, false, true, true); prop = editor; } break; @@ -942,7 +942,7 @@ void EditorPropertyDictionary::update_property() { } break; case Variant::VECTOR2I: { EditorPropertyVector2i *editor = memnew(EditorPropertyVector2i); - editor->setup(-100000, 100000, true); + editor->setup(-100000, 100000); prop = editor; } break; @@ -954,7 +954,7 @@ void EditorPropertyDictionary::update_property() { } break; case Variant::RECT2I: { EditorPropertyRect2i *editor = memnew(EditorPropertyRect2i); - editor->setup(-100000, 100000, true); + editor->setup(-100000, 100000); prop = editor; } break; @@ -966,7 +966,7 @@ void EditorPropertyDictionary::update_property() { } break; case Variant::VECTOR3I: { EditorPropertyVector3i *editor = memnew(EditorPropertyVector3i); - editor->setup(-100000, 100000, true); + editor->setup(-100000, 100000); prop = editor; } break; @@ -978,7 +978,7 @@ void EditorPropertyDictionary::update_property() { } break; case Variant::VECTOR4I: { EditorPropertyVector4i *editor = memnew(EditorPropertyVector4i); - editor->setup(-100000, 100000, true); + editor->setup(-100000, 100000); prop = editor; } break; diff --git a/editor/editor_spin_slider.cpp b/editor/editor_spin_slider.cpp index 9128143619..60c46835fe 100644 --- a/editor/editor_spin_slider.cpp +++ b/editor/editor_spin_slider.cpp @@ -76,6 +76,7 @@ void EditorSpinSlider::gui_input(const Ref<InputEvent> &p_event) { pre_grab_value = get_value(); grabbing_spinner = false; 
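// EditorSpinSlider now reports value-editing gestures through four signals:
// "grabbed", "ungrabbed", "value_focus_entered" and "value_focus_exited".
// A listener can cache the key's state when editing starts and commit an
// undo/redo action when it ends, as the track key time editor does:
//
//     spinner->connect("grabbed", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_entered), CONNECT_DEFERRED);
//     spinner->connect("ungrabbed", callable_mp(this, &AnimationTrackKeyEditEditor::_time_edit_exited), CONNECT_DEFERRED);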
grabbing_spinner_mouse_pos = get_global_mouse_position(); + emit_signal("grabbed"); } } else { if (grabbing_spinner_attempt) { @@ -83,6 +84,7 @@ void EditorSpinSlider::gui_input(const Ref<InputEvent> &p_event) { Input::get_singleton()->set_mouse_mode(Input::MOUSE_MODE_VISIBLE); Input::get_singleton()->warp_mouse(grabbing_spinner_mouse_pos); queue_redraw(); + emit_signal("ungrabbed"); } else { _focus_entered(); } @@ -178,9 +180,11 @@ void EditorSpinSlider::_grabber_gui_input(const Ref<InputEvent> &p_event) { grabbing_ratio = get_as_ratio(); grabbing_from = grabber->get_transform().xform(mb->get_position()).x; } + emit_signal("grabbed"); } else { grabbing_grabber = false; mousewheel_over_grabber = false; + emit_signal("ungrabbed"); } } @@ -299,10 +303,6 @@ void EditorSpinSlider::_draw_spin_slider() { Ref<Texture2D> updown = get_theme_icon(is_read_only() ? SNAME("updown_disabled") : SNAME("updown"), SNAME("SpinBox")); - if (get_step() == 1) { - number_width -= updown->get_width(); - } - String numstr = get_text_value(); int vofs = (size.height - font->get_height(font_size)) / 2 + font->get_ascent(font_size); @@ -359,76 +359,79 @@ void EditorSpinSlider::_draw_spin_slider() { } TS->free_rid(num_rid); - if (get_step() == 1) { - Ref<Texture2D> updown2 = get_theme_icon(is_read_only() ? SNAME("updown_disabled") : SNAME("updown"), SNAME("SpinBox")); - int updown_vofs = (size.height - updown2->get_height()) / 2; - if (rtl) { - updown_offset = sb->get_margin(SIDE_LEFT); - } else { - updown_offset = size.width - sb->get_margin(SIDE_RIGHT) - updown2->get_width(); - } - Color c(1, 1, 1); - if (hover_updown) { - c *= Color(1.2, 1.2, 1.2); - } - draw_texture(updown2, Vector2(updown_offset, updown_vofs), c); - if (grabber->is_visible()) { - grabber->hide(); - } - } else if (!hide_slider) { - const int grabber_w = 4 * EDSCALE; - const int width = size.width - sb->get_minimum_size().width - grabber_w; - const int ofs = sb->get_offset().x; - const int svofs = (size.height + vofs) / 2 - 1; - Color c = fc; - - // Draw the horizontal slider's background. - c.a = 0.2; - draw_rect(Rect2(ofs, svofs + 1, width, 2 * EDSCALE), c); - - // Draw the horizontal slider's filled part on the left. - const int gofs = get_as_ratio() * width; - c.a = 0.45; - draw_rect(Rect2(ofs, svofs + 1, gofs, 2 * EDSCALE), c); - - // Draw the horizontal slider's grabber. - c.a = 0.9; - const Rect2 grabber_rect = Rect2(ofs + gofs, svofs, grabber_w, 4 * EDSCALE); - draw_rect(grabber_rect, c); - - grabbing_spinner_mouse_pos = get_global_position() + grabber_rect.get_center(); - - bool display_grabber = (grabbing_grabber || mouse_over_spin || mouse_over_grabber) && !grabbing_spinner && !(value_input_popup && value_input_popup->is_visible()); - if (grabber->is_visible() != display_grabber) { - if (display_grabber) { - grabber->show(); + if (!hide_slider) { + if (get_step() == 1) { + number_width -= updown->get_width(); + Ref<Texture2D> updown2 = get_theme_icon(is_read_only() ? 
SNAME("updown_disabled") : SNAME("updown"), SNAME("SpinBox")); + int updown_vofs = (size.height - updown2->get_height()) / 2; + if (rtl) { + updown_offset = sb->get_margin(SIDE_LEFT); } else { + updown_offset = size.width - sb->get_margin(SIDE_RIGHT) - updown2->get_width(); + } + Color c(1, 1, 1); + if (hover_updown) { + c *= Color(1.2, 1.2, 1.2); + } + draw_texture(updown2, Vector2(updown_offset, updown_vofs), c); + if (grabber->is_visible()) { grabber->hide(); } - } - - if (display_grabber) { - Ref<Texture2D> grabber_tex; - if (mouse_over_grabber) { - grabber_tex = get_theme_icon(SNAME("grabber_highlight"), SNAME("HSlider")); - } else { - grabber_tex = get_theme_icon(SNAME("grabber"), SNAME("HSlider")); + } else { + const int grabber_w = 4 * EDSCALE; + const int width = size.width - sb->get_minimum_size().width - grabber_w; + const int ofs = sb->get_offset().x; + const int svofs = (size.height + vofs) / 2 - 1; + Color c = fc; + + // Draw the horizontal slider's background. + c.a = 0.2; + draw_rect(Rect2(ofs, svofs + 1, width, 2 * EDSCALE), c); + + // Draw the horizontal slider's filled part on the left. + const int gofs = get_as_ratio() * width; + c.a = 0.45; + draw_rect(Rect2(ofs, svofs + 1, gofs, 2 * EDSCALE), c); + + // Draw the horizontal slider's grabber. + c.a = 0.9; + const Rect2 grabber_rect = Rect2(ofs + gofs, svofs, grabber_w, 4 * EDSCALE); + draw_rect(grabber_rect, c); + + grabbing_spinner_mouse_pos = get_global_position() + grabber_rect.get_center(); + + bool display_grabber = (grabbing_grabber || mouse_over_spin || mouse_over_grabber) && !grabbing_spinner && !(value_input_popup && value_input_popup->is_visible()); + if (grabber->is_visible() != display_grabber) { + if (display_grabber) { + grabber->show(); + } else { + grabber->hide(); + } } - if (grabber->get_texture() != grabber_tex) { - grabber->set_texture(grabber_tex); - } + if (display_grabber) { + Ref<Texture2D> grabber_tex; + if (mouse_over_grabber) { + grabber_tex = get_theme_icon(SNAME("grabber_highlight"), SNAME("HSlider")); + } else { + grabber_tex = get_theme_icon(SNAME("grabber"), SNAME("HSlider")); + } - Vector2 scale = get_global_transform_with_canvas().get_scale(); - grabber->set_scale(scale); - grabber->reset_size(); - grabber->set_position(get_global_position() + (grabber_rect.get_center() - grabber->get_size() * 0.5) * scale); + if (grabber->get_texture() != grabber_tex) { + grabber->set_texture(grabber_tex); + } - if (mousewheel_over_grabber) { - Input::get_singleton()->warp_mouse(grabber->get_position() + grabber_rect.size); - } + Vector2 scale = get_global_transform_with_canvas().get_scale(); + grabber->set_scale(scale); + grabber->reset_size(); + grabber->set_position(get_global_position() + (grabber_rect.get_center() - grabber->get_size() * 0.5) * scale); + + if (mousewheel_over_grabber) { + Input::get_singleton()->warp_mouse(grabber->get_position() + grabber_rect.size); + } - grabber_range = width; + grabber_range = width; + } } } } @@ -584,6 +587,8 @@ void EditorSpinSlider::_value_focus_exited() { //enter, click, esc grab_focus(); } + + emit_signal("value_focus_exited"); } void EditorSpinSlider::_grabber_mouse_entered() { @@ -627,6 +632,7 @@ void EditorSpinSlider::_focus_entered() { value_input->call_deferred(SNAME("select_all")); value_input->set_focus_next(find_next_valid_focus()->get_path()); value_input->set_focus_previous(find_prev_valid_focus()->get_path()); + emit_signal("value_focus_entered"); } void EditorSpinSlider::_bind_methods() { @@ -650,6 +656,11 @@ void 
EditorSpinSlider::_bind_methods() { ADD_PROPERTY(PropertyInfo(Variant::BOOL, "read_only"), "set_read_only", "is_read_only"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "flat"), "set_flat", "is_flat"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "hide_slider"), "set_hide_slider", "is_hiding_slider"); + + ADD_SIGNAL(MethodInfo("grabbed")); + ADD_SIGNAL(MethodInfo("ungrabbed")); + ADD_SIGNAL(MethodInfo("value_focus_entered")); + ADD_SIGNAL(MethodInfo("value_focus_exited")); } void EditorSpinSlider::_ensure_input_popup() { diff --git a/editor/plugins/animation_player_editor_plugin.cpp b/editor/plugins/animation_player_editor_plugin.cpp index de16400ec9..344a800241 100644 --- a/editor/plugins/animation_player_editor_plugin.cpp +++ b/editor/plugins/animation_player_editor_plugin.cpp @@ -87,13 +87,13 @@ void AnimationPlayerEditor::_notification(int p_what) { } frame->set_value(player->get_current_animation_position()); track_editor->set_anim_pos(player->get_current_animation_position()); - } else if (!player->is_valid()) { // Reset timeline when the player has been stopped externally frame->set_value(0); } else if (last_active) { // Need the last frame after it stopped. frame->set_value(player->get_current_animation_position()); + track_editor->set_anim_pos(player->get_current_animation_position()); } last_active = player->is_playing(); @@ -423,7 +423,7 @@ void AnimationPlayerEditor::_select_anim_by_name(const String &p_anim) { _animation_selected(idx); } -double AnimationPlayerEditor::_get_editor_step() const { +float AnimationPlayerEditor::_get_editor_step() const { // Returns the effective snapping value depending on snapping modifiers, or 0 if snapping is disabled. if (track_editor->is_snap_enabled()) { const String current = player->get_assigned_animation(); @@ -434,7 +434,7 @@ double AnimationPlayerEditor::_get_editor_step() const { return Input::get_singleton()->is_key_pressed(Key::SHIFT) ? 
anim->get_step() * 0.25 : anim->get_step(); } - return 0.0; + return 0.0f; } void AnimationPlayerEditor::_animation_name_edited() { @@ -1973,3 +1973,26 @@ AnimationPlayerEditorPlugin::AnimationPlayerEditorPlugin() { AnimationPlayerEditorPlugin::~AnimationPlayerEditorPlugin() { } + +// AnimationTrackKeyEditEditorPlugin + +bool EditorInspectorPluginAnimationTrackKeyEdit::can_handle(Object *p_object) { + return Object::cast_to<AnimationTrackKeyEdit>(p_object) != nullptr; +} + +void EditorInspectorPluginAnimationTrackKeyEdit::parse_begin(Object *p_object) { + AnimationTrackKeyEdit *atk = Object::cast_to<AnimationTrackKeyEdit>(p_object); + ERR_FAIL_COND(!atk); + + atk_editor = memnew(AnimationTrackKeyEditEditor(atk->animation, atk->track, atk->key_ofs, atk->use_fps)); + add_custom_control(atk_editor); +} + +AnimationTrackKeyEditEditorPlugin::AnimationTrackKeyEditEditorPlugin() { + atk_plugin = memnew(EditorInspectorPluginAnimationTrackKeyEdit); + EditorInspector::add_inspector_plugin(atk_plugin); +} + +bool AnimationTrackKeyEditEditorPlugin::handles(Object *p_object) const { + return p_object->is_class("AnimationTrackKeyEdit"); +} diff --git a/editor/plugins/animation_player_editor_plugin.h b/editor/plugins/animation_player_editor_plugin.h index 53d460fc9e..8539d450e6 100644 --- a/editor/plugins/animation_player_editor_plugin.h +++ b/editor/plugins/animation_player_editor_plugin.h @@ -162,7 +162,7 @@ class AnimationPlayerEditor : public VBoxContainer { } onion; void _select_anim_by_name(const String &p_anim); - double _get_editor_step() const; + float _get_editor_step() const; void _play_pressed(); void _play_from_pressed(); void _play_bw_pressed(); @@ -272,4 +272,30 @@ public: ~AnimationPlayerEditorPlugin(); }; +// AnimationTrackKeyEditEditorPlugin + +class EditorInspectorPluginAnimationTrackKeyEdit : public EditorInspectorPlugin { + GDCLASS(EditorInspectorPluginAnimationTrackKeyEdit, EditorInspectorPlugin); + + AnimationTrackKeyEditEditor *atk_editor = nullptr; + +public: + virtual bool can_handle(Object *p_object) override; + virtual void parse_begin(Object *p_object) override; +}; + +class AnimationTrackKeyEditEditorPlugin : public EditorPlugin { + GDCLASS(AnimationTrackKeyEditEditorPlugin, EditorPlugin); + + EditorInspectorPluginAnimationTrackKeyEdit *atk_plugin = nullptr; + +public: + bool has_main_screen() const override { return false; } + virtual bool handles(Object *p_object) const override; + + virtual String get_name() const override { return "AnimationTrackKeyEdit"; } + + AnimationTrackKeyEditEditorPlugin(); +}; + #endif // ANIMATION_PLAYER_EDITOR_PLUGIN_H diff --git a/editor/plugins/animation_state_machine_editor.cpp b/editor/plugins/animation_state_machine_editor.cpp index 66a0c746d9..7b8a5d06f8 100644 --- a/editor/plugins/animation_state_machine_editor.cpp +++ b/editor/plugins/animation_state_machine_editor.cpp @@ -1097,7 +1097,8 @@ void AnimationNodeStateMachineEditor::_add_transition(const bool p_nested_action Ref<AnimationNodeStateMachineTransition> tr; tr.instantiate(); - tr->set_switch_mode(AnimationNodeStateMachineTransition::SwitchMode(transition_mode->get_selected())); + tr->set_advance_mode(auto_advance->is_pressed() ? 
AnimationNodeStateMachineTransition::AdvanceMode::ADVANCE_MODE_AUTO : AnimationNodeStateMachineTransition::AdvanceMode::ADVANCE_MODE_ENABLED); + tr->set_switch_mode(AnimationNodeStateMachineTransition::SwitchMode(switch_mode->get_selected())); Ref<EditorUndoRedoManager> &undo_redo = EditorNode::get_undo_redo(); if (!p_nested_action) { @@ -1326,7 +1327,7 @@ void AnimationNodeStateMachineEditor::_state_machine_draw() { } } - _connection_draw(from, to, AnimationNodeStateMachineTransition::SwitchMode(transition_mode->get_selected()), true, false, false, false, false); + _connection_draw(from, to, AnimationNodeStateMachineTransition::SwitchMode(switch_mode->get_selected()), true, false, false, false, false); } Ref<Texture2D> tr_reference_icon = get_theme_icon(SNAME("TransitionImmediateBig"), SNAME("EditorIcons")); @@ -1349,8 +1350,8 @@ void AnimationNodeStateMachineEditor::_state_machine_draw() { tl.to = (state_machine->get_node_position(local_to) * EDSCALE) + ofs_to - state_machine->get_graph_offset() * EDSCALE; Ref<AnimationNodeStateMachineTransition> tr = state_machine->get_transition(i); - tl.disabled = tr->is_disabled(); - tl.auto_advance = tr->has_auto_advance(); + tl.disabled = bool(tr->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED); + tl.auto_advance = bool(tr->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_AUTO); tl.advance_condition_name = tr->get_advance_condition_name(); tl.advance_condition_state = false; tl.mode = tr->get_switch_mode(); @@ -1590,10 +1591,12 @@ void AnimationNodeStateMachineEditor::_notification(int p_what) { tool_create->set_icon(get_theme_icon(SNAME("ToolAddNode"), SNAME("EditorIcons"))); tool_connect->set_icon(get_theme_icon(SNAME("ToolConnect"), SNAME("EditorIcons"))); - transition_mode->clear(); - transition_mode->add_icon_item(get_theme_icon(SNAME("TransitionImmediate"), SNAME("EditorIcons")), TTR("Immediate")); - transition_mode->add_icon_item(get_theme_icon(SNAME("TransitionSync"), SNAME("EditorIcons")), TTR("Sync")); - transition_mode->add_icon_item(get_theme_icon(SNAME("TransitionEnd"), SNAME("EditorIcons")), TTR("At End")); + switch_mode->clear(); + switch_mode->add_icon_item(get_theme_icon(SNAME("TransitionImmediate"), SNAME("EditorIcons")), TTR("Immediate")); + switch_mode->add_icon_item(get_theme_icon(SNAME("TransitionSync"), SNAME("EditorIcons")), TTR("Sync")); + switch_mode->add_icon_item(get_theme_icon(SNAME("TransitionEnd"), SNAME("EditorIcons")), TTR("At End")); + + auto_advance->set_icon(get_theme_icon(SNAME("AutoPlay"), SNAME("EditorIcons"))); tool_erase->set_icon(get_theme_icon(SNAME("Remove"), SNAME("EditorIcons"))); tool_group->set_icon(get_theme_icon(SNAME("Group"), SNAME("EditorIcons"))); @@ -1652,12 +1655,12 @@ void AnimationNodeStateMachineEditor::_notification(int p_what) { break; } - if (transition_lines[i].disabled != state_machine->get_transition(tidx)->is_disabled()) { + if (transition_lines[i].disabled != bool(state_machine->get_transition(tidx)->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED)) { state_machine_draw->queue_redraw(); break; } - if (transition_lines[i].auto_advance != state_machine->get_transition(tidx)->has_auto_advance()) { + if (transition_lines[i].auto_advance != bool(state_machine->get_transition(tidx)->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_AUTO)) { state_machine_draw->queue_redraw(); break; } @@ -1904,7 +1907,7 @@ void AnimationNodeStateMachineEditor::_erase_selected(const bool 
p_nested_action void AnimationNodeStateMachineEditor::_update_mode() { if (tool_select->is_pressed()) { - tool_erase_hb->show(); + selection_tools_hb->show(); bool nothing_selected = selected_nodes.is_empty() && selected_transition_from == StringName() && selected_transition_to == StringName(); bool start_end_selected = selected_nodes.size() == 1 && (*selected_nodes.begin() == state_machine->start_node || *selected_nodes.begin() == state_machine->end_node); tool_erase->set_disabled(nothing_selected || start_end_selected || read_only); @@ -1927,7 +1930,13 @@ void AnimationNodeStateMachineEditor::_update_mode() { } } } else { - tool_erase_hb->hide(); + selection_tools_hb->hide(); + } + + if (tool_connect->is_pressed()) { + transition_tools_hb->show(); + } else { + transition_tools_hb->hide(); } } @@ -1978,35 +1987,48 @@ AnimationNodeStateMachineEditor::AnimationNodeStateMachineEditor() { tool_connect->set_tooltip_text(TTR("Connect nodes.")); tool_connect->connect("pressed", callable_mp(this, &AnimationNodeStateMachineEditor::_update_mode), CONNECT_DEFERRED); - tool_erase_hb = memnew(HBoxContainer); - top_hb->add_child(tool_erase_hb); - tool_erase_hb->add_child(memnew(VSeparator)); + // Context-sensitive selection tools: + selection_tools_hb = memnew(HBoxContainer); + top_hb->add_child(selection_tools_hb); + selection_tools_hb->add_child(memnew(VSeparator)); tool_group = memnew(Button); tool_group->set_flat(true); tool_group->set_tooltip_text(TTR("Group Selected Node(s)") + " (Ctrl+G)"); tool_group->connect("pressed", callable_mp(this, &AnimationNodeStateMachineEditor::_group_selected_nodes)); tool_group->set_disabled(true); - tool_erase_hb->add_child(tool_group); + selection_tools_hb->add_child(tool_group); tool_ungroup = memnew(Button); tool_ungroup->set_flat(true); tool_ungroup->set_tooltip_text(TTR("Ungroup Selected Node") + " (Ctrl+Shift+G)"); tool_ungroup->connect("pressed", callable_mp(this, &AnimationNodeStateMachineEditor::_ungroup_selected_nodes)); tool_ungroup->set_visible(false); - tool_erase_hb->add_child(tool_ungroup); + selection_tools_hb->add_child(tool_ungroup); tool_erase = memnew(Button); tool_erase->set_flat(true); tool_erase->set_tooltip_text(TTR("Remove selected node or transition.")); tool_erase->connect("pressed", callable_mp(this, &AnimationNodeStateMachineEditor::_erase_selected).bind(false)); tool_erase->set_disabled(true); - tool_erase_hb->add_child(tool_erase); + selection_tools_hb->add_child(tool_erase); + + transition_tools_hb = memnew(HBoxContainer); + top_hb->add_child(transition_tools_hb); + transition_tools_hb->add_child(memnew(VSeparator)); + + transition_tools_hb->add_child(memnew(Label(TTR("Transition:")))); + switch_mode = memnew(OptionButton); + transition_tools_hb->add_child(switch_mode); + + auto_advance = memnew(Button); + auto_advance->set_flat(true); + auto_advance->set_tooltip_text(TTR("New Transitions Should Auto Advance")); + auto_advance->set_toggle_mode(true); + auto_advance->set_pressed(true); + transition_tools_hb->add_child(auto_advance); - top_hb->add_child(memnew(VSeparator)); - top_hb->add_child(memnew(Label(TTR("Transition:")))); - transition_mode = memnew(OptionButton); - top_hb->add_child(transition_mode); + // top_hb->add_spacer(); diff --git a/editor/plugins/animation_state_machine_editor.h b/editor/plugins/animation_state_machine_editor.h index 5edf803c41..28b5f0cbcc 100644 --- a/editor/plugins/animation_state_machine_editor.h +++ b/editor/plugins/animation_state_machine_editor.h @@ -52,15 +52,18 @@ class 
AnimationNodeStateMachineEditor : public AnimationTreeNodeEditorPlugin { Button *tool_select = nullptr; Button *tool_create = nullptr; Button *tool_connect = nullptr; - Button *tool_group = nullptr; - Button *tool_ungroup = nullptr; Popup *name_edit_popup = nullptr; LineEdit *name_edit = nullptr; - HBoxContainer *tool_erase_hb = nullptr; + HBoxContainer *selection_tools_hb = nullptr; + Button *tool_group = nullptr; + Button *tool_ungroup = nullptr; Button *tool_erase = nullptr; - OptionButton *transition_mode = nullptr; + HBoxContainer *transition_tools_hb = nullptr; + OptionButton *switch_mode = nullptr; + Button *auto_advance = nullptr; + OptionButton *play_mode = nullptr; PanelContainer *panel = nullptr; diff --git a/editor/plugins/font_config_plugin.cpp b/editor/plugins/font_config_plugin.cpp index 4370d013be..6b3db095d4 100644 --- a/editor/plugins/font_config_plugin.cpp +++ b/editor/plugins/font_config_plugin.cpp @@ -483,7 +483,7 @@ void EditorPropertyOTVariation::update_property() { Vector3i range = supported.get_value_at_index(i); EditorPropertyInteger *prop = memnew(EditorPropertyInteger); - prop->setup(range.x, range.y, 1, false, false); + prop->setup(range.x, range.y, false, 1, false, false); prop->set_object_and_property(object.ptr(), "keys/" + itos(name_tag)); String name = TS->tag_to_name(name_tag); @@ -762,7 +762,7 @@ void EditorPropertyOTFeatures::update_property() { } break; case Variant::INT: { EditorPropertyInteger *editor = memnew(EditorPropertyInteger); - editor->setup(0, 255, 1, false, false); + editor->setup(0, 255, 1, false, false, false); prop = editor; } break; default: { diff --git a/editor/plugins/tiles/tile_proxies_manager_dialog.cpp b/editor/plugins/tiles/tile_proxies_manager_dialog.cpp index b31fb1aa58..2868c14452 100644 --- a/editor/plugins/tiles/tile_proxies_manager_dialog.cpp +++ b/editor/plugins/tiles/tile_proxies_manager_dialog.cpp @@ -398,7 +398,7 @@ TileProxiesManagerDialog::TileProxiesManagerDialog() { source_from_property_editor->connect("property_changed", callable_mp(this, &TileProxiesManagerDialog::_property_changed)); source_from_property_editor->set_selectable(false); source_from_property_editor->set_h_size_flags(Control::SIZE_EXPAND_FILL); - source_from_property_editor->setup(-1, 99999, 1, true, false); + source_from_property_editor->setup(-1, 99999, 1, false, true, false); vboxcontainer_from->add_child(source_from_property_editor); coords_from_property_editor = memnew(EditorPropertyVector2i); @@ -417,7 +417,7 @@ TileProxiesManagerDialog::TileProxiesManagerDialog() { alternative_from_property_editor->connect("property_changed", callable_mp(this, &TileProxiesManagerDialog::_property_changed)); alternative_from_property_editor->set_selectable(false); alternative_from_property_editor->set_h_size_flags(Control::SIZE_EXPAND_FILL); - alternative_from_property_editor->setup(-1, 99999, 1, true, false); + alternative_from_property_editor->setup(-1, 99999, 1, false, true, false); alternative_from_property_editor->hide(); vboxcontainer_from->add_child(alternative_from_property_editor); @@ -432,7 +432,7 @@ TileProxiesManagerDialog::TileProxiesManagerDialog() { source_to_property_editor->connect("property_changed", callable_mp(this, &TileProxiesManagerDialog::_property_changed)); source_to_property_editor->set_selectable(false); source_to_property_editor->set_h_size_flags(Control::SIZE_EXPAND_FILL); - source_to_property_editor->setup(-1, 99999, 1, true, false); + source_to_property_editor->setup(-1, 99999, 1, false, true, false); 
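// Illustrative sketch (not part of the patch): the call sites above follow the revised
// EditorPropertyInteger::setup() declaration from editor_properties.h in this diff:
//
//   void setup(int64_t p_min, int64_t p_max, int64_t p_step, bool p_hide_slider,
//              bool p_allow_greater, bool p_allow_lesser, const String &p_suffix = String());
//
// so, for example:
//
//   source_to_property_editor->setup(-1, 99999, /* step */ 1, /* hide_slider */ false,
//           /* allow_greater */ true, /* allow_lesser */ false);
//
// keeps the value slider visible while still allowing values above the nominal maximum.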
vboxcontainer_to->add_child(source_to_property_editor); coords_to_property_editor = memnew(EditorPropertyVector2i); @@ -451,7 +451,7 @@ TileProxiesManagerDialog::TileProxiesManagerDialog() { alternative_to_property_editor->connect("property_changed", callable_mp(this, &TileProxiesManagerDialog::_property_changed)); alternative_to_property_editor->set_selectable(false); alternative_to_property_editor->set_h_size_flags(Control::SIZE_EXPAND_FILL); - alternative_to_property_editor->setup(-1, 99999, 1, true, false); + alternative_to_property_editor->setup(-1, 99999, 1, false, true, false); alternative_to_property_editor->hide(); vboxcontainer_to->add_child(alternative_to_property_editor); diff --git a/editor/plugins/version_control_editor_plugin.cpp b/editor/plugins/version_control_editor_plugin.cpp index f54bebfd8e..e9c6184d73 100644 --- a/editor/plugins/version_control_editor_plugin.cpp +++ b/editor/plugins/version_control_editor_plugin.cpp @@ -594,7 +594,6 @@ void VersionControlEditorPlugin::_display_diff(int p_idx) { diff->pop(); diff->pop(); - diff->add_newline(); diff->push_font(EditorNode::get_singleton()->get_gui_base()->get_theme_font(SNAME("status_source"), SNAME("EditorFonts"))); for (int j = 0; j < diff_file.diff_hunks.size(); j++) { EditorVCSInterface::DiffHunk hunk = diff_file.diff_hunks[j]; @@ -604,6 +603,7 @@ void VersionControlEditorPlugin::_display_diff(int p_idx) { String old_lines = String::num_int64(hunk.old_lines); String new_lines = String::num_int64(hunk.new_lines); + diff->add_newline(); diff->append_text("[center]@@ " + old_start + "," + old_lines + " " + new_start + "," + new_lines + " @@[/center]"); diff->add_newline(); @@ -616,7 +616,6 @@ void VersionControlEditorPlugin::_display_diff(int p_idx) { break; } diff->add_newline(); - diff->add_newline(); } diff->pop(); @@ -684,12 +683,10 @@ void VersionControlEditorPlugin::_display_diff_split_view(List<EditorVCSInterfac if (diff_line.old_line_no >= 0) { diff->push_cell(); - diff->push_indent(1); diff->push_color(has_change ? red : white); diff->add_text(String::num_int64(diff_line.old_line_no)); diff->pop(); diff->pop(); - diff->pop(); diff->push_cell(); diff->push_color(has_change ? red : white); @@ -716,12 +713,10 @@ void VersionControlEditorPlugin::_display_diff_split_view(List<EditorVCSInterfac if (diff_line.new_line_no >= 0) { diff->push_cell(); - diff->push_indent(1); diff->push_color(has_change ? green : white); diff->add_text(String::num_int64(diff_line.new_line_no)); diff->pop(); diff->pop(); - diff->pop(); diff->push_cell(); diff->push_color(has_change ? 
green : white); diff --git a/editor/project_manager.cpp b/editor/project_manager.cpp index 1b169076c6..0d93c8a95e 100644 --- a/editor/project_manager.cpp +++ b/editor/project_manager.cpp @@ -447,7 +447,7 @@ private: } else if (renderer_type == "gl_compatibility") { renderer_info->set_text( String::utf8("• ") + TTR("Supports desktop, mobile + web platforms.") + - String::utf8("\n• ") + TTR("Least advanced 3D graphics.") + + String::utf8("\n• ") + TTR("Least advanced 3D graphics (currently work-in-progress).") + String::utf8("\n• ") + TTR("Intended for low-end/older devices.") + String::utf8("\n• ") + TTR("Uses OpenGL 3 backend (OpenGL 3.3/ES 3.0/WebGL2).") + String::utf8("\n• ") + TTR("Fastest rendering of simple scenes.")); diff --git a/modules/regex/config.py b/modules/regex/config.py index 1248a8374d..df9f44cb95 100644 --- a/modules/regex/config.py +++ b/modules/regex/config.py @@ -1,5 +1,5 @@ def can_build(env, platform): - return not env["arch"].startswith("rv") + return True def configure(env): diff --git a/scene/animation/animation_blend_tree.cpp b/scene/animation/animation_blend_tree.cpp index d10b271b79..26261d6da5 100644 --- a/scene/animation/animation_blend_tree.cpp +++ b/scene/animation/animation_blend_tree.cpp @@ -144,6 +144,19 @@ double AnimationNodeAnimation::process(double p_time, bool p_seek, bool p_is_ext } } } + + // Emit start & finish signal. Internally, the detections are the same for backward. + // We should use call_deferred since the track keys are still being prosessed. + if (state->tree) { + // AnimationTree uses seek to 0 "internally" to process the first key of the animation, which is used as the start detection. + if (p_seek && !p_is_external_seeking && cur_time == 0) { + state->tree->call_deferred(SNAME("emit_signal"), "animation_started", animation); + } + // Finished. 
+ if (prev_time < anim_size && cur_time >= anim_size) { + state->tree->call_deferred(SNAME("emit_signal"), "animation_finished", animation); + } + } } if (play_mode == PLAY_MODE_FORWARD) { diff --git a/scene/animation/animation_node_state_machine.cpp b/scene/animation/animation_node_state_machine.cpp index aff2b11267..d54740e9b0 100644 --- a/scene/animation/animation_node_state_machine.cpp +++ b/scene/animation/animation_node_state_machine.cpp @@ -41,12 +41,12 @@ AnimationNodeStateMachineTransition::SwitchMode AnimationNodeStateMachineTransit return switch_mode; } -void AnimationNodeStateMachineTransition::set_auto_advance(bool p_enable) { - auto_advance = p_enable; +void AnimationNodeStateMachineTransition::set_advance_mode(AdvanceMode p_mode) { + advance_mode = p_mode; } -bool AnimationNodeStateMachineTransition::has_auto_advance() const { - return auto_advance; +AnimationNodeStateMachineTransition::AdvanceMode AnimationNodeStateMachineTransition::get_advance_mode() const { + return advance_mode; } void AnimationNodeStateMachineTransition::set_advance_condition(const StringName &p_condition) { @@ -107,15 +107,6 @@ Ref<Curve> AnimationNodeStateMachineTransition::get_xfade_curve() const { return xfade_curve; } -void AnimationNodeStateMachineTransition::set_disabled(bool p_disabled) { - disabled = p_disabled; - emit_changed(); -} - -bool AnimationNodeStateMachineTransition::is_disabled() const { - return disabled; -} - void AnimationNodeStateMachineTransition::set_priority(int p_priority) { priority = p_priority; emit_changed(); @@ -129,8 +120,8 @@ void AnimationNodeStateMachineTransition::_bind_methods() { ClassDB::bind_method(D_METHOD("set_switch_mode", "mode"), &AnimationNodeStateMachineTransition::set_switch_mode); ClassDB::bind_method(D_METHOD("get_switch_mode"), &AnimationNodeStateMachineTransition::get_switch_mode); - ClassDB::bind_method(D_METHOD("set_auto_advance", "auto_advance"), &AnimationNodeStateMachineTransition::set_auto_advance); - ClassDB::bind_method(D_METHOD("has_auto_advance"), &AnimationNodeStateMachineTransition::has_auto_advance); + ClassDB::bind_method(D_METHOD("set_advance_mode", "mode"), &AnimationNodeStateMachineTransition::set_advance_mode); + ClassDB::bind_method(D_METHOD("get_advance_mode"), &AnimationNodeStateMachineTransition::get_advance_mode); ClassDB::bind_method(D_METHOD("set_advance_condition", "name"), &AnimationNodeStateMachineTransition::set_advance_condition); ClassDB::bind_method(D_METHOD("get_advance_condition"), &AnimationNodeStateMachineTransition::get_advance_condition); @@ -141,9 +132,6 @@ void AnimationNodeStateMachineTransition::_bind_methods() { ClassDB::bind_method(D_METHOD("set_xfade_curve", "curve"), &AnimationNodeStateMachineTransition::set_xfade_curve); ClassDB::bind_method(D_METHOD("get_xfade_curve"), &AnimationNodeStateMachineTransition::get_xfade_curve); - ClassDB::bind_method(D_METHOD("set_disabled", "disabled"), &AnimationNodeStateMachineTransition::set_disabled); - ClassDB::bind_method(D_METHOD("is_disabled"), &AnimationNodeStateMachineTransition::is_disabled); - ClassDB::bind_method(D_METHOD("set_priority", "priority"), &AnimationNodeStateMachineTransition::set_priority); ClassDB::bind_method(D_METHOD("get_priority"), &AnimationNodeStateMachineTransition::get_priority); @@ -155,17 +143,19 @@ void AnimationNodeStateMachineTransition::_bind_methods() { ADD_PROPERTY(PropertyInfo(Variant::INT, "priority", PROPERTY_HINT_RANGE, "0,32,1"), "set_priority", "get_priority"); ADD_GROUP("Switch", ""); ADD_PROPERTY(PropertyInfo(Variant::INT, 
"switch_mode", PROPERTY_HINT_ENUM, "Immediate,Sync,At End"), "set_switch_mode", "get_switch_mode"); - ADD_PROPERTY(PropertyInfo(Variant::BOOL, "auto_advance"), "set_auto_advance", "has_auto_advance"); ADD_GROUP("Advance", "advance_"); + ADD_PROPERTY(PropertyInfo(Variant::INT, "advance_mode", PROPERTY_HINT_ENUM, "Disabled,Enabled,Auto"), "set_advance_mode", "get_advance_mode"); ADD_PROPERTY(PropertyInfo(Variant::STRING_NAME, "advance_condition"), "set_advance_condition", "get_advance_condition"); ADD_PROPERTY(PropertyInfo(Variant::STRING, "advance_expression", PROPERTY_HINT_EXPRESSION, ""), "set_advance_expression", "get_advance_expression"); - ADD_GROUP("Disabling", ""); - ADD_PROPERTY(PropertyInfo(Variant::BOOL, "disabled"), "set_disabled", "is_disabled"); BIND_ENUM_CONSTANT(SWITCH_MODE_IMMEDIATE); BIND_ENUM_CONSTANT(SWITCH_MODE_SYNC); BIND_ENUM_CONSTANT(SWITCH_MODE_AT_END); + BIND_ENUM_CONSTANT(ADVANCE_MODE_DISABLED); + BIND_ENUM_CONSTANT(ADVANCE_MODE_ENABLED); + BIND_ENUM_CONSTANT(ADVANCE_MODE_AUTO); + ADD_SIGNAL(MethodInfo("advance_condition_changed")); } @@ -234,7 +224,7 @@ bool AnimationNodeStateMachinePlayback::_travel(AnimationNodeStateMachine *p_sta //build open list for (int i = 0; i < p_state_machine->transitions.size(); i++) { - if (p_state_machine->transitions[i].transition->is_disabled()) { + if (p_state_machine->transitions[i].transition->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED) { continue; } @@ -279,7 +269,7 @@ bool AnimationNodeStateMachinePlayback::_travel(AnimationNodeStateMachine *p_sta StringName transition = p_state_machine->transitions[least_cost_transition->get()].local_to; for (int i = 0; i < p_state_machine->transitions.size(); i++) { - if (p_state_machine->transitions[i].transition->is_disabled()) { + if (p_state_machine->transitions[i].transition->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED) { continue; } @@ -379,6 +369,7 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s // can't travel, then teleport path.clear(); current = start_request; + play_start = true; } start_request = StringName(); //clear start request } @@ -424,7 +415,7 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s fading_pos += p_time; } fade_blend = MIN(1.0, fading_pos / fading_time); - if (fade_blend >= 1.0) { + if (fade_blend > 1.0) { fading_from = StringName(); } } @@ -433,9 +424,9 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s if (current_curve.is_valid()) { fade_blend = current_curve->sample(fade_blend); } - float rem = p_state_machine->blend_node(current, p_state_machine->states[current].node, p_time, p_seek, p_is_external_seeking, Math::is_zero_approx(fade_blend) ? CMP_EPSILON : fade_blend, AnimationNode::FILTER_IGNORE, true); // Blend values must be more than CMP_EPSILON to process discrete keys in edge. + double rem = p_state_machine->blend_node(current, p_state_machine->states[current].node, p_time, p_seek, p_is_external_seeking, Math::is_zero_approx(fade_blend) ? CMP_EPSILON : fade_blend, AnimationNode::FILTER_IGNORE, true); // Blend values must be more than CMP_EPSILON to process discrete keys in edge. - float fade_blend_inv = 1.0 - fade_blend; + double fade_blend_inv = 1.0 - fade_blend; if (fading_from != StringName()) { p_state_machine->blend_node(fading_from, p_state_machine->states[fading_from].node, p_time, p_seek, p_is_external_seeking, Math::is_zero_approx(fade_blend_inv) ? 
CMP_EPSILON : fade_blend_inv, AnimationNode::FILTER_IGNORE, true); // Blend values must be more than CMP_EPSILON to process discrete keys in edge. } @@ -446,19 +437,19 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s } { //advance and loop check - float next_pos = len_current - rem; + double next_pos = len_current - rem; end_loop = next_pos < pos_current; pos_current = next_pos; //looped } //find next StringName next; - float next_xfade = 0.0; + double next_xfade = 0.0; AnimationNodeStateMachineTransition::SwitchMode switch_mode = AnimationNodeStateMachineTransition::SWITCH_MODE_IMMEDIATE; if (path.size()) { for (int i = 0; i < p_state_machine->transitions.size(); i++) { - if (p_state_machine->transitions[i].transition->is_disabled()) { + if (p_state_machine->transitions[i].transition->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED) { continue; } @@ -474,7 +465,7 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s int auto_advance_to = -1; for (int i = 0; i < p_state_machine->transitions.size(); i++) { - if (p_state_machine->transitions[i].transition->is_disabled()) { + if (p_state_machine->transitions[i].transition->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED) { continue; } @@ -542,7 +533,7 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s int auto_advance_to = -1; for (int i = 0; i < prev_state_machine->transitions.size(); i++) { - if (prev_state_machine->transitions[i].transition->is_disabled()) { + if (prev_state_machine->transitions[i].transition->get_advance_mode() == AnimationNodeStateMachineTransition::ADVANCE_MODE_DISABLED) { continue; } @@ -629,14 +620,14 @@ double AnimationNodeStateMachinePlayback::process(AnimationNodeStateMachine *p_s } bool AnimationNodeStateMachinePlayback::_check_advance_condition(const Ref<AnimationNodeStateMachine> state_machine, const Ref<AnimationNodeStateMachineTransition> transition) const { - if (transition->has_auto_advance()) { - return true; + if (transition->get_advance_mode() != AnimationNodeStateMachineTransition::ADVANCE_MODE_AUTO) { + return false; } StringName advance_condition_name = transition->get_advance_condition_name(); - if (advance_condition_name != StringName() && bool(state_machine->get_parameter(advance_condition_name))) { - return true; + if (advance_condition_name != StringName() && !bool(state_machine->get_parameter(advance_condition_name))) { + return false; } if (transition->expression.is_valid()) { @@ -646,20 +637,18 @@ bool AnimationNodeStateMachinePlayback::_check_advance_condition(const Ref<Anima NodePath advance_expression_base_node_path = tree_base->get_advance_expression_base_node(); Node *expression_base = tree_base->get_node_or_null(advance_expression_base_node_path); - WARN_PRINT_ONCE("Animation transition has a valid expression, but no expression base node was set on its AnimationTree."); - if (expression_base) { Ref<Expression> exp = transition->expression; bool ret = exp->execute(Array(), expression_base, false, Engine::get_singleton()->is_editor_hint()); // Avoids allowing the user to crash the system with an expression by only allowing const calls. 
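// Illustrative sketch (not part of the patch): with the advance_mode refactor in this hunk,
// _check_advance_condition() fails fast instead of collecting "return true" branches.
// Roughly, using the accessors shown here:
//
//   if (transition->get_advance_mode() != ADVANCE_MODE_AUTO)        return false;
//   if (a condition parameter is assigned and currently false)      return false;
//   if (the expression is valid, has a base node, and either fails
//       to execute or evaluates to false)                           return false;
//   return true; // every configured gate passed, so auto-advance is allowed.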
- if (!exp->has_execute_failed()) { - if (ret) { - return true; - } + if (exp->has_execute_failed() || !ret) { + return false; } + } else { + WARN_PRINT_ONCE("Animation transition has a valid expression, but no expression base node was set on its AnimationTree."); } } - return false; + return true; } void AnimationNodeStateMachinePlayback::_bind_methods() { diff --git a/scene/animation/animation_node_state_machine.h b/scene/animation/animation_node_state_machine.h index 0dfe5a3a43..83b5e66491 100644 --- a/scene/animation/animation_node_state_machine.h +++ b/scene/animation/animation_node_state_machine.h @@ -44,14 +44,19 @@ public: SWITCH_MODE_AT_END, }; + enum AdvanceMode { + ADVANCE_MODE_DISABLED, + ADVANCE_MODE_ENABLED, + ADVANCE_MODE_AUTO, + }; + private: SwitchMode switch_mode = SWITCH_MODE_IMMEDIATE; - bool auto_advance = false; + AdvanceMode advance_mode = ADVANCE_MODE_ENABLED; StringName advance_condition; StringName advance_condition_name; float xfade_time = 0.0; Ref<Curve> xfade_curve; - bool disabled = false; int priority = 1; String advance_expression; @@ -65,8 +70,8 @@ public: void set_switch_mode(SwitchMode p_mode); SwitchMode get_switch_mode() const; - void set_auto_advance(bool p_enable); - bool has_auto_advance() const; + void set_advance_mode(AdvanceMode p_mode); + AdvanceMode get_advance_mode() const; void set_advance_condition(const StringName &p_condition); StringName get_advance_condition() const; @@ -82,9 +87,6 @@ public: void set_xfade_curve(const Ref<Curve> &p_curve); Ref<Curve> get_xfade_curve() const; - void set_disabled(bool p_disabled); - bool is_disabled() const; - void set_priority(int p_priority); int get_priority() const; @@ -92,6 +94,7 @@ public: }; VARIANT_ENUM_CAST(AnimationNodeStateMachineTransition::SwitchMode) +VARIANT_ENUM_CAST(AnimationNodeStateMachineTransition::AdvanceMode) class AnimationNodeStateMachine; @@ -111,8 +114,8 @@ class AnimationNodeStateMachinePlayback : public Resource { StringName next; }; - float len_current = 0.0; - float pos_current = 0.0; + double len_current = 0.0; + double pos_current = 0.0; bool end_loop = false; StringName current; diff --git a/scene/animation/animation_player.cpp b/scene/animation/animation_player.cpp index b76e49b86f..ff20724f89 100644 --- a/scene/animation/animation_player.cpp +++ b/scene/animation/animation_player.cpp @@ -691,7 +691,7 @@ void AnimationPlayer::_animation_process_animation(AnimationData *p_anim, double } } else { if (p_started) { - int first_key = a->track_find_key(i, p_prev_time, true); + int first_key = a->track_find_key(i, p_prev_time, Animation::FIND_MODE_EXACT); if (first_key >= 0) { indices.push_back(first_key); } @@ -761,7 +761,7 @@ void AnimationPlayer::_animation_process_animation(AnimationData *p_anim, double } } else { if (p_started) { - int first_key = a->track_find_key(i, p_prev_time, true); + int first_key = a->track_find_key(i, p_prev_time, Animation::FIND_MODE_EXACT); if (first_key >= 0) { indices.push_back(first_key); } @@ -855,7 +855,7 @@ void AnimationPlayer::_animation_process_animation(AnimationData *p_anim, double //find stuff to play List<int> to_play; if (p_started) { - int first_key = a->track_find_key(i, p_prev_time, true); + int first_key = a->track_find_key(i, p_prev_time, Animation::FIND_MODE_EXACT); if (first_key >= 0) { to_play.push_back(first_key); } @@ -968,7 +968,7 @@ void AnimationPlayer::_animation_process_animation(AnimationData *p_anim, double //find stuff to play List<int> to_play; if (p_started) { - int first_key = a->track_find_key(i, p_prev_time, 
true); + int first_key = a->track_find_key(i, p_prev_time, Animation::FIND_MODE_EXACT); if (first_key >= 0) { to_play.push_back(first_key); } diff --git a/scene/animation/animation_tree.cpp b/scene/animation/animation_tree.cpp index b06a21dea9..b3408c1509 100644 --- a/scene/animation/animation_tree.cpp +++ b/scene/animation/animation_tree.cpp @@ -432,7 +432,6 @@ void AnimationNode::_bind_methods() { GDVIRTUAL_BIND(_has_filter); ADD_SIGNAL(MethodInfo("removed_from_graph")); - ADD_SIGNAL(MethodInfo("tree_changed")); BIND_ENUM_CONSTANT(FILTER_IGNORE); @@ -1383,7 +1382,7 @@ void AnimationTree::_process_graph(double p_delta) { } } else { if (seeked) { - int idx = a->track_find_key(i, time, !is_external_seeking); + int idx = a->track_find_key(i, time, is_external_seeking ? Animation::FIND_MODE_NEAREST : Animation::FIND_MODE_EXACT); if (idx < 0) { continue; } @@ -1406,7 +1405,7 @@ void AnimationTree::_process_graph(double p_delta) { TrackCacheMethod *t = static_cast<TrackCacheMethod *>(track); if (seeked) { - int idx = a->track_find_key(i, time, !is_external_seeking); + int idx = a->track_find_key(i, time, is_external_seeking ? Animation::FIND_MODE_NEAREST : Animation::FIND_MODE_EXACT); if (idx < 0) { continue; } @@ -1440,7 +1439,7 @@ void AnimationTree::_process_graph(double p_delta) { if (seeked) { //find whatever should be playing - int idx = a->track_find_key(i, time, !is_external_seeking); + int idx = a->track_find_key(i, time, is_external_seeking ? Animation::FIND_MODE_NEAREST : Animation::FIND_MODE_EXACT); if (idx < 0) { continue; } @@ -1553,7 +1552,7 @@ void AnimationTree::_process_graph(double p_delta) { if (seeked) { //seek - int idx = a->track_find_key(i, time, !is_external_seeking); + int idx = a->track_find_key(i, time, is_external_seeking ? Animation::FIND_MODE_NEAREST : Animation::FIND_MODE_EXACT); if (idx < 0) { continue; } @@ -2037,6 +2036,10 @@ void AnimationTree::_bind_methods() { BIND_ENUM_CONSTANT(ANIMATION_PROCESS_MANUAL); ADD_SIGNAL(MethodInfo("animation_player_changed")); + + // Signals from AnimationNodes. + ADD_SIGNAL(MethodInfo("animation_started", PropertyInfo(Variant::STRING_NAME, "anim_name"))); + ADD_SIGNAL(MethodInfo("animation_finished", PropertyInfo(Variant::STRING_NAME, "anim_name"))); } AnimationTree::AnimationTree() { diff --git a/scene/gui/rich_text_label.cpp b/scene/gui/rich_text_label.cpp index 024c5f8ecf..d5c4fd3f07 100644 --- a/scene/gui/rich_text_label.cpp +++ b/scene/gui/rich_text_label.cpp @@ -3182,7 +3182,8 @@ void RichTextLabel::push_normal() { void RichTextLabel::push_bold() { ERR_FAIL_COND(theme_cache.bold_font.is_null()); - _push_def_font(BOLD_FONT); + ItemFont *item_font = _find_font(current); + _push_def_font((item_font && item_font->def_font == ITALICS_FONT) ? BOLD_ITALICS_FONT : BOLD_FONT); } void RichTextLabel::push_bold_italics() { @@ -3194,7 +3195,8 @@ void RichTextLabel::push_bold_italics() { void RichTextLabel::push_italics() { ERR_FAIL_COND(theme_cache.italics_font.is_null()); - _push_def_font(ITALICS_FONT); + ItemFont *item_font = _find_font(current); + _push_def_font((item_font && item_font->def_font == BOLD_FONT) ? 
BOLD_ITALICS_FONT : ITALICS_FONT); } void RichTextLabel::push_mono() { diff --git a/scene/resources/animation.cpp b/scene/resources/animation.cpp index 077a53464e..37407edc33 100644 --- a/scene/resources/animation.cpp +++ b/scene/resources/animation.cpp @@ -1319,7 +1319,7 @@ Error Animation::blend_shape_track_interpolate(int p_track, double p_time, float } void Animation::track_remove_key_at_time(int p_track, double p_time) { - int idx = track_find_key(p_track, p_time, true); + int idx = track_find_key(p_track, p_time, FIND_MODE_APPROX); ERR_FAIL_COND(idx < 0); track_remove_key(p_track, idx); } @@ -1400,7 +1400,7 @@ void Animation::track_remove_key(int p_track, int p_idx) { emit_changed(); } -int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { +int Animation::track_find_key(int p_track, double p_time, FindMode p_find_mode) const { ERR_FAIL_INDEX_V(p_track, tracks.size(), -1); Track *t = tracks[p_track]; @@ -1416,7 +1416,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { uint32_t key_index; bool fetch_compressed_success = _fetch_compressed<3>(tt->compressed_track, p_time, key, time, key_next, time_next, &key_index); ERR_FAIL_COND_V(!fetch_compressed_success, -1); - if (p_exact && time != p_time) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(time, p_time)) || (p_find_mode == FIND_MODE_EXACT && time != p_time)) { return -1; } return key_index; @@ -1426,7 +1426,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= tt->positions.size()) { return -1; } - if (tt->positions[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(tt->positions[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && tt->positions[k].time != p_time)) { return -1; } return k; @@ -1443,7 +1443,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { uint32_t key_index; bool fetch_compressed_success = _fetch_compressed<3>(rt->compressed_track, p_time, key, time, key_next, time_next, &key_index); ERR_FAIL_COND_V(!fetch_compressed_success, -1); - if (p_exact && time != p_time) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(time, p_time)) || (p_find_mode == FIND_MODE_EXACT && time != p_time)) { return -1; } return key_index; @@ -1453,7 +1453,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= rt->rotations.size()) { return -1; } - if (rt->rotations[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(rt->rotations[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && rt->rotations[k].time != p_time)) { return -1; } return k; @@ -1470,7 +1470,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { uint32_t key_index; bool fetch_compressed_success = _fetch_compressed<3>(st->compressed_track, p_time, key, time, key_next, time_next, &key_index); ERR_FAIL_COND_V(!fetch_compressed_success, -1); - if (p_exact && time != p_time) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(time, p_time)) || (p_find_mode == FIND_MODE_EXACT && time != p_time)) { return -1; } return key_index; @@ -1480,7 +1480,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= st->scales.size()) { return -1; } - if (st->scales[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(st->scales[k].time, p_time)) || 
(p_find_mode == FIND_MODE_EXACT && st->scales[k].time != p_time)) { return -1; } return k; @@ -1497,7 +1497,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { uint32_t key_index; bool fetch_compressed_success = _fetch_compressed<1>(bst->compressed_track, p_time, key, time, key_next, time_next, &key_index); ERR_FAIL_COND_V(!fetch_compressed_success, -1); - if (p_exact && time != p_time) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(time, p_time)) || (p_find_mode == FIND_MODE_EXACT && time != p_time)) { return -1; } return key_index; @@ -1507,7 +1507,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= bst->blend_shapes.size()) { return -1; } - if (bst->blend_shapes[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(bst->blend_shapes[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && bst->blend_shapes[k].time != p_time)) { return -1; } return k; @@ -1519,7 +1519,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= vt->values.size()) { return -1; } - if (vt->values[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(vt->values[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && vt->values[k].time != p_time)) { return -1; } return k; @@ -1531,7 +1531,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= mt->methods.size()) { return -1; } - if (mt->methods[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(mt->methods[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && mt->methods[k].time != p_time)) { return -1; } return k; @@ -1543,7 +1543,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= bt->values.size()) { return -1; } - if (bt->values[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(bt->values[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && bt->values[k].time != p_time)) { return -1; } return k; @@ -1555,7 +1555,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= at->values.size()) { return -1; } - if (at->values[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(at->values[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && at->values[k].time != p_time)) { return -1; } return k; @@ -1567,7 +1567,7 @@ int Animation::track_find_key(int p_track, double p_time, bool p_exact) const { if (k < 0 || k >= at->values.size()) { return -1; } - if (at->values[k].time != p_time && p_exact) { + if ((p_find_mode == FIND_MODE_APPROX && !Math::is_equal_approx(at->values[k].time, p_time)) || (p_find_mode == FIND_MODE_EXACT && at->values[k].time != p_time)) { return -1; } return k; @@ -2944,12 +2944,12 @@ void Animation::track_get_key_indices_in_range(int p_track, double p_time, doubl // Not from_time > to_time but most recent of looping... 
if (p_looped_flag != Animation::LOOPED_FLAG_NONE) { if (!is_backward && Math::is_equal_approx(from_time, 0)) { - int edge = track_find_key(p_track, 0, true); + int edge = track_find_key(p_track, 0, FIND_MODE_EXACT); if (edge >= 0) { p_indices->push_back(edge); } } else if (is_backward && Math::is_equal_approx(to_time, length)) { - int edge = track_find_key(p_track, length, true); + int edge = track_find_key(p_track, length, FIND_MODE_EXACT); if (edge >= 0) { p_indices->push_back(edge); } @@ -2971,7 +2971,7 @@ void Animation::track_get_key_indices_in_range(int p_track, double p_time, doubl const PositionTrack *tt = static_cast<const PositionTrack *>(t); if (tt->compressed_track >= 0) { _get_compressed_key_indices_in_range<3>(tt->compressed_track, 0, from_time, p_indices); - _get_compressed_key_indices_in_range<3>(tt->compressed_track, CMP_EPSILON, to_time, p_indices); + _get_compressed_key_indices_in_range<3>(tt->compressed_track, 0, to_time, p_indices); } else { _track_get_key_indices_in_range(tt->positions, 0, from_time, p_indices, true); _track_get_key_indices_in_range(tt->positions, 0, to_time, p_indices, false); @@ -2981,7 +2981,7 @@ void Animation::track_get_key_indices_in_range(int p_track, double p_time, doubl const RotationTrack *rt = static_cast<const RotationTrack *>(t); if (rt->compressed_track >= 0) { _get_compressed_key_indices_in_range<3>(rt->compressed_track, 0, from_time, p_indices); - _get_compressed_key_indices_in_range<3>(rt->compressed_track, CMP_EPSILON, to_time, p_indices); + _get_compressed_key_indices_in_range<3>(rt->compressed_track, 0, to_time, p_indices); } else { _track_get_key_indices_in_range(rt->rotations, 0, from_time, p_indices, true); _track_get_key_indices_in_range(rt->rotations, 0, to_time, p_indices, false); @@ -3072,7 +3072,7 @@ void Animation::track_get_key_indices_in_range(int p_track, double p_time, doubl const BlendShapeTrack *bst = static_cast<const BlendShapeTrack *>(t); if (bst->compressed_track >= 0) { _get_compressed_key_indices_in_range<1>(bst->compressed_track, from_time, length, p_indices); - _get_compressed_key_indices_in_range<1>(bst->compressed_track, to_time, length - CMP_EPSILON, p_indices); + _get_compressed_key_indices_in_range<1>(bst->compressed_track, to_time, length, p_indices); } else { _track_get_key_indices_in_range(bst->blend_shapes, from_time, length, p_indices, false); _track_get_key_indices_in_range(bst->blend_shapes, to_time, length, p_indices, true); @@ -3109,9 +3109,9 @@ void Animation::track_get_key_indices_in_range(int p_track, double p_time, doubl // The edge will be pingponged in the next frame and processed there, so let's ignore it now... 
if (!is_backward && Math::is_equal_approx(to_time, length)) { - to_time = length - CMP_EPSILON; + to_time -= CMP_EPSILON; } else if (is_backward && Math::is_equal_approx(from_time, 0)) { - from_time = CMP_EPSILON; + from_time += CMP_EPSILON; } } break; } @@ -3818,7 +3818,7 @@ void Animation::_bind_methods() { ClassDB::bind_method(D_METHOD("track_get_key_count", "track_idx"), &Animation::track_get_key_count); ClassDB::bind_method(D_METHOD("track_get_key_value", "track_idx", "key_idx"), &Animation::track_get_key_value); ClassDB::bind_method(D_METHOD("track_get_key_time", "track_idx", "key_idx"), &Animation::track_get_key_time); - ClassDB::bind_method(D_METHOD("track_find_key", "track_idx", "time", "exact"), &Animation::track_find_key, DEFVAL(false)); + ClassDB::bind_method(D_METHOD("track_find_key", "track_idx", "time", "find_mode"), &Animation::track_find_key, DEFVAL(FIND_MODE_NEAREST)); ClassDB::bind_method(D_METHOD("track_set_interpolation_type", "track_idx", "interpolation"), &Animation::track_set_interpolation_type); ClassDB::bind_method(D_METHOD("track_get_interpolation_type", "track_idx"), &Animation::track_get_interpolation_type); @@ -3905,6 +3905,10 @@ void Animation::_bind_methods() { BIND_ENUM_CONSTANT(LOOPED_FLAG_NONE); BIND_ENUM_CONSTANT(LOOPED_FLAG_END); BIND_ENUM_CONSTANT(LOOPED_FLAG_START); + + BIND_ENUM_CONSTANT(FIND_MODE_NEAREST); + BIND_ENUM_CONSTANT(FIND_MODE_APPROX); + BIND_ENUM_CONSTANT(FIND_MODE_EXACT); } void Animation::clear() { diff --git a/scene/resources/animation.h b/scene/resources/animation.h index 0ac1279063..b7d5a683db 100644 --- a/scene/resources/animation.h +++ b/scene/resources/animation.h @@ -79,6 +79,12 @@ public: LOOPED_FLAG_START, }; + enum FindMode { + FIND_MODE_NEAREST, + FIND_MODE_APPROX, + FIND_MODE_EXACT, + }; + #ifdef TOOLS_ENABLED enum HandleMode { HANDLE_MODE_FREE, @@ -392,7 +398,7 @@ public: void track_set_key_transition(int p_track, int p_key_idx, real_t p_transition); void track_set_key_value(int p_track, int p_key_idx, const Variant &p_value); void track_set_key_time(int p_track, int p_key_idx, double p_time); - int track_find_key(int p_track, double p_time, bool p_exact = false) const; + int track_find_key(int p_track, double p_time, FindMode p_find_mode = FIND_MODE_NEAREST) const; void track_remove_key(int p_track, int p_idx); void track_remove_key_at_time(int p_track, double p_time); int track_get_key_count(int p_track) const; @@ -489,6 +495,7 @@ VARIANT_ENUM_CAST(Animation::InterpolationType); VARIANT_ENUM_CAST(Animation::UpdateMode); VARIANT_ENUM_CAST(Animation::LoopMode); VARIANT_ENUM_CAST(Animation::LoopedFlag); +VARIANT_ENUM_CAST(Animation::FindMode); #ifdef TOOLS_ENABLED VARIANT_ENUM_CAST(Animation::HandleMode); VARIANT_ENUM_CAST(Animation::HandleSetMode); diff --git a/servers/rendering/shader_compiler.cpp b/servers/rendering/shader_compiler.cpp index 3604756fd8..00b6a8c44c 100644 --- a/servers/rendering/shader_compiler.cpp +++ b/servers/rendering/shader_compiler.cpp @@ -134,6 +134,8 @@ static String _interpstr(SL::DataInterpolation p_interp) { return "flat "; case SL::INTERPOLATION_SMOOTH: return ""; + case SL::INTERPOLATION_DEFAULT: + return ""; } return ""; } @@ -667,6 +669,9 @@ String ShaderCompiler::_dump_node_code(const SL::Node *p_node, int p_level, Gene fragment_varyings.insert(varying_name); continue; } + if (varying.type < SL::TYPE_INT) { + continue; // Ignore boolean types to prevent crashing (if varying is just declared). 
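// Illustrative sketch (not part of the patch): together with the shader_language.cpp checks
// below, boolean varyings are now rejected when they carry an interpolation modifier or are
// assigned in 'vertex'. In Godot shading language (assumed example):
//
//   varying flat bool use_detail;   // error: "Interpolation modifier 'flat' cannot be used
//                                   //         with boolean types."
//   void vertex() {
//       use_detail = true;          // error: boolean varyings may only be assigned in the
//   }                               //        'fragment' function.
//
// A boolean varying that is only declared is simply skipped by the compiler, as above.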
+ } String vcode; String interp_mode = _interpstr(varying.interpolation); diff --git a/servers/rendering/shader_language.cpp b/servers/rendering/shader_language.cpp index 512995dd83..8c732b3d5a 100644 --- a/servers/rendering/shader_language.cpp +++ b/servers/rendering/shader_language.cpp @@ -989,6 +989,18 @@ String ShaderLanguage::get_precision_name(DataPrecision p_type) { return ""; } +String ShaderLanguage::get_interpolation_name(DataInterpolation p_interpolation) { + switch (p_interpolation) { + case INTERPOLATION_FLAT: + return "flat"; + case INTERPOLATION_SMOOTH: + return "smooth"; + default: + break; + } + return ""; +} + String ShaderLanguage::get_datatype_name(DataType p_type) { switch (p_type) { case TYPE_VOID: @@ -4424,6 +4436,10 @@ bool ShaderLanguage::_validate_varying_assign(ShaderNode::Varying &p_varying, St switch (p_varying.stage) { case ShaderNode::Varying::STAGE_UNKNOWN: // first assign if (current_function == varying_function_names.vertex) { + if (p_varying.type < TYPE_INT) { + *r_message = vformat(RTR("Varying with '%s' data type may only be assigned in the 'fragment' function."), get_datatype_name(p_varying.type)); + return false; + } p_varying.stage = ShaderNode::Varying::STAGE_VERTEX; } else if (current_function == varying_function_names.fragment) { p_varying.stage = ShaderNode::Varying::STAGE_FRAGMENT; @@ -5223,7 +5239,7 @@ ShaderLanguage::Node *ShaderLanguage::_parse_expression(BlockNode *p_block, cons if (shader->varyings.has(varname)) { switch (shader->varyings[varname].stage) { case ShaderNode::Varying::STAGE_UNKNOWN: { - _set_error(vformat(RTR("Varying '%s' must be assigned in the vertex or fragment function first."), varname)); + _set_error(vformat(RTR("Varying '%s' must be assigned in the 'vertex' or 'fragment' function first."), varname)); return nullptr; } case ShaderNode::Varying::STAGE_VERTEX_TO_FRAGMENT_LIGHT: @@ -5407,6 +5423,16 @@ ShaderLanguage::Node *ShaderLanguage::_parse_expression(BlockNode *p_block, cons } } else { switch (var.stage) { + case ShaderNode::Varying::STAGE_UNKNOWN: { + if (var.type < TYPE_INT) { + if (current_function == varying_function_names.vertex) { + _set_error(vformat(RTR("Varying with '%s' data type may only be used in the 'fragment' function."), get_datatype_name(var.type))); + } else { + _set_error(vformat(RTR("Varying '%s' must be assigned in the 'fragment' function first."), identifier)); + } + return nullptr; + } + } break; case ShaderNode::Varying::STAGE_VERTEX: if (current_function == varying_function_names.fragment || current_function == varying_function_names.light) { var.stage = ShaderNode::Varying::STAGE_VERTEX_TO_FRAGMENT_LIGHT; @@ -8225,7 +8251,7 @@ Error ShaderLanguage::_parse_shader(const HashMap<StringName, FunctionInfo> &p_f } } DataPrecision precision = PRECISION_DEFAULT; - DataInterpolation interpolation = INTERPOLATION_SMOOTH; + DataInterpolation interpolation = INTERPOLATION_DEFAULT; DataType type; StringName name; int array_size = 0; @@ -8334,6 +8360,11 @@ Error ShaderLanguage::_parse_shader(const HashMap<StringName, FunctionInfo> &p_f return ERR_PARSE_ERROR; } + if (!is_uniform && interpolation != INTERPOLATION_DEFAULT && type < TYPE_INT) { + _set_error(vformat(RTR("Interpolation modifier '%s' cannot be used with boolean types."), get_interpolation_name(interpolation))); + return ERR_PARSE_ERROR; + } + if (!is_uniform && type > TYPE_MAT4) { _set_error(RTR("Invalid data type for varying.")); return ERR_PARSE_ERROR; diff --git a/servers/rendering/shader_language.h b/servers/rendering/shader_language.h 
index 9c3cc9c5cd..d34114589f 100644 --- a/servers/rendering/shader_language.h +++ b/servers/rendering/shader_language.h @@ -247,6 +247,7 @@ public: enum DataInterpolation { INTERPOLATION_FLAT, INTERPOLATION_SMOOTH, + INTERPOLATION_DEFAULT, }; enum Operator { @@ -774,6 +775,7 @@ public: static bool is_token_arg_qual(TokenType p_type); static DataPrecision get_token_precision(TokenType p_type); static String get_precision_name(DataPrecision p_type); + static String get_interpolation_name(DataInterpolation p_interpolation); static String get_datatype_name(DataType p_type); static String get_uniform_hint_name(ShaderNode::Uniform::Hint p_hint); static String get_texture_filter_name(TextureFilter p_filter); diff --git a/thirdparty/README.md b/thirdparty/README.md index 017dabe9e5..d50d2cc0ba 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -570,7 +570,7 @@ Exclude: ## pcre2 - Upstream: http://www.pcre.org -- Version: 10.40 (3103b8f20a3b9944b177e812fde29fbfb8b90558, 2022) +- Version: 10.42 (52c08847921a324c804cabf2814549f50bce1265, 2022) - License: BSD-3-Clause Files extracted from upstream source: diff --git a/thirdparty/pcre2/src/config.h b/thirdparty/pcre2/src/config.h index 76dc5868b1..5548d18eb2 100644 --- a/thirdparty/pcre2/src/config.h +++ b/thirdparty/pcre2/src/config.h @@ -236,7 +236,7 @@ sure both macros are undefined; an emulation function will then be used. */ #define PACKAGE_NAME "PCRE2" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "PCRE2 10.40" +#define PACKAGE_STRING "PCRE2 10.42" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "pcre2" @@ -245,7 +245,7 @@ sure both macros are undefined; an emulation function will then be used. */ #define PACKAGE_URL "" /* Define to the version of this package. */ -#define PACKAGE_VERSION "10.40" +#define PACKAGE_VERSION "10.42" /* The value of PARENS_NEST_LIMIT specifies the maximum depth of nested parentheses (of any kind) in a pattern. This limits the amount of system @@ -438,7 +438,13 @@ sure both macros are undefined; an emulation function will then be used. */ #endif /* Version number of package */ -#define VERSION "10.40" +#define VERSION "10.42" + +/* Number of bits in a file offset, on hosts where this is settable. */ +/* #undef _FILE_OFFSET_BITS */ + +/* Define for large files, on AIX-style hosts. */ +/* #undef _LARGE_FILES */ /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ diff --git a/thirdparty/pcre2/src/pcre2.h b/thirdparty/pcre2/src/pcre2.h index 8adcede57c..1cbecd0e86 100644 --- a/thirdparty/pcre2/src/pcre2.h +++ b/thirdparty/pcre2/src/pcre2.h @@ -42,9 +42,9 @@ POSSIBILITY OF SUCH DAMAGE. /* The current PCRE version information. */ #define PCRE2_MAJOR 10 -#define PCRE2_MINOR 40 +#define PCRE2_MINOR 42 #define PCRE2_PRERELEASE -#define PCRE2_DATE 2022-04-14 +#define PCRE2_DATE 2022-12-11 /* When an application links to a PCRE DLL in Windows, the symbols that are imported have to be identified as such. When building PCRE2, the appropriate @@ -572,19 +572,19 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION pcre2_config(uint32_t, void *); /* Functions for manipulating contexts. 
*/ #define PCRE2_GENERAL_CONTEXT_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_general_context PCRE2_CALL_CONVENTION \ - *pcre2_general_context_copy(pcre2_general_context *); \ -PCRE2_EXP_DECL pcre2_general_context PCRE2_CALL_CONVENTION \ - *pcre2_general_context_create(void *(*)(PCRE2_SIZE, void *), \ +PCRE2_EXP_DECL pcre2_general_context *PCRE2_CALL_CONVENTION \ + pcre2_general_context_copy(pcre2_general_context *); \ +PCRE2_EXP_DECL pcre2_general_context *PCRE2_CALL_CONVENTION \ + pcre2_general_context_create(void *(*)(PCRE2_SIZE, void *), \ void (*)(void *, void *), void *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_general_context_free(pcre2_general_context *); #define PCRE2_COMPILE_CONTEXT_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_compile_context PCRE2_CALL_CONVENTION \ - *pcre2_compile_context_copy(pcre2_compile_context *); \ -PCRE2_EXP_DECL pcre2_compile_context PCRE2_CALL_CONVENTION \ - *pcre2_compile_context_create(pcre2_general_context *);\ +PCRE2_EXP_DECL pcre2_compile_context *PCRE2_CALL_CONVENTION \ + pcre2_compile_context_copy(pcre2_compile_context *); \ +PCRE2_EXP_DECL pcre2_compile_context *PCRE2_CALL_CONVENTION \ + pcre2_compile_context_create(pcre2_general_context *);\ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_compile_context_free(pcre2_compile_context *); \ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ @@ -604,10 +604,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ int (*)(uint32_t, void *), void *); #define PCRE2_MATCH_CONTEXT_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_match_context PCRE2_CALL_CONVENTION \ - *pcre2_match_context_copy(pcre2_match_context *); \ -PCRE2_EXP_DECL pcre2_match_context PCRE2_CALL_CONVENTION \ - *pcre2_match_context_create(pcre2_general_context *); \ +PCRE2_EXP_DECL pcre2_match_context *PCRE2_CALL_CONVENTION \ + pcre2_match_context_copy(pcre2_match_context *); \ +PCRE2_EXP_DECL pcre2_match_context *PCRE2_CALL_CONVENTION \ + pcre2_match_context_create(pcre2_general_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_match_context_free(pcre2_match_context *); \ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ @@ -631,10 +631,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ void *(*)(PCRE2_SIZE, void *), void (*)(void *, void *), void *); #define PCRE2_CONVERT_CONTEXT_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_convert_context PCRE2_CALL_CONVENTION \ - *pcre2_convert_context_copy(pcre2_convert_context *); \ -PCRE2_EXP_DECL pcre2_convert_context PCRE2_CALL_CONVENTION \ - *pcre2_convert_context_create(pcre2_general_context *); \ +PCRE2_EXP_DECL pcre2_convert_context *PCRE2_CALL_CONVENTION \ + pcre2_convert_context_copy(pcre2_convert_context *); \ +PCRE2_EXP_DECL pcre2_convert_context *PCRE2_CALL_CONVENTION \ + pcre2_convert_context_create(pcre2_general_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_convert_context_free(pcre2_convert_context *); \ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ @@ -646,15 +646,15 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ /* Functions concerned with compiling a pattern to PCRE internal code. 
*/ #define PCRE2_COMPILE_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \ - *pcre2_compile(PCRE2_SPTR, PCRE2_SIZE, uint32_t, int *, PCRE2_SIZE *, \ +PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \ + pcre2_compile(PCRE2_SPTR, PCRE2_SIZE, uint32_t, int *, PCRE2_SIZE *, \ pcre2_compile_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_code_free(pcre2_code *); \ -PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \ - *pcre2_code_copy(const pcre2_code *); \ -PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \ - *pcre2_code_copy_with_tables(const pcre2_code *); +PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \ + pcre2_code_copy(const pcre2_code *); \ +PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \ + pcre2_code_copy_with_tables(const pcre2_code *); /* Functions that give information about a compiled pattern. */ @@ -670,10 +670,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ /* Functions for running a match and inspecting the result. */ #define PCRE2_MATCH_FUNCTIONS \ -PCRE2_EXP_DECL pcre2_match_data PCRE2_CALL_CONVENTION \ - *pcre2_match_data_create(uint32_t, pcre2_general_context *); \ -PCRE2_EXP_DECL pcre2_match_data PCRE2_CALL_CONVENTION \ - *pcre2_match_data_create_from_pattern(const pcre2_code *, \ +PCRE2_EXP_DECL pcre2_match_data *PCRE2_CALL_CONVENTION \ + pcre2_match_data_create(uint32_t, pcre2_general_context *); \ +PCRE2_EXP_DECL pcre2_match_data *PCRE2_CALL_CONVENTION \ + pcre2_match_data_create_from_pattern(const pcre2_code *, \ pcre2_general_context *); \ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ pcre2_dfa_match(const pcre2_code *, PCRE2_SPTR, PCRE2_SIZE, PCRE2_SIZE, \ @@ -689,8 +689,8 @@ PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \ pcre2_get_match_data_size(pcre2_match_data *); \ PCRE2_EXP_DECL uint32_t PCRE2_CALL_CONVENTION \ pcre2_get_ovector_count(pcre2_match_data *); \ -PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \ - *pcre2_get_ovector_pointer(pcre2_match_data *); \ +PCRE2_EXP_DECL PCRE2_SIZE *PCRE2_CALL_CONVENTION \ + pcre2_get_ovector_pointer(pcre2_match_data *); \ PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \ pcre2_get_startchar(pcre2_match_data *); @@ -770,8 +770,8 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ uint32_t, pcre2_match_data *, pcre2_match_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_jit_free_unused_memory(pcre2_general_context *); \ -PCRE2_EXP_DECL pcre2_jit_stack PCRE2_CALL_CONVENTION \ - *pcre2_jit_stack_create(PCRE2_SIZE, PCRE2_SIZE, pcre2_general_context *); \ +PCRE2_EXP_DECL pcre2_jit_stack *PCRE2_CALL_CONVENTION \ + pcre2_jit_stack_create(PCRE2_SIZE, PCRE2_SIZE, pcre2_general_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_jit_stack_assign(pcre2_match_context *, pcre2_jit_callback, void *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ @@ -783,8 +783,8 @@ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ #define PCRE2_OTHER_FUNCTIONS \ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \ pcre2_get_error_message(int, PCRE2_UCHAR *, PCRE2_SIZE); \ -PCRE2_EXP_DECL const uint8_t PCRE2_CALL_CONVENTION \ - *pcre2_maketables(pcre2_general_context *); \ +PCRE2_EXP_DECL const uint8_t *PCRE2_CALL_CONVENTION \ + pcre2_maketables(pcre2_general_context *); \ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \ pcre2_maketables_free(pcre2_general_context *, const uint8_t *); diff --git a/thirdparty/pcre2/src/pcre2_compile.c b/thirdparty/pcre2/src/pcre2_compile.c index de259c9c40..edf7e82e6e 100644 --- a/thirdparty/pcre2/src/pcre2_compile.c +++ b/thirdparty/pcre2/src/pcre2_compile.c @@ -1266,8 +1266,10 @@ 
PCRE2_SIZE* ref_count; if (code != NULL) { +#ifdef SUPPORT_JIT if (code->executable_jit != NULL) PRIV(jit_free)(code->executable_jit, &code->memctl); +#endif if ((code->flags & PCRE2_DEREF_TABLES) != 0) { @@ -2687,7 +2689,7 @@ if ((options & PCRE2_EXTENDED_MORE) != 0) options |= PCRE2_EXTENDED; while (ptr < ptrend) { int prev_expect_cond_assert; - uint32_t min_repeat, max_repeat; + uint32_t min_repeat = 0, max_repeat = 0; uint32_t set, unset, *optset; uint32_t terminator; uint32_t prev_meta_quantifier; @@ -8552,7 +8554,7 @@ do { op == OP_SCBRA || op == OP_SCBRAPOS) { int n = GET2(scode, 1+LINK_SIZE); - int new_map = bracket_map | ((n < 32)? (1u << n) : 1); + unsigned int new_map = bracket_map | ((n < 32)? (1u << n) : 1); if (!is_startline(scode, new_map, cb, atomcount, inassert)) return FALSE; } @@ -10620,4 +10622,10 @@ re = NULL; goto EXIT; } +/* These #undefs are here to enable unity builds with CMake. */ + +#undef NLBLOCK /* Block containing newline information */ +#undef PSSTART /* Field containing processed string start */ +#undef PSEND /* Field containing processed string end */ + /* End of pcre2_compile.c */ diff --git a/thirdparty/pcre2/src/pcre2_context.c b/thirdparty/pcre2/src/pcre2_context.c index f904a494a0..8e05ede50c 100644 --- a/thirdparty/pcre2/src/pcre2_context.c +++ b/thirdparty/pcre2/src/pcre2_context.c @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge - New API code Copyright (c) 2016-2018 University of Cambridge + New API code Copyright (c) 2016-2022 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -443,8 +443,11 @@ mcontext->offset_limit = limit; return 0; } -/* This function became obsolete at release 10.30. It is kept as a synonym for -backwards compatibility. */ +/* These functions became obsolete at release 10.30. The first is kept as a +synonym for backwards compatibility. The second now does nothing. Exclude both +from coverage reports. */ + +/* LCOV_EXCL_START */ PCRE2_EXP_DEFN int PCRE2_CALL_CONVENTION pcre2_set_recursion_limit(pcre2_match_context *mcontext, uint32_t limit) @@ -464,6 +467,9 @@ pcre2_set_recursion_memory_management(pcre2_match_context *mcontext, return 0; } +/* LCOV_EXCL_STOP */ + + /* ------------ Convert context ------------ */ PCRE2_EXP_DEFN int PCRE2_CALL_CONVENTION diff --git a/thirdparty/pcre2/src/pcre2_convert.c b/thirdparty/pcre2/src/pcre2_convert.c index d45b6fee97..36466e4b91 100644 --- a/thirdparty/pcre2/src/pcre2_convert.c +++ b/thirdparty/pcre2/src/pcre2_convert.c @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge - New API code Copyright (c) 2016-2018 University of Cambridge + New API code Copyright (c) 2016-2022 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -65,9 +65,8 @@ POSSIBILITY OF SUCH DAMAGE. 
#define STR_QUERY_s STR_LEFT_PARENTHESIS STR_QUESTION_MARK STR_s STR_RIGHT_PARENTHESIS #define STR_STAR_NUL STR_LEFT_PARENTHESIS STR_ASTERISK STR_N STR_U STR_L STR_RIGHT_PARENTHESIS -/* States for range and POSIX processing */ +/* States for POSIX processing */ -enum { RANGE_NOT_STARTED, RANGE_STARTING, RANGE_STARTED }; enum { POSIX_START_REGEX, POSIX_ANCHORED, POSIX_NOT_BRACKET, POSIX_CLASS_NOT_STARTED, POSIX_CLASS_STARTING, POSIX_CLASS_STARTED }; diff --git a/thirdparty/pcre2/src/pcre2_dfa_match.c b/thirdparty/pcre2/src/pcre2_dfa_match.c index d29130f2d0..b16e594cc0 100644 --- a/thirdparty/pcre2/src/pcre2_dfa_match.c +++ b/thirdparty/pcre2/src/pcre2_dfa_match.c @@ -350,7 +350,7 @@ Returns: the return from the callout */ static int -do_callout(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject, +do_callout_dfa(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject, PCRE2_SPTR ptr, dfa_match_block *mb, PCRE2_SIZE extracode, PCRE2_SIZE *lengthptr) { @@ -2799,7 +2799,7 @@ for (;;) || code[LINK_SIZE + 1] == OP_CALLOUT_STR) { PCRE2_SIZE callout_length; - rrc = do_callout(code, offsets, current_subject, ptr, mb, + rrc = do_callout_dfa(code, offsets, current_subject, ptr, mb, 1 + LINK_SIZE, &callout_length); if (rrc < 0) return rrc; /* Abandon */ if (rrc > 0) break; /* Fail this thread */ @@ -3196,7 +3196,7 @@ for (;;) case OP_CALLOUT_STR: { PCRE2_SIZE callout_length; - rrc = do_callout(code, offsets, current_subject, ptr, mb, 0, + rrc = do_callout_dfa(code, offsets, current_subject, ptr, mb, 0, &callout_length); if (rrc < 0) return rrc; /* Abandon */ if (rrc == 0) @@ -4057,4 +4057,10 @@ while (rws->next != NULL) return rc; } +/* These #undefs are here to enable unity builds with CMake. */ + +#undef NLBLOCK /* Block containing newline information */ +#undef PSSTART /* Field containing processed string start */ +#undef PSEND /* Field containing processed string end */ + /* End of pcre2_dfa_match.c */ diff --git a/thirdparty/pcre2/src/pcre2_internal.h b/thirdparty/pcre2/src/pcre2_internal.h index fe7a0e005a..92dd3138d4 100644 --- a/thirdparty/pcre2/src/pcre2_internal.h +++ b/thirdparty/pcre2/src/pcre2_internal.h @@ -220,18 +220,17 @@ not rely on this. */ #define COMPILE_ERROR_BASE 100 -/* The initial frames vector for remembering backtracking points in -pcre2_match() is allocated on the system stack, of this size (bytes). The size -must be a multiple of sizeof(PCRE2_SPTR) in all environments, so making it a -multiple of 8 is best. Typical frame sizes are a few hundred bytes (it depends -on the number of capturing parentheses) so 20KiB handles quite a few frames. A -larger vector on the heap is obtained for patterns that need more frames. The -maximum size of this can be limited. */ +/* The initial frames vector for remembering pcre2_match() backtracking points +is allocated on the heap, of this size (bytes) or ten times the frame size if +larger, unless the heap limit is smaller. Typical frame sizes are a few hundred +bytes (it depends on the number of capturing parentheses) so 20KiB handles +quite a few frames. A larger vector on the heap is obtained for matches that +need more frames, subject to the heap limit. */ #define START_FRAMES_SIZE 20480 -/* Similarly, for DFA matching, an initial internal workspace vector is -allocated on the stack. */ +/* For DFA matching, an initial internal workspace vector is allocated on the +stack. The heap is used only if this turns out to be too small. 
*/ #define DFA_START_RWS_SIZE 30720 diff --git a/thirdparty/pcre2/src/pcre2_intmodedep.h b/thirdparty/pcre2/src/pcre2_intmodedep.h index f8a3d25de6..390e737a6e 100644 --- a/thirdparty/pcre2/src/pcre2_intmodedep.h +++ b/thirdparty/pcre2/src/pcre2_intmodedep.h @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge - New API code Copyright (c) 2016-2018 University of Cambridge + New API code Copyright (c) 2016-2022 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -649,19 +649,23 @@ the size varies from call to call. As the maximum number of capturing subpatterns is 65535 we must allow for 65536 strings to include the overall match. (See also the heapframe structure below.) */ +struct heapframe; /* Forward reference */ + typedef struct pcre2_real_match_data { - pcre2_memctl memctl; - const pcre2_real_code *code; /* The pattern used for the match */ - PCRE2_SPTR subject; /* The subject that was matched */ - PCRE2_SPTR mark; /* Pointer to last mark */ - PCRE2_SIZE leftchar; /* Offset to leftmost code unit */ - PCRE2_SIZE rightchar; /* Offset to rightmost code unit */ - PCRE2_SIZE startchar; /* Offset to starting code unit */ - uint8_t matchedby; /* Type of match (normal, JIT, DFA) */ - uint8_t flags; /* Various flags */ - uint16_t oveccount; /* Number of pairs */ - int rc; /* The return code from the match */ - PCRE2_SIZE ovector[131072]; /* Must be last in the structure */ + pcre2_memctl memctl; /* Memory control fields */ + const pcre2_real_code *code; /* The pattern used for the match */ + PCRE2_SPTR subject; /* The subject that was matched */ + PCRE2_SPTR mark; /* Pointer to last mark */ + struct heapframe *heapframes; /* Backtracking frames heap memory */ + PCRE2_SIZE heapframes_size; /* Malloc-ed size */ + PCRE2_SIZE leftchar; /* Offset to leftmost code unit */ + PCRE2_SIZE rightchar; /* Offset to rightmost code unit */ + PCRE2_SIZE startchar; /* Offset to starting code unit */ + uint8_t matchedby; /* Type of match (normal, JIT, DFA) */ + uint8_t flags; /* Various flags */ + uint16_t oveccount; /* Number of pairs */ + int rc; /* The return code from the match */ + PCRE2_SIZE ovector[131072]; /* Must be last in the structure */ } pcre2_real_match_data; @@ -854,10 +858,6 @@ doing traditional NFA matching (pcre2_match() and friends). */ typedef struct match_block { pcre2_memctl memctl; /* For general use */ - PCRE2_SIZE frame_vector_size; /* Size of a backtracking frame */ - heapframe *match_frames; /* Points to vector of frames */ - heapframe *match_frames_top; /* Points after the end of the vector */ - heapframe *stack_frames; /* The original vector on the stack */ PCRE2_SIZE heap_limit; /* As it says */ uint32_t match_limit; /* As it says */ uint32_t match_limit_depth; /* As it says */ diff --git a/thirdparty/pcre2/src/pcre2_jit_compile.c b/thirdparty/pcre2/src/pcre2_jit_compile.c index d726c3ca04..0afd27c5ee 100644 --- a/thirdparty/pcre2/src/pcre2_jit_compile.c +++ b/thirdparty/pcre2/src/pcre2_jit_compile.c @@ -542,7 +542,7 @@ typedef struct compare_context { #undef CMP /* Used for accessing the elements of the stack. */ -#define STACK(i) ((i) * (int)sizeof(sljit_sw)) +#define STACK(i) ((i) * SSIZE_OF(sw)) #ifdef SLJIT_PREF_SHIFT_REG #if SLJIT_PREF_SHIFT_REG == SLJIT_R2 @@ -590,8 +590,8 @@ to characters. 
The vector data is divided into two groups: the first group contains the start / end character pointers, and the second is the start pointers when the end of the capturing group has not yet reached. */ #define OVECTOR_START (common->ovector_start) -#define OVECTOR(i) (OVECTOR_START + (i) * (sljit_sw)sizeof(sljit_sw)) -#define OVECTOR_PRIV(i) (common->cbra_ptr + (i) * (sljit_sw)sizeof(sljit_sw)) +#define OVECTOR(i) (OVECTOR_START + (i) * SSIZE_OF(sw)) +#define OVECTOR_PRIV(i) (common->cbra_ptr + (i) * SSIZE_OF(sw)) #define PRIVATE_DATA(cc) (common->private_data_ptrs[(cc) - common->start]) #if PCRE2_CODE_UNIT_WIDTH == 8 @@ -2151,9 +2151,9 @@ while (cc < ccend) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0)); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); setsom_found = TRUE; } cc += 1; @@ -2168,9 +2168,9 @@ while (cc < ccend) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); setmark_found = TRUE; } cc += 1 + 2 + cc[1]; @@ -2181,27 +2181,27 @@ while (cc < ccend) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0)); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); setsom_found = TRUE; } if (common->mark_ptr != 0 && !setmark_found) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); setmark_found = TRUE; } if (common->capture_last_ptr != 0 && !capture_last_found) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); capture_last_found = TRUE; } cc += 1 + LINK_SIZE; @@ -2215,20 +2215,20 @@ while (cc < ccend) { OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); capture_last_found = TRUE; } offset = (GET2(cc, 1 + LINK_SIZE)) << 1; OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, OVECTOR(offset)); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset)); OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1)); OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); 
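/* SSIZE_OF(), used throughout these sljit hunks, is sljit's signed sizeof
helper. A sketch of the assumed definition (the real macro lives in
sljitConfigInternal.h), renamed here so it cannot clash with sljit's own: */
#define SSIZE_OF_SKETCH(type) ((sljit_s32)sizeof(sljit_ ## type))
/* With type == sw this yields (sljit_s32)sizeof(sljit_sw), so for example
     stackpos -= SSIZE_OF(sw);
   is the signed-width spelling of the old
     stackpos -= (int)sizeof(sljit_sw);  */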
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP2, 0); - stackpos -= (int)sizeof(sljit_sw); + stackpos -= SSIZE_OF(sw); cc += 1 + LINK_SIZE + IMM2_SIZE; break; @@ -3144,7 +3144,7 @@ static SLJIT_INLINE void allocate_stack(compiler_common *common, int size) DEFINE_COMPILER; SLJIT_ASSERT(size > 0); -OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); +OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * SSIZE_OF(sw)); #ifdef DESTROY_REGISTERS OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 12345); OP1(SLJIT_MOV, TMP3, 0, TMP1, 0); @@ -3160,7 +3160,7 @@ static SLJIT_INLINE void free_stack(compiler_common *common, int size) DEFINE_COMPILER; SLJIT_ASSERT(size > 0); -OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); +OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * SSIZE_OF(sw)); } static sljit_uw * allocate_read_only_data(compiler_common *common, sljit_uw size) @@ -3200,12 +3200,12 @@ if (length < 8) } else { - if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS) + if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS) { GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START); OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1); loop = LABEL(); - sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)); + sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)); OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1); JUMPTO(SLJIT_NOT_ZERO, loop); } @@ -3261,8 +3261,8 @@ OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, size - uncleared_size); loop = LABEL(); OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), 0, src, 0); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 3 * sizeof(sljit_sw)); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -2 * (sljit_sw)sizeof(sljit_sw), src, 0); -OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -1 * (sljit_sw)sizeof(sljit_sw), src, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -2 * SSIZE_OF(sw), src, 0); +OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -1 * SSIZE_OF(sw), src, 0); CMPTO(SLJIT_LESS, TMP1, 0, TMP2, 0, loop); if (uncleared_size >= sizeof(sljit_sw)) @@ -3289,12 +3289,12 @@ if (length < 8) } else { - if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS) + if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS) { GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw)); OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2); loop = LABEL(); - sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); + sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)); OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1); JUMPTO(SLJIT_NOT_ZERO, loop); } @@ -3386,7 +3386,7 @@ else OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, SLJIT_OFFSETOF(pcre2_match_data, ovector) - sizeof(PCRE2_SIZE)); } -has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS; +has_pre = sljit_emit_mem_update(compiler, SLJIT_MOV | 
SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS; GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0)); OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(HAS_VIRTUAL_REGISTERS ? SLJIT_R0 : ARGUMENTS), SLJIT_OFFSETOF(jit_arguments, begin)); @@ -3394,7 +3394,7 @@ OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(HAS_VIRTUAL_REGISTERS ? SLJIT_R0 : ARGUME loop = LABEL(); if (has_pre) - sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)); + sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)); else { OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0); @@ -3417,14 +3417,14 @@ JUMPTO(SLJIT_NOT_ZERO, loop); /* Calculate the return value, which is the maximum ovector value. */ if (topbracket > 1) { - if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS) + if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * SSIZE_OF(sw))) == SLJIT_SUCCESS) { GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw)); OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1); /* OVECTOR(0) is never equal to SLJIT_S2. */ loop = LABEL(); - sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))); + sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * SSIZE_OF(sw))); OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0); @@ -3437,7 +3437,7 @@ if (topbracket > 1) /* OVECTOR(0) is never equal to SLJIT_S2. */ loop = LABEL(); OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0); - OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw)); + OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * SSIZE_OF(sw)); OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1); CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop); OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0); @@ -4652,8 +4652,8 @@ if (common->nltype != NLTYPE_ANY) /* All newlines are ascii, just skip intermediate octets. */ jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0); loop = LABEL(); - if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)) == SLJIT_SUCCESS) - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)) == SLJIT_SUCCESS) + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); else { OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0)); @@ -5886,7 +5886,7 @@ static BOOL check_fast_forward_char_pair_simd(compiler_common *common, fast_forw while (j < i) { b_pri = chars[j].last_count; - if (b_pri > 2 && a_pri + b_pri >= max_pri) + if (b_pri > 2 && (sljit_u32)a_pri + (sljit_u32)b_pri >= max_pri) { b1 = chars[j].chars[0]; b2 = chars[j].chars[1]; @@ -6572,21 +6572,21 @@ GET_LOCAL_BASE(TMP1, 0, 0); /* Drop frames until we reach STACK_TOP. 
*/ mainloop = LABEL(); -OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -sizeof(sljit_sw)); +OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -SSIZE_OF(sw)); jump = CMP(SLJIT_SIG_LESS_EQUAL, TMP2, 0, SLJIT_IMM, 0); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); if (HAS_VIRTUAL_REGISTERS) { - OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw))); - OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -(3 * sizeof(sljit_sw))); - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw))); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -(3 * SSIZE_OF(sw))); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * SSIZE_OF(sw)); } else { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw))); - OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(3 * sizeof(sljit_sw))); - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw))); + OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(3 * SSIZE_OF(sw))); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * SSIZE_OF(sw)); OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0); GET_LOCAL_BASE(TMP1, 0, 0); OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP3, 0); @@ -6603,13 +6603,13 @@ OP2(SLJIT_SUB, TMP2, 0, SLJIT_IMM, 0, TMP2, 0); OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0); if (HAS_VIRTUAL_REGISTERS) { - OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw))); - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw))); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * SSIZE_OF(sw)); } else { - OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw))); - OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw)); + OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw))); + OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * SSIZE_OF(sw)); OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP3, 0); } JUMPTO(SLJIT_JUMP, mainloop); @@ -7159,11 +7159,11 @@ if (char1_reg == STR_END) OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0); } -if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) +if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) { label = LABEL(); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPTO(SLJIT_NOT_ZERO, label); @@ -7171,14 +7171,14 @@ if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_ JUMPHERE(jump); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); } -else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) +else if 
(sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) { OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); label = LABEL(); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPTO(SLJIT_NOT_ZERO, label); @@ -7232,9 +7232,9 @@ else lcc_table = TMP3; } -if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) +if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) opt_type = 1; -else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) +else if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) opt_type = 2; sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); @@ -7253,8 +7253,8 @@ OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc); if (opt_type == 1) { label = LABEL(); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); } else if (opt_type == 2) { @@ -7262,8 +7262,8 @@ else if (opt_type == 2) OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); label = LABEL(); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); - sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); + sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); } else { @@ -9689,7 +9689,7 @@ BACKTRACK_AS(recurse_backtrack)->matchingpath = LABEL(); return cc + 1 + LINK_SIZE; } -static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector) +static sljit_s32 SLJIT_FUNC do_callout_jit(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector) { PCRE2_SPTR begin; PCRE2_SIZE *ovector; @@ -9756,7 +9756,7 @@ unsigned int callout_length = (*cc == OP_CALLOUT) sljit_sw value1; sljit_sw value2; sljit_sw value3; -sljit_uw callout_arg_size = (common->re->top_bracket + 1) * 2 * sizeof(sljit_sw); +sljit_uw callout_arg_size = (common->re->top_bracket + 1) * 2 * SSIZE_OF(sw); PUSH_BACKTRACK(sizeof(backtrack_common), cc, NULL); @@ -9806,7 +9806,7 @@ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STR_PTR, 0); /* SLJIT_R0 = arguments */ OP1(SLJIT_MOV, 
SLJIT_R1, 0, STACK_TOP, 0); GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START); -sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS3(32, W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(do_callout)); +sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS3(32, W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(do_callout_jit)); OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); free_stack(common, callout_arg_size); @@ -11451,7 +11451,7 @@ struct sljit_label *label; int private_data_ptr = PRIVATE_DATA(cc); int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP); int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr; -int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + (int)sizeof(sljit_sw); +int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + SSIZE_OF(sw); int tmp_base, tmp_offset; #if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32 BOOL use_tmp; @@ -11517,19 +11517,19 @@ if (exact > 1) } } else if (exact == 1) - { compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, TRUE); - if (early_fail_type == type_fail_range) - { - OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr); - OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + (int)sizeof(sljit_sw)); - OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, TMP2, 0); - OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, TMP2, 0); - add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_LESS_EQUAL, TMP2, 0, TMP1, 0)); +if (early_fail_type == type_fail_range) + { + /* Range end first, followed by range start. */ + OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr); + OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + SSIZE_OF(sw)); + OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, TMP2, 0); + OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, TMP2, 0); + add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_LESS_EQUAL, TMP2, 0, TMP1, 0)); - OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + (int)sizeof(sljit_sw), STR_PTR, 0); - } + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr, STR_PTR, 0); + OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + SSIZE_OF(sw), STR_PTR, 0); } switch(opcode) @@ -12428,7 +12428,7 @@ PCRE2_SPTR end; int private_data_ptr = PRIVATE_DATA(cc); int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP); int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr; -int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + (int)sizeof(sljit_sw); +int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + SSIZE_OF(sw); cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &exact, &end); @@ -14148,7 +14148,7 @@ quit_label = common->quit_label; if (common->currententry != NULL) { /* A free bit for each private data. 
*/ - common->recurse_bitset_size = ((private_data_size / (int)sizeof(sljit_sw)) + 7) >> 3; + common->recurse_bitset_size = ((private_data_size / SSIZE_OF(sw)) + 7) >> 3; SLJIT_ASSERT(common->recurse_bitset_size > 0); common->recurse_bitset = (sljit_u8*)SLJIT_MALLOC(common->recurse_bitset_size, allocator_data);; @@ -14384,7 +14384,7 @@ pcre2_jit_compile(pcre2_code *code, uint32_t options) pcre2_real_code *re = (pcre2_real_code *)code; #ifdef SUPPORT_JIT executable_functions *functions; -static int executable_allocator_is_working = 0; +static int executable_allocator_is_working = -1; #endif if (code == NULL) @@ -14447,23 +14447,21 @@ return PCRE2_ERROR_JIT_BADOPTION; if ((re->flags & PCRE2_NOJIT) != 0) return 0; -if (executable_allocator_is_working == 0) +if (executable_allocator_is_working == -1) { /* Checks whether the executable allocator is working. This check might run multiple times in multi-threaded environments, but the result should not be affected by it. */ void *ptr = SLJIT_MALLOC_EXEC(32, NULL); - - executable_allocator_is_working = -1; - if (ptr != NULL) { SLJIT_FREE_EXEC(((sljit_u8*)(ptr)) + SLJIT_EXEC_OFFSET(ptr), NULL); executable_allocator_is_working = 1; } + else executable_allocator_is_working = 0; } -if (executable_allocator_is_working < 0) +if (!executable_allocator_is_working) return PCRE2_ERROR_NOMEMORY; if ((re->overall_options & PCRE2_MATCH_INVALID_UTF) != 0) diff --git a/thirdparty/pcre2/src/pcre2_jit_misc.c b/thirdparty/pcre2/src/pcre2_jit_misc.c index e57afad065..bb6a5589cb 100644 --- a/thirdparty/pcre2/src/pcre2_jit_misc.c +++ b/thirdparty/pcre2/src/pcre2_jit_misc.c @@ -110,8 +110,10 @@ pcre2_jit_free_unused_memory(pcre2_general_context *gcontext) (void)gcontext; /* Suppress warning */ #else /* SUPPORT_JIT */ SLJIT_UNUSED_ARG(gcontext); +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) sljit_free_unused_memory_exec(); -#endif /* SUPPORT_JIT */ +#endif /* SLJIT_EXECUTABLE_ALLOCATOR */ +#endif /* SUPPORT_JIT */ } diff --git a/thirdparty/pcre2/src/pcre2_jit_neon_inc.h b/thirdparty/pcre2/src/pcre2_jit_neon_inc.h index 150da29eba..165602edc0 100644 --- a/thirdparty/pcre2/src/pcre2_jit_neon_inc.h +++ b/thirdparty/pcre2/src/pcre2_jit_neon_inc.h @@ -183,6 +183,8 @@ restart:; #endif #if defined(FFCPS) +if (str_ptr >= str_end) + return NULL; sljit_u8 *p1 = str_ptr - diff; #endif sljit_s32 align_offset = ((uint64_t)str_ptr & 0xf); @@ -327,7 +329,7 @@ match:; return NULL; #if defined(FF_UTF) - if (utf_continue(str_ptr + IN_UCHARS(-offs1))) + if (utf_continue((PCRE2_SPTR)str_ptr - offs1)) { /* Not a match. 
*/ str_ptr += IN_UCHARS(1); diff --git a/thirdparty/pcre2/src/pcre2_jit_simd_inc.h b/thirdparty/pcre2/src/pcre2_jit_simd_inc.h index d99cfc5ce4..1a5ce4ed09 100644 --- a/thirdparty/pcre2/src/pcre2_jit_simd_inc.h +++ b/thirdparty/pcre2/src/pcre2_jit_simd_inc.h @@ -776,7 +776,7 @@ typedef union { } int_char; #if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32 -static SLJIT_INLINE int utf_continue(sljit_u8 *s) +static SLJIT_INLINE int utf_continue(PCRE2_SPTR s) { #if PCRE2_CODE_UNIT_WIDTH == 8 return (*s & 0xc0) == 0x80; diff --git a/thirdparty/pcre2/src/pcre2_match.c b/thirdparty/pcre2/src/pcre2_match.c index 6354e1bb9e..168b9fad01 100644 --- a/thirdparty/pcre2/src/pcre2_match.c +++ b/thirdparty/pcre2/src/pcre2_match.c @@ -204,6 +204,7 @@ Arguments: P a previous frame of interest frame_size the frame size mb points to the match block + match_data points to the match data block s identification text Returns: nothing @@ -211,7 +212,7 @@ Returns: nothing static void display_frames(FILE *f, heapframe *F, heapframe *P, PCRE2_SIZE frame_size, - match_block *mb, const char *s, ...) + match_block *mb, pcre2_match_data *match_data, const char *s, ...) { uint32_t i; heapframe *Q; @@ -223,10 +224,10 @@ vfprintf(f, s, ap); va_end(ap); if (P != NULL) fprintf(f, " P=%lu", - ((char *)P - (char *)(mb->match_frames))/frame_size); + ((char *)P - (char *)(match_data->heapframes))/frame_size); fprintf(f, "\n"); -for (i = 0, Q = mb->match_frames; +for (i = 0, Q = match_data->heapframes; Q <= F; i++, Q = (heapframe *)((char *)Q + frame_size)) { @@ -490,10 +491,16 @@ A version did exist that used individual frames on the heap instead of calling match() recursively, but this ran substantially slower. The current version is a refactoring that uses a vector of frames to remember backtracking points. This runs no slower, and possibly even a bit faster than the original recursive -implementation. An initial vector of size START_FRAMES_SIZE (enough for maybe -50 frames) is allocated on the system stack. If this is not big enough, the -heap is used for a larger vector. - +implementation. + +At first, an initial vector of size START_FRAMES_SIZE (enough for maybe 50 +frames) was allocated on the system stack. If this was not big enough, the heap +was used for a larger vector. However, it turns out that there are environments +where taking as little as 20KiB from the system stack is an embarrassment. +After another refactoring, the heap is used exclusively, but a pointer the +frames vector and its size are cached in the match_data block, so that there is +no new memory allocation if the same match_data block is used for multiple +matches (unless the frames vector has to be extended). ******************************************************************************* ******************************************************************************/ @@ -566,10 +573,9 @@ made performance worse. 
Arguments: start_eptr starting character in subject start_ecode starting position in compiled code - ovector pointer to the final output vector - oveccount number of pairs in ovector top_bracket number of capturing parentheses in the pattern frame_size size of each backtracking frame + match_data pointer to the match_data block mb pointer to "static" variables block Returns: MATCH_MATCH if matched ) these values are >= 0 @@ -580,17 +586,19 @@ Returns: MATCH_MATCH if matched ) these values are >= 0 */ static int -match(PCRE2_SPTR start_eptr, PCRE2_SPTR start_ecode, PCRE2_SIZE *ovector, - uint16_t oveccount, uint16_t top_bracket, PCRE2_SIZE frame_size, - match_block *mb) +match(PCRE2_SPTR start_eptr, PCRE2_SPTR start_ecode, uint16_t top_bracket, + PCRE2_SIZE frame_size, pcre2_match_data *match_data, match_block *mb) { /* Frame-handling variables */ heapframe *F; /* Current frame pointer */ heapframe *N = NULL; /* Temporary frame pointers */ heapframe *P = NULL; + +heapframe *frames_top; /* End of frames vector */ heapframe *assert_accept_frame = NULL; /* For passing back a frame with captures */ -PCRE2_SIZE frame_copy_size; /* Amount to copy when creating a new frame */ +PCRE2_SIZE heapframes_size; /* Usable size of frames vector */ +PCRE2_SIZE frame_copy_size; /* Amount to copy when creating a new frame */ /* Local variables that do not need to be preserved over calls to RRMATCH(). */ @@ -627,10 +635,14 @@ copied when a new frame is created. */ frame_copy_size = frame_size - offsetof(heapframe, eptr); -/* Set up the first current frame at the start of the vector, and initialize -fields that are not reset for new frames. */ +/* Set up the first frame and the end of the frames vector. We set the local +heapframes_size to the usuable amount of the vector, that is, a whole number of +frames. */ + +F = match_data->heapframes; +heapframes_size = (match_data->heapframes_size / frame_size) * frame_size; +frames_top = (heapframe *)((char *)F + heapframes_size); -F = mb->match_frames; Frdepth = 0; /* "Recursion" depth */ Fcapture_last = 0; /* Number of most recent capture */ Fcurrent_recurse = RECURSE_UNSET; /* Not pattern recursing. */ @@ -646,34 +658,35 @@ backtracking point. */ MATCH_RECURSE: -/* Set up a new backtracking frame. If the vector is full, get a new one -on the heap, doubling the size, but constrained by the heap limit. */ +/* Set up a new backtracking frame. If the vector is full, get a new one, +doubling the size, but constrained by the heap limit (which is in KiB). 
*/ N = (heapframe *)((char *)F + frame_size); -if (N >= mb->match_frames_top) +if (N >= frames_top) { - PCRE2_SIZE newsize = mb->frame_vector_size * 2; heapframe *new; + PCRE2_SIZE newsize = match_data->heapframes_size * 2; - if ((newsize / 1024) > mb->heap_limit) + if (newsize > mb->heap_limit) { - PCRE2_SIZE maxsize = ((mb->heap_limit * 1024)/frame_size) * frame_size; - if (mb->frame_vector_size >= maxsize) return PCRE2_ERROR_HEAPLIMIT; + PCRE2_SIZE maxsize = (mb->heap_limit/frame_size) * frame_size; + if (match_data->heapframes_size >= maxsize) return PCRE2_ERROR_HEAPLIMIT; newsize = maxsize; } - new = mb->memctl.malloc(newsize, mb->memctl.memory_data); + new = match_data->memctl.malloc(newsize, match_data->memctl.memory_data); if (new == NULL) return PCRE2_ERROR_NOMEMORY; - memcpy(new, mb->match_frames, mb->frame_vector_size); + memcpy(new, match_data->heapframes, heapframes_size); - F = (heapframe *)((char *)new + ((char *)F - (char *)mb->match_frames)); + F = (heapframe *)((char *)new + ((char *)F - (char *)match_data->heapframes)); N = (heapframe *)((char *)F + frame_size); - if (mb->match_frames != mb->stack_frames) - mb->memctl.free(mb->match_frames, mb->memctl.memory_data); - mb->match_frames = new; - mb->match_frames_top = (heapframe *)((char *)mb->match_frames + newsize); - mb->frame_vector_size = newsize; + match_data->memctl.free(match_data->heapframes, match_data->memctl.memory_data); + match_data->heapframes = new; + match_data->heapframes_size = newsize; + + heapframes_size = (newsize / frame_size) * frame_size; + frames_top = (heapframe *)((char *)new + heapframes_size); } #ifdef DEBUG_SHOW_RMATCH @@ -731,7 +744,7 @@ recursion value. */ if (group_frame_type != 0) { - Flast_group_offset = (char *)F - (char *)mb->match_frames; + Flast_group_offset = (char *)F - (char *)match_data->heapframes; if (GF_IDMASK(group_frame_type) == GF_RECURSE) Fcurrent_recurse = GF_DATAMASK(group_frame_type); group_frame_type = 0; @@ -773,7 +786,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode); for(;;) { if (offset == PCRE2_UNSET) return PCRE2_ERROR_INTERNAL; - N = (heapframe *)((char *)mb->match_frames + offset); + N = (heapframe *)((char *)match_data->heapframes + offset); P = (heapframe *)((char *)N - frame_size); if (N->group_frame_type == (GF_CAPTURE | number)) break; offset = P->last_group_offset; @@ -811,7 +824,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode); for(;;) { if (offset == PCRE2_UNSET) return PCRE2_ERROR_INTERNAL; - N = (heapframe *)((char *)mb->match_frames + offset); + N = (heapframe *)((char *)match_data->heapframes + offset); P = (heapframe *)((char *)N - frame_size); if (GF_IDMASK(N->group_frame_type) == GF_RECURSE) break; offset = P->last_group_offset; @@ -864,14 +877,15 @@ fprintf(stderr, "++ op=%d\n", *Fecode); mb->mark = Fmark; /* and the last success mark */ if (Feptr > mb->last_used_ptr) mb->last_used_ptr = Feptr; - ovector[0] = Fstart_match - mb->start_subject; - ovector[1] = Feptr - mb->start_subject; + match_data->ovector[0] = Fstart_match - mb->start_subject; + match_data->ovector[1] = Feptr - mb->start_subject; /* Set i to the smaller of the sizes of the external and frame ovectors. */ - i = 2 * ((top_bracket + 1 > oveccount)? oveccount : top_bracket + 1); - memcpy(ovector + 2, Fovector, (i - 2) * sizeof(PCRE2_SIZE)); - while (--i >= Foffset_top + 2) ovector[i] = PCRE2_UNSET; + i = 2 * ((top_bracket + 1 > match_data->oveccount)? 
+ match_data->oveccount : top_bracket + 1); + memcpy(match_data->ovector + 2, Fovector, (i - 2) * sizeof(PCRE2_SIZE)); + while (--i >= Foffset_top + 2) match_data->ovector[i] = PCRE2_UNSET; return MATCH_MATCH; /* Note: NOT RRETURN */ @@ -5328,7 +5342,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode); offset = Flast_group_offset; while (offset != PCRE2_UNSET) { - N = (heapframe *)((char *)mb->match_frames + offset); + N = (heapframe *)((char *)match_data->heapframes + offset); P = (heapframe *)((char *)N - frame_size); if (N->group_frame_type == (GF_RECURSE | number)) { @@ -5729,7 +5743,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode); if (*bracode != OP_BRA && *bracode != OP_COND) { - N = (heapframe *)((char *)mb->match_frames + Flast_group_offset); + N = (heapframe *)((char *)match_data->heapframes + Flast_group_offset); P = (heapframe *)((char *)N - frame_size); Flast_group_offset = P->last_group_offset; @@ -6346,6 +6360,7 @@ BOOL jit_checked_utf = FALSE; #endif /* SUPPORT_UNICODE */ PCRE2_SIZE frame_size; +PCRE2_SIZE heapframes_size; /* We need to have mb as a pointer to a match block, because the IS_NEWLINE macro is used below, and it expects NLBLOCK to be defined as a pointer. */ @@ -6354,15 +6369,6 @@ pcre2_callout_block cb; match_block actual_match_block; match_block *mb = &actual_match_block; -/* Allocate an initial vector of backtracking frames on the stack. If this -proves to be too small, it is replaced by a larger one on the heap. To get a -vector of the size required that is aligned for pointers, allocate it as a -vector of pointers. */ - -PCRE2_SPTR stack_frames_vector[START_FRAMES_SIZE/sizeof(PCRE2_SPTR)] - PCRE2_KEEP_UNINITIALIZED; -mb->stack_frames = (heapframe *)stack_frames_vector; - /* Recognize NULL, length 0 as an empty string. */ if (subject == NULL && length == 0) subject = (PCRE2_SPTR)""; @@ -6793,15 +6799,11 @@ switch(re->newline_convention) vector at the end, whose size depends on the number of capturing parentheses in the pattern. It is not used at all if there are no capturing parentheses. - frame_size is the total size of each frame - mb->frame_vector_size is the total usable size of the vector (rounded down - to a whole number of frames) - -The last of these is changed within the match() function if the frame vector -has to be expanded. We therefore put it into the match block so that it is -correct when calling match() more than once for non-anchored patterns. + frame_size is the total size of each frame + match_data->heapframes is the pointer to the frames vector + match_data->heapframes_size is the total size of the vector -We must also pad frame_size for alignment to ensure subsequent frames are as +We must pad the frame_size for alignment to ensure subsequent frames are as aligned as heapframe. Whilst ovector is word-aligned due to being a PCRE2_SIZE array, that does not guarantee it is suitably aligned for pointers, as some architectures have pointers that are larger than a size_t. */ @@ -6813,8 +6815,8 @@ frame_size = (offsetof(heapframe, ovector) + /* Limits set in the pattern override the match context only if they are smaller. */ -mb->heap_limit = (mcontext->heap_limit < re->limit_heap)? - mcontext->heap_limit : re->limit_heap; +mb->heap_limit = ((mcontext->heap_limit < re->limit_heap)? + mcontext->heap_limit : re->limit_heap) * 1024; mb->match_limit = (mcontext->match_limit < re->limit_match)? mcontext->match_limit : re->limit_match; @@ -6823,35 +6825,40 @@ mb->match_limit_depth = (mcontext->depth_limit < re->limit_depth)? 
mcontext->depth_limit : re->limit_depth; /* If a pattern has very many capturing parentheses, the frame size may be very -large. Ensure that there are at least 10 available frames by getting an initial -vector on the heap if necessary, except when the heap limit prevents this. Get -fewer if possible. (The heap limit is in kibibytes.) */ - -if (frame_size <= START_FRAMES_SIZE/10) +large. Set the initial frame vector size to ensure that there are at least 10 +available frames, but enforce a minimum of START_FRAMES_SIZE. If this is +greater than the heap limit, get as large a vector as possible. Always round +the size to a multiple of the frame size. */ + +heapframes_size = frame_size * 10; +if (heapframes_size < START_FRAMES_SIZE) heapframes_size = START_FRAMES_SIZE; +if (heapframes_size > mb->heap_limit) { - mb->match_frames = mb->stack_frames; /* Initial frame vector on the stack */ - mb->frame_vector_size = ((START_FRAMES_SIZE/frame_size) * frame_size); + if (frame_size > mb->heap_limit ) return PCRE2_ERROR_HEAPLIMIT; + heapframes_size = mb->heap_limit; } -else + +/* If an existing frame vector in the match_data block is large enough, we can +use it.Otherwise, free any pre-existing vector and get a new one. */ + +if (match_data->heapframes_size < heapframes_size) { - mb->frame_vector_size = frame_size * 10; - if ((mb->frame_vector_size / 1024) > mb->heap_limit) + match_data->memctl.free(match_data->heapframes, + match_data->memctl.memory_data); + match_data->heapframes = match_data->memctl.malloc(heapframes_size, + match_data->memctl.memory_data); + if (match_data->heapframes == NULL) { - if (frame_size > mb->heap_limit * 1024) return PCRE2_ERROR_HEAPLIMIT; - mb->frame_vector_size = ((mb->heap_limit * 1024)/frame_size) * frame_size; + match_data->heapframes_size = 0; + return PCRE2_ERROR_NOMEMORY; } - mb->match_frames = mb->memctl.malloc(mb->frame_vector_size, - mb->memctl.memory_data); - if (mb->match_frames == NULL) return PCRE2_ERROR_NOMEMORY; + match_data->heapframes_size = heapframes_size; } -mb->match_frames_top = - (heapframe *)((char *)mb->match_frames + mb->frame_vector_size); - /* Write to the ovector within the first frame to mark every capture unset and to avoid uninitialized memory read errors when it is copied to a new frame. */ -memset((char *)(mb->match_frames) + offsetof(heapframe, ovector), 0xff, +memset((char *)(match_data->heapframes) + offsetof(heapframe, ovector), 0xff, frame_size - offsetof(heapframe, ovector)); /* Pointers to the individual character tables */ @@ -7279,8 +7286,8 @@ for(;;) mb->end_offset_top = 0; mb->skip_arg_count = 0; - rc = match(start_match, mb->start_code, match_data->ovector, - match_data->oveccount, re->top_bracket, frame_size, mb); + rc = match(start_match, mb->start_code, re->top_bracket, frame_size, + match_data, mb); if (mb->hitend && start_partial == NULL) { @@ -7463,11 +7470,6 @@ if (utf && end_subject != true_end_subject && } #endif /* SUPPORT_UNICODE */ -/* Release an enlarged frame vector that is on the heap. */ - -if (mb->match_frames != mb->stack_frames) - mb->memctl.free(mb->match_frames, mb->memctl.memory_data); - /* Fill in fields that are always returned in the match data. */ match_data->code = re; @@ -7533,4 +7535,10 @@ else match_data->rc = PCRE2_ERROR_NOMATCH; return match_data->rc; } +/* These #undefs are here to enable unity builds with CMake. 
*/ + +#undef NLBLOCK /* Block containing newline information */ +#undef PSSTART /* Field containing processed string start */ +#undef PSEND /* Field containing processed string end */ + /* End of pcre2_match.c */ diff --git a/thirdparty/pcre2/src/pcre2_match_data.c b/thirdparty/pcre2/src/pcre2_match_data.c index 53e4698707..fa129b8bc5 100644 --- a/thirdparty/pcre2/src/pcre2_match_data.c +++ b/thirdparty/pcre2/src/pcre2_match_data.c @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge - New API code Copyright (c) 2016-2019 University of Cambridge + New API code Copyright (c) 2016-2022 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -51,19 +51,23 @@ POSSIBILITY OF SUCH DAMAGE. * Create a match data block given ovector size * *************************************************/ -/* A minimum of 1 is imposed on the number of ovector pairs. */ +/* A minimum of 1 is imposed on the number of ovector pairs. A maximum is also +imposed because the oveccount field in a match data block is uintt6_t. */ PCRE2_EXP_DEFN pcre2_match_data * PCRE2_CALL_CONVENTION pcre2_match_data_create(uint32_t oveccount, pcre2_general_context *gcontext) { pcre2_match_data *yield; if (oveccount < 1) oveccount = 1; +if (oveccount > UINT16_MAX) oveccount = UINT16_MAX; yield = PRIV(memctl_malloc)( offsetof(pcre2_match_data, ovector) + 2*oveccount*sizeof(PCRE2_SIZE), (pcre2_memctl *)gcontext); if (yield == NULL) return NULL; yield->oveccount = oveccount; yield->flags = 0; +yield->heapframes = NULL; +yield->heapframes_size = 0; return yield; } @@ -95,6 +99,9 @@ pcre2_match_data_free(pcre2_match_data *match_data) { if (match_data != NULL) { + if (match_data->heapframes != NULL) + match_data->memctl.free(match_data->heapframes, + match_data->memctl.memory_data); if ((match_data->flags & PCRE2_MD_COPIED_SUBJECT) != 0) match_data->memctl.free((void *)match_data->subject, match_data->memctl.memory_data); diff --git a/thirdparty/pcre2/src/pcre2_substitute.c b/thirdparty/pcre2/src/pcre2_substitute.c index 8b2c369ccc..edbb78c6d7 100644 --- a/thirdparty/pcre2/src/pcre2_substitute.c +++ b/thirdparty/pcre2/src/pcre2_substitute.c @@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge - New API code Copyright (c) 2016-2021 University of Cambridge + New API code Copyright (c) 2016-2022 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without @@ -259,16 +259,16 @@ PCRE2_UNSET, so as not to imply an offset in the replacement. */ if ((options & (PCRE2_PARTIAL_HARD|PCRE2_PARTIAL_SOFT)) != 0) return PCRE2_ERROR_BADOPTION; - -/* Validate length and find the end of the replacement. A NULL replacement of + +/* Validate length and find the end of the replacement. A NULL replacement of zero length is interpreted as an empty string. 
*/ -if (replacement == NULL) +if (replacement == NULL) { if (rlength != 0) return PCRE2_ERROR_NULL; - replacement = (PCRE2_SPTR)""; - } - + replacement = (PCRE2_SPTR)""; + } + if (rlength == PCRE2_ZERO_TERMINATED) rlength = PRIV(strlen)(replacement); repend = replacement + rlength; @@ -282,8 +282,9 @@ replacement_only = ((options & PCRE2_SUBSTITUTE_REPLACEMENT_ONLY) != 0); match data block. We create an internal match_data block in two cases: (a) an external one is not supplied (and we are not starting from an existing match); (b) an existing match is to be used for the first substitution. In the latter -case, we copy the existing match into the internal block. This ensures that no -changes are made to the existing match data block. */ +case, we copy the existing match into the internal block, except for any cached +heap frame size and pointer. This ensures that no changes are made to the +external match data block. */ if (match_data == NULL) { @@ -309,6 +310,8 @@ else if (use_existing_match) if (internal_match_data == NULL) return PCRE2_ERROR_NOMEMORY; memcpy(internal_match_data, match_data, offsetof(pcre2_match_data, ovector) + 2*pairs*sizeof(PCRE2_SIZE)); + internal_match_data->heapframes = NULL; + internal_match_data->heapframes_size = 0; match_data = internal_match_data; } @@ -328,9 +331,9 @@ scb.ovector = ovector; if (subject == NULL) { - if (length != 0) return PCRE2_ERROR_NULL; + if (length != 0) return PCRE2_ERROR_NULL; subject = (PCRE2_SPTR)""; - } + } /* Find length of zero-terminated subject */ diff --git a/thirdparty/pcre2/src/sljit/sljitConfig.h b/thirdparty/pcre2/src/sljit/sljitConfig.h index 1c821d287d..5fba7aa638 100644 --- a/thirdparty/pcre2/src/sljit/sljitConfig.h +++ b/thirdparty/pcre2/src/sljit/sljitConfig.h @@ -53,7 +53,8 @@ extern "C" { /* #define SLJIT_CONFIG_PPC_64 1 */ /* #define SLJIT_CONFIG_MIPS_32 1 */ /* #define SLJIT_CONFIG_MIPS_64 1 */ -/* #define SLJIT_CONFIG_SPARC_32 1 */ +/* #define SLJIT_CONFIG_RISCV_32 1 */ +/* #define SLJIT_CONFIG_RISCV_64 1 */ /* #define SLJIT_CONFIG_S390X 1 */ /* #define SLJIT_CONFIG_AUTO 1 */ @@ -127,17 +128,6 @@ extern "C" { #endif /* !SLJIT_EXECUTABLE_ALLOCATOR */ -/* Force cdecl calling convention even if a better calling - convention (e.g. fastcall) is supported by the C compiler. - If this option is disabled (this is the default), functions - called from JIT should be defined with SLJIT_FUNC attribute. - Standard C functions can still be called by using the - SLJIT_CALL_CDECL jump type. */ -#ifndef SLJIT_USE_CDECL_CALLING_CONVENTION -/* Disabled by default */ -#define SLJIT_USE_CDECL_CALLING_CONVENTION 0 -#endif - /* Return with error when an invalid argument is passed. */ #ifndef SLJIT_ARGUMENT_CHECKS /* Disabled by default */ diff --git a/thirdparty/pcre2/src/sljit/sljitConfigInternal.h b/thirdparty/pcre2/src/sljit/sljitConfigInternal.h index 55e4e39f13..cd3ce69734 100644 --- a/thirdparty/pcre2/src/sljit/sljitConfigInternal.h +++ b/thirdparty/pcre2/src/sljit/sljitConfigInternal.h @@ -59,7 +59,8 @@ extern "C" { SLJIT_64BIT_ARCHITECTURE : 64 bit architecture SLJIT_LITTLE_ENDIAN : little endian architecture SLJIT_BIG_ENDIAN : big endian architecture - SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!) 
+ SLJIT_UNALIGNED : unaligned memory accesses for non-fpu operations are supported + SLJIT_FPU_UNALIGNED : unaligned memory accesses for fpu operations are supported SLJIT_INDIRECT_CALL : see SLJIT_FUNC_ADDR() for more information Constants: @@ -98,7 +99,8 @@ extern "C" { + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ - + (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + + (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \ + + (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \ + (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \ + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2 @@ -115,7 +117,8 @@ extern "C" { && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ && !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ - && !(defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + && !(defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \ + && !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \ && !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) \ && !(defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) @@ -156,8 +159,10 @@ extern "C" { #define SLJIT_CONFIG_MIPS_32 1 #elif defined(__mips64) #define SLJIT_CONFIG_MIPS_64 1 -#elif (defined(__sparc__) || defined(__sparc)) && !defined(_LP64) -#define SLJIT_CONFIG_SPARC_32 1 +#elif defined (__riscv_xlen) && (__riscv_xlen == 32) +#define SLJIT_CONFIG_RISCV_32 1 +#elif defined (__riscv_xlen) && (__riscv_xlen == 64) +#define SLJIT_CONFIG_RISCV_64 1 #elif defined(__s390x__) #define SLJIT_CONFIG_S390X 1 #else @@ -205,8 +210,8 @@ extern "C" { #define SLJIT_CONFIG_PPC 1 #elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) #define SLJIT_CONFIG_MIPS 1 -#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) || (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64) -#define SLJIT_CONFIG_SPARC 1 +#elif (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) || (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) +#define SLJIT_CONFIG_RISCV 1 #endif /***********************************************************/ @@ -330,8 +335,14 @@ extern "C" { * older versions are known to abort in some targets * https://github.com/PhilipHazel/pcre2/issues/92 * - * beware APPLE is known to have removed the code in iOS so - * it will need to be excempted or result in broken builds + * beware some vendors (ex: Microsoft, Apple) are known to have + * removed the code to support this builtin even if the call for + * __has_builtin reports it is available. + * + * make sure linking doesn't fail because __clear_cache() is + * missing before changing it or add an exception so that the + * system provided method that should be defined below is used + * instead. 
*/ #if (!defined SLJIT_CACHE_FLUSH && defined __has_builtin) #if __has_builtin(__builtin___clear_cache) && !defined(__clang__) @@ -339,9 +350,9 @@ extern "C" { /* * https://gcc.gnu.org/bugzilla//show_bug.cgi?id=91248 * https://gcc.gnu.org/bugzilla//show_bug.cgi?id=93811 - * gcc's clear_cache builtin for power and sparc are broken + * gcc's clear_cache builtin for power is broken */ -#if !defined(SLJIT_CONFIG_PPC) && !defined(SLJIT_CONFIG_SPARC_32) +#if !defined(SLJIT_CONFIG_PPC) #define SLJIT_CACHE_FLUSH(from, to) \ __builtin___clear_cache((char*)(from), (char*)(to)) #endif @@ -373,12 +384,10 @@ extern "C" { ppc_cache_flush((from), (to)) #define SLJIT_CACHE_FLUSH_OWN_IMPL 1 -#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#elif defined(_WIN32) -/* The __clear_cache() implementation of GCC is a dummy function on Sparc. */ #define SLJIT_CACHE_FLUSH(from, to) \ - sparc_cache_flush((from), (to)) -#define SLJIT_CACHE_FLUSH_OWN_IMPL 1 + FlushInstructionCache(GetCurrentProcess(), (void*)(from), (char*)(to) - (char*)(from)) #elif (defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) || defined(__clang__) @@ -392,11 +401,6 @@ extern "C" { #define SLJIT_CACHE_FLUSH(from, to) \ cacheflush((long)(from), (long)(to), 0) -#elif defined _WIN32 - -#define SLJIT_CACHE_FLUSH(from, to) \ - FlushInstructionCache(GetCurrentProcess(), (void*)(from), (char*)(to) - (char*)(from)) - #else /* Call __ARM_NR_cacheflush on ARM-Linux or the corresponding MIPS syscall. */ @@ -435,6 +439,7 @@ typedef long int sljit_sw; && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ + && !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \ && !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) #define SLJIT_32BIT_ARCHITECTURE 1 #define SLJIT_WORD_SHIFT 2 @@ -495,8 +500,7 @@ typedef double sljit_f64; #if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) /* These macros are mostly useful for the applications. 
*/ -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ - || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) #ifdef __LITTLE_ENDIAN__ #define SLJIT_LITTLE_ENDIAN 1 @@ -504,8 +508,7 @@ typedef double sljit_f64; #define SLJIT_BIG_ENDIAN 1 #endif -#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ - || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) #ifdef __MIPSEL__ #define SLJIT_LITTLE_ENDIAN 1 @@ -532,8 +535,7 @@ typedef double sljit_f64; #endif /* !SLJIT_MIPS_REV */ -#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ - || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) +#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) #define SLJIT_BIG_ENDIAN 1 @@ -554,19 +556,30 @@ typedef double sljit_f64; #ifndef SLJIT_UNALIGNED -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ - || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ - || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ - || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \ + || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) #define SLJIT_UNALIGNED 1 #endif #endif /* !SLJIT_UNALIGNED */ +#ifndef SLJIT_FPU_UNALIGNED + +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ + || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ + || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \ + || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \ + || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) +#define SLJIT_FPU_UNALIGNED 1 +#endif + +#endif /* !SLJIT_FPU_UNALIGNED */ + #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) /* Auto detect SSE2 support using CPUID. On 64 bit x86 cpus, sse2 must be present. */ @@ -578,38 +591,7 @@ typedef double sljit_f64; /*****************************************************************************************/ #ifndef SLJIT_FUNC - -#if (defined SLJIT_USE_CDECL_CALLING_CONVENTION && SLJIT_USE_CDECL_CALLING_CONVENTION) \ - || !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - #define SLJIT_FUNC - -#elif defined(__GNUC__) && !defined(__APPLE__) - -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) -#define SLJIT_FUNC __attribute__ ((fastcall)) -#define SLJIT_X86_32_FASTCALL 1 -#else -#define SLJIT_FUNC -#endif /* gcc >= 3.4 */ - -#elif defined(_MSC_VER) - -#define SLJIT_FUNC __fastcall -#define SLJIT_X86_32_FASTCALL 1 - -#elif defined(__BORLANDC__) - -#define SLJIT_FUNC __msfastcall -#define SLJIT_X86_32_FASTCALL 1 - -#else /* Unknown compiler. */ - -/* The cdecl calling convention is usually the x86 default. */ -#define SLJIT_FUNC - -#endif /* SLJIT_USE_CDECL_CALLING_CONVENTION */ - #endif /* !SLJIT_FUNC */ #ifndef SLJIT_INDIRECT_CALL @@ -621,14 +603,10 @@ typedef double sljit_f64; #endif #endif /* SLJIT_INDIRECT_CALL */ -/* The offset which needs to be substracted from the return address to +/* The offset which needs to be subtracted from the return address to determine the next executed instruction after return. 
*/ #ifndef SLJIT_RETURN_ADDRESS_OFFSET -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#define SLJIT_RETURN_ADDRESS_OFFSET 8 -#else #define SLJIT_RETURN_ADDRESS_OFFSET 0 -#endif #endif /* SLJIT_RETURN_ADDRESS_OFFSET */ /***************************************************/ @@ -666,10 +644,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) #define SLJIT_NUMBER_OF_REGISTERS 12 -#define SLJIT_NUMBER_OF_SAVED_REGISTERS 9 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 7 #define SLJIT_NUMBER_OF_FLOAT_REGISTERS 7 #define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0 -#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset) +#define SLJIT_LOCALS_OFFSET_BASE (8 * SSIZE_OF(sw)) #define SLJIT_PREF_SHIFT_REG SLJIT_R2 #elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) @@ -683,7 +661,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr); #else /* _WIN64 */ #define SLJIT_NUMBER_OF_SAVED_REGISTERS 8 #define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 10 -#define SLJIT_LOCALS_OFFSET_BASE (4 * (sljit_s32)sizeof(sljit_sw)) +#define SLJIT_LOCALS_OFFSET_BASE (4 * SSIZE_OF(sw)) #endif /* !_WIN64 */ #define SLJIT_PREF_SHIFT_REG SLJIT_R3 @@ -740,17 +718,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr); #define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8 #endif -#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) +#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) -#define SLJIT_NUMBER_OF_REGISTERS 18 -#define SLJIT_NUMBER_OF_SAVED_REGISTERS 14 -#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 14 -#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0 -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -/* saved registers (16), return struct pointer (1), space for 6 argument words (1), - 4th double arg (2), double alignment (1). 
*/ -#define SLJIT_LOCALS_OFFSET_BASE ((16 + 1 + 6 + 2 + 1) * (sljit_s32)sizeof(sljit_sw)) -#endif +#define SLJIT_NUMBER_OF_REGISTERS 23 +#define SLJIT_NUMBER_OF_SAVED_REGISTERS 12 +#define SLJIT_LOCALS_OFFSET_BASE 0 +#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30 +#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 12 #elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) @@ -806,7 +780,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr); #if (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \ || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \ || (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) \ - || (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) \ + || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \ || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) #define SLJIT_HAS_STATUS_FLAGS_STATE 1 #endif diff --git a/thirdparty/pcre2/src/sljit/sljitExecAllocator.c b/thirdparty/pcre2/src/sljit/sljitExecAllocator.c index 6359848cd5..92d940ddc2 100644 --- a/thirdparty/pcre2/src/sljit/sljitExecAllocator.c +++ b/thirdparty/pcre2/src/sljit/sljitExecAllocator.c @@ -152,9 +152,6 @@ static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec) { #if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000 pthread_jit_write_protect_np(enable_exec); -#elif defined(__clang__) - if (__builtin_available(macOS 11.0, *)) - pthread_jit_write_protect_np(enable_exec); #else #error "Must target Big Sur or newer" #endif /* BigSur */ diff --git a/thirdparty/pcre2/src/sljit/sljitLir.c b/thirdparty/pcre2/src/sljit/sljitLir.c index 313a061dd3..abafe1add9 100644 --- a/thirdparty/pcre2/src/sljit/sljitLir.c +++ b/thirdparty/pcre2/src/sljit/sljitLir.c @@ -133,6 +133,14 @@ #define SLJIT_ARG_MASK 0x7 #define SLJIT_ARG_FULL_MASK (SLJIT_ARG_MASK | SLJIT_ARG_TYPE_SCRATCH_REG) +/* Mask for sljit_emit_mem. */ +#define REG_PAIR_MASK 0xff00 +#define REG_PAIR_FIRST(reg) ((reg) & 0xff) +#define REG_PAIR_SECOND(reg) ((reg) >> 8) + +/* Mask for sljit_emit_enter. */ +#define SLJIT_KEPT_SAVEDS_COUNT(options) ((options) & 0x3) + /* Jump flags. 
*/ #define JUMP_LABEL 0x1 #define JUMP_ADDR 0x2 @@ -145,16 +153,16 @@ # define PATCH_MD 0x10 #endif # define TYPE_SHIFT 13 -#endif +#endif /* SLJIT_CONFIG_X86 */ #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) # define IS_BL 0x4 # define PATCH_B 0x8 -#endif +#endif /* SLJIT_CONFIG_ARM_V5 || SLJIT_CONFIG_ARM_V7 */ #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) # define CPOOL_SIZE 512 -#endif +#endif /* SLJIT_CONFIG_ARM_V5 */ #if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) # define IS_COND 0x04 @@ -172,7 +180,7 @@ /* BL + imm24 */ # define PATCH_BL 0x60 /* 0xf00 cc code for branches */ -#endif +#endif /* SLJIT_CONFIG_ARM_THUMB2 */ #if (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) # define IS_COND 0x004 @@ -182,7 +190,7 @@ # define PATCH_COND 0x040 # define PATCH_ABS48 0x080 # define PATCH_ABS64 0x100 -#endif +#endif /* SLJIT_CONFIG_ARM_64 */ #if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) # define IS_COND 0x004 @@ -192,9 +200,9 @@ #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) # define PATCH_ABS32 0x040 # define PATCH_ABS48 0x080 -#endif +#endif /* SLJIT_CONFIG_PPC_64 */ # define REMOVE_COND 0x100 -#endif +#endif /* SLJIT_CONFIG_PPC */ #if (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) # define IS_MOVABLE 0x004 @@ -212,7 +220,7 @@ #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) # define PATCH_ABS32 0x400 # define PATCH_ABS48 0x800 -#endif +#endif /* SLJIT_CONFIG_MIPS_64 */ /* instruction types */ # define MOVABLE_INS 0 @@ -221,28 +229,24 @@ # define UNMOVABLE_INS 32 /* FPU status register */ # define FCSR_FCC 33 -#endif - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -# define IS_MOVABLE 0x04 -# define IS_COND 0x08 -# define IS_CALL 0x10 +#endif /* SLJIT_CONFIG_MIPS */ -# define PATCH_B 0x20 -# define PATCH_CALL 0x40 - - /* instruction types */ -# define MOVABLE_INS 0 - /* 1 - 31 last destination register */ - /* no destination (i.e: store) */ -# define UNMOVABLE_INS 32 - -# define DST_INS_MASK 0xff +#if (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) +# define IS_COND 0x004 +# define IS_CALL 0x008 - /* ICC_SET is the same as SET_FLAGS. */ -# define ICC_IS_SET (1 << 23) -# define FCC_IS_SET (1 << 24) -#endif +# define PATCH_B 0x010 +# define PATCH_J 0x020 + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) +# define PATCH_REL32 0x040 +# define PATCH_ABS32 0x080 +# define PATCH_ABS44 0x100 +# define PATCH_ABS52 0x200 +#else /* !SLJIT_CONFIG_RISCV_64 */ +# define PATCH_REL32 0x0 +#endif /* SLJIT_CONFIG_RISCV_64 */ +#endif /* SLJIT_CONFIG_RISCV */ /* Stack management. */ @@ -385,7 +389,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo invalid_integer_types); SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_32, rewritable_jump_and_single_op_must_not_be_the_same); - SLJIT_COMPILE_ASSERT(!(SLJIT_EQUAL & 0x1) && !(SLJIT_LESS & 0x1) && !(SLJIT_EQUAL_F64 & 0x1) && !(SLJIT_JUMP & 0x1), + SLJIT_COMPILE_ASSERT(!(SLJIT_EQUAL & 0x1) && !(SLJIT_LESS & 0x1) && !(SLJIT_F_EQUAL & 0x1) && !(SLJIT_JUMP & 0x1), conditional_flags_must_be_even_numbers); /* Only the non-zero members must be set. 
*/ @@ -437,10 +441,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo compiler->delay_slot = UNMOVABLE_INS; #endif -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - compiler->delay_slot = UNMOVABLE_INS; -#endif - #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ || (defined SLJIT_DEBUG && SLJIT_DEBUG) compiler->last_flags = 0; @@ -822,6 +822,9 @@ static sljit_s32 function_check_src_mem(struct sljit_compiler *compiler, sljit_s if (!(p & SLJIT_MEM)) return 0; + if (p == SLJIT_MEM1(SLJIT_SP)) + return (i >= 0 && i < compiler->logical_local_size); + if (!(!(p & REG_MASK) || FUNCTION_CHECK_IS_REG(p & REG_MASK))) return 0; @@ -859,9 +862,6 @@ static sljit_s32 function_check_src(struct sljit_compiler *compiler, sljit_s32 p if (p == SLJIT_IMM) return 1; - if (p == SLJIT_MEM1(SLJIT_SP)) - return (i >= 0 && i < compiler->logical_local_size); - return function_check_src_mem(compiler, p, i); } @@ -876,9 +876,6 @@ static sljit_s32 function_check_dst(struct sljit_compiler *compiler, sljit_s32 p if (FUNCTION_CHECK_IS_REG(p)) return (i == 0); - if (p == SLJIT_MEM1(SLJIT_SP)) - return (i >= 0 && i < compiler->logical_local_size); - return function_check_src_mem(compiler, p, i); } @@ -893,9 +890,6 @@ static sljit_s32 function_fcheck(struct sljit_compiler *compiler, sljit_s32 p, s if (FUNCTION_CHECK_IS_FREG(p)) return (i == 0); - if (p == SLJIT_MEM1(SLJIT_SP)) - return (i >= 0 && i < compiler->logical_local_size); - return function_check_src_mem(compiler, p, i); } @@ -913,7 +907,11 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp #if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) #ifdef _WIN64 +#ifdef __GNUC__ +# define SLJIT_PRINT_D "ll" +#else # define SLJIT_PRINT_D "I64" +#endif #else # define SLJIT_PRINT_D "l" #endif @@ -995,13 +993,14 @@ static const char* op0_names[] = { static const char* op1_names[] = { "", ".u8", ".s8", ".u16", ".s16", ".u32", ".s32", "32", - ".p", "not", "clz", + ".p", "not", "clz", "ctz" }; static const char* op2_names[] = { "add", "addc", "sub", "subc", "mul", "and", "or", "xor", - "shl", "lshr", "ashr", + "shl", "mshl", "lshr", "mlshr", + "ashr", "mashr", "rotl", "rotr" }; static const char* op_src_names[] = { @@ -1020,10 +1019,6 @@ static const char* fop2_names[] = { "add", "sub", "mul", "div" }; -#define JUMP_POSTFIX(type) \ - ((type & 0xff) <= SLJIT_NOT_OVERFLOW ? ((type & SLJIT_32) ? "32" : "") \ - : ((type & 0xff) <= SLJIT_ORDERED_F64 ? ((type & SLJIT_32) ? 
".f32" : ".f64") : "")) - static const char* jump_names[] = { "equal", "not_equal", "less", "greater_equal", @@ -1032,12 +1027,18 @@ static const char* jump_names[] = { "sig_greater", "sig_less_equal", "overflow", "not_overflow", "carry", "", - "equal", "not_equal", - "less", "greater_equal", - "greater", "less_equal", + "f_equal", "f_not_equal", + "f_less", "f_greater_equal", + "f_greater", "f_less_equal", "unordered", "ordered", + "ordered_equal", "unordered_or_not_equal", + "ordered_less", "unordered_or_greater_equal", + "ordered_greater", "unordered_or_less_equal", + "unordered_or_equal", "ordered_not_equal", + "unordered_or_less", "ordered_greater_equal", + "unordered_or_greater", "ordered_less_equal", "jump", "fast_call", - "call", "call.cdecl" + "call", "call_reg_arg" }; static const char* call_arg_names[] = { @@ -1053,6 +1054,8 @@ static const char* call_arg_names[] = { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) +#define SLJIT_SKIP_CHECKS(compiler) (compiler)->skip_checks = 1 + static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_generate_code(struct sljit_compiler *compiler) { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) @@ -1080,7 +1083,12 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compil SLJIT_UNUSED_ARG(compiler); #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(!(options & ~SLJIT_ENTER_CDECL)); + if (options & SLJIT_ENTER_REG_ARG) { + CHECK_ARGUMENT(!(options & ~(0x3 | SLJIT_ENTER_REG_ARG))); + } else { + CHECK_ARGUMENT(options == 0); + } + CHECK_ARGUMENT(SLJIT_KEPT_SAVEDS_COUNT(options) <= 3 && SLJIT_KEPT_SAVEDS_COUNT(options) <= saveds); CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS); CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_SAVED_REGISTERS); CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS); @@ -1088,8 +1096,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compil CHECK_ARGUMENT(fsaveds >= 0 && fsaveds <= SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS); CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); - CHECK_ARGUMENT((arg_types & SLJIT_ARG_FULL_MASK) < SLJIT_ARG_TYPE_F64); - CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, saveds, fscratches)); + CHECK_ARGUMENT((arg_types & SLJIT_ARG_FULL_MASK) <= SLJIT_ARG_TYPE_F32); + CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, (options & SLJIT_ENTER_REG_ARG) ? 0 : saveds, fscratches)); compiler->last_flags = 0; #endif @@ -1109,8 +1117,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_enter(struct sljit_compil } while (arg_types); } - fprintf(compiler->verbose, "],%s scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n", - (options & SLJIT_ENTER_CDECL) ? 
" enter:cdecl," : "", + fprintf(compiler->verbose, "],"); + + if (options & SLJIT_ENTER_REG_ARG) { + fprintf(compiler->verbose, " enter:reg_arg,"); + + if (SLJIT_KEPT_SAVEDS_COUNT(options) > 0) + fprintf(compiler->verbose, " keep:%d,", SLJIT_KEPT_SAVEDS_COUNT(options)); + } + + fprintf(compiler->verbose, "scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n", scratches, saveds, fscratches, fsaveds, local_size); } #endif @@ -1124,7 +1140,12 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compi SLJIT_UNUSED_ARG(compiler); #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(!(options & ~SLJIT_ENTER_CDECL)); + if (options & SLJIT_ENTER_REG_ARG) { + CHECK_ARGUMENT(!(options & ~(0x3 | SLJIT_ENTER_REG_ARG))); + } else { + CHECK_ARGUMENT(options == 0); + } + CHECK_ARGUMENT(SLJIT_KEPT_SAVEDS_COUNT(options) <= 3 && SLJIT_KEPT_SAVEDS_COUNT(options) <= saveds); CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS); CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_SAVED_REGISTERS); CHECK_ARGUMENT(scratches + saveds <= SLJIT_NUMBER_OF_REGISTERS); @@ -1133,7 +1154,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compi CHECK_ARGUMENT(fscratches + fsaveds <= SLJIT_NUMBER_OF_FLOAT_REGISTERS); CHECK_ARGUMENT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); CHECK_ARGUMENT((arg_types & SLJIT_ARG_FULL_MASK) < SLJIT_ARG_TYPE_F64); - CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, saveds, fscratches)); + CHECK_ARGUMENT(function_check_arguments(arg_types, scratches, (options & SLJIT_ENTER_REG_ARG) ? 0 : saveds, fscratches)); compiler->last_flags = 0; #endif @@ -1153,8 +1174,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_set_context(struct sljit_compi } while (arg_types); } - fprintf(compiler->verbose, "],%s scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n", - (options & SLJIT_ENTER_CDECL) ? " enter:cdecl," : "", + fprintf(compiler->verbose, "],"); + + if (options & SLJIT_ENTER_REG_ARG) { + fprintf(compiler->verbose, " enter:reg_arg,"); + + if (SLJIT_KEPT_SAVEDS_COUNT(options) > 0) + fprintf(compiler->verbose, " keep:%d,", SLJIT_KEPT_SAVEDS_COUNT(options)); + } + + fprintf(compiler->verbose, " scratches:%d, saveds:%d, fscratches:%d, fsaveds:%d, local_size:%d\n", scratches, saveds, fscratches, fsaveds, local_size); } #endif @@ -1195,18 +1224,52 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return(struct sljit_compi case SLJIT_ARG_TYPE_P: CHECK_ARGUMENT(op == SLJIT_MOV_P); break; + case SLJIT_ARG_TYPE_F64: + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(op == SLJIT_MOV_F64); + break; + case SLJIT_ARG_TYPE_F32: + CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); + CHECK_ARGUMENT(op == SLJIT_MOV_F32); + break; default: /* Context not initialized, void, etc. */ CHECK_ARGUMENT(0); break; } - FUNCTION_CHECK_SRC(src, srcw); + + if (GET_OPCODE(op) < SLJIT_MOV_F64) { + FUNCTION_CHECK_SRC(src, srcw); + } else { + FUNCTION_FCHECK(src, srcw); + } compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " return%s%s ", !(op & SLJIT_32) ? "" : "32", - op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE]); + if (GET_OPCODE(op) < SLJIT_MOV_F64) { + fprintf(compiler->verbose, " return%s%s ", !(op & SLJIT_32) ? 
"" : "32", + op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE]); + sljit_verbose_param(compiler, src, srcw); + } else { + fprintf(compiler->verbose, " return%s ", !(op & SLJIT_32) ? ".f64" : ".f32"); + sljit_verbose_fparam(compiler, src, srcw); + } + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + FUNCTION_CHECK_SRC(src, srcw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " return_to "); sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); } @@ -1263,7 +1326,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op1(struct sljit_compiler } #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CLZ); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CTZ); switch (GET_OPCODE(op)) { case SLJIT_NOT: @@ -1324,15 +1387,18 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler } #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ASHR); + CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ROTR); switch (GET_OPCODE(op)) { case SLJIT_AND: case SLJIT_OR: case SLJIT_XOR: case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)); break; case SLJIT_MUL: @@ -1357,6 +1423,10 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler CHECK_ARGUMENT((compiler->last_flags & 0xff) == GET_FLAG_TYPE(SLJIT_SET_CARRY)); CHECK_ARGUMENT((op & SLJIT_32) == (compiler->last_flags & SLJIT_32)); break; + case SLJIT_ROTL: + case SLJIT_ROTR: + CHECK_ARGUMENT(!(op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK))); + break; default: SLJIT_UNREACHABLE(); break; @@ -1390,6 +1460,35 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op2(struct sljit_compiler CHECK_RETURN_OK; } +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_LSHR + || GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR); + CHECK_ARGUMENT((op & ~(0xff | SLJIT_32 | SLJIT_SHIFT_INTO_NON_ZERO)) == 0); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src_dst)); + FUNCTION_CHECK_SRC(src1, src1w); + FUNCTION_CHECK_SRC(src2, src2w); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s.into%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_32) ? "" : "32", + (op & SLJIT_SHIFT_INTO_NON_ZERO) ? 
".nz" : ""); + + sljit_verbose_reg(compiler, src_dst); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, src2, src2w); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -1510,7 +1609,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com sljit_s32 src2, sljit_sw src2w) { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->last_flags = GET_FLAG_TYPE(op) | (op & (SLJIT_32 | SLJIT_SET_Z)); + compiler->last_flags = GET_FLAG_TYPE(op) | (op & SLJIT_32); #endif if (SLJIT_UNLIKELY(compiler->skip_checks)) { @@ -1523,7 +1622,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_CMP_F64); CHECK_ARGUMENT(!(op & SLJIT_SET_Z)); CHECK_ARGUMENT((op & VARIABLE_FLAG_MASK) - || (GET_FLAG_TYPE(op) >= SLJIT_EQUAL_F64 && GET_FLAG_TYPE(op) <= SLJIT_ORDERED_F64)); + || (GET_FLAG_TYPE(op) >= SLJIT_F_EQUAL && GET_FLAG_TYPE(op) <= SLJIT_ORDERED_LESS_EQUAL)); FUNCTION_FCHECK(src1, src1w); FUNCTION_FCHECK(src2, src2w); #endif @@ -1531,7 +1630,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_cmp(struct sljit_com if (SLJIT_UNLIKELY(!!compiler->verbose)) { fprintf(compiler->verbose, " %s%s", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_32) ? ".f32" : ".f64"); if (op & VARIABLE_FLAG_MASK) { - fprintf(compiler->verbose, ".%s_f", jump_names[GET_FLAG_TYPE(op)]); + fprintf(compiler->verbose, ".%s", jump_names[GET_FLAG_TYPE(op)]); } fprintf(compiler->verbose, " "); sljit_verbose_fparam(compiler, src1, src1w); @@ -1650,6 +1749,17 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_label(struct sljit_compil CHECK_RETURN_OK; } +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) +#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ + || (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) +#define CHECK_UNORDERED(type, last_flags) \ + ((((type) & 0xff) == SLJIT_UNORDERED || ((type) & 0xff) == SLJIT_ORDERED) && \ + ((last_flags) & 0xff) >= SLJIT_UNORDERED && ((last_flags) & 0xff) <= SLJIT_ORDERED_LESS_EQUAL) +#else +#define CHECK_UNORDERED(type, last_flags) 0 +#endif +#endif /* SLJIT_ARGUMENT_CHECKS */ + static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) { if (SLJIT_UNLIKELY(compiler->skip_checks)) { @@ -1658,9 +1768,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile } #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_32))); + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP))); CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_FAST_CALL); - CHECK_ARGUMENT((type & 0xff) < SLJIT_JUMP || !(type & SLJIT_32)); if ((type & 0xff) < SLJIT_JUMP) { if ((type & 0xff) <= SLJIT_NOT_ZERO) @@ -1670,13 +1779,14 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_jump(struct sljit_compile compiler->last_flags = 0; } else CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) - || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW)); + || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || CHECK_UNORDERED(type, compiler->last_flags)); } #endif #if 
(defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) - fprintf(compiler->verbose, " jump%s %s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", - jump_names[type & 0xff], JUMP_POSTFIX(type)); + fprintf(compiler->verbose, " jump%s %s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", + jump_names[type & 0xff]); #endif CHECK_RETURN_OK; } @@ -1686,11 +1796,17 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_call(struct sljit_compile { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_CALL_RETURN))); - CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL || (type & 0xff) == SLJIT_CALL_CDECL); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_CALL && (type & 0xff) <= SLJIT_CALL_REG_ARG); CHECK_ARGUMENT(function_check_arguments(arg_types, compiler->scratches, -1, compiler->fscratches)); if (type & SLJIT_CALL_RETURN) { CHECK_ARGUMENT((arg_types & SLJIT_ARG_MASK) == compiler->last_return); + + if (compiler->options & SLJIT_ENTER_REG_ARG) { + CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL_REG_ARG); + } else { + CHECK_ARGUMENT((type & 0xff) != SLJIT_CALL_REG_ARG); + } } #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) @@ -1729,8 +1845,8 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmp(struct sljit_compiler #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " cmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", - jump_names[type & 0xff], (type & SLJIT_32) ? "32" : ""); + fprintf(compiler->verbose, " cmp%s%s %s, ", (type & SLJIT_32) ? "32" : "", + !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); sljit_verbose_param(compiler, src1, src1w); fprintf(compiler->verbose, ", "); sljit_verbose_param(compiler, src2, src2w); @@ -1747,15 +1863,16 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fcmp(struct sljit_compile #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) CHECK_ARGUMENT(sljit_has_cpu_feature(SLJIT_HAS_FPU)); CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_32))); - CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL_F64 && (type & 0xff) <= SLJIT_ORDERED_F64); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_F_EQUAL && (type & 0xff) <= SLJIT_ORDERED_LESS_EQUAL + && ((type & 0xff) <= SLJIT_ORDERED || sljit_cmp_info(type & 0xff))); FUNCTION_FCHECK(src1, src1w); FUNCTION_FCHECK(src2, src2w); compiler->last_flags = 0; #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " fcmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", - jump_names[type & 0xff], (type & SLJIT_32) ? ".f32" : ".f64"); + fprintf(compiler->verbose, " fcmp%s%s %s, ", (type & SLJIT_32) ? ".f32" : ".f64", + !(type & SLJIT_REWRITABLE_JUMP) ? 
"" : ".r", jump_names[type & 0xff]); sljit_verbose_fparam(compiler, src1, src1w); fprintf(compiler->verbose, ", "); sljit_verbose_fparam(compiler, src2, src2w); @@ -1793,12 +1910,18 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_icall(struct sljit_compil { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_CALL_RETURN))); - CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL || (type & 0xff) == SLJIT_CALL_CDECL); + CHECK_ARGUMENT((type & 0xff) >= SLJIT_CALL && (type & 0xff) <= SLJIT_CALL_REG_ARG); CHECK_ARGUMENT(function_check_arguments(arg_types, compiler->scratches, -1, compiler->fscratches)); FUNCTION_CHECK_SRC(src, srcw); if (type & SLJIT_CALL_RETURN) { CHECK_ARGUMENT((arg_types & SLJIT_ARG_MASK) == compiler->last_return); + + if (compiler->options & SLJIT_ENTER_REG_ARG) { + CHECK_ARGUMENT((type & 0xff) == SLJIT_CALL_REG_ARG); + } else { + CHECK_ARGUMENT((type & 0xff) != SLJIT_CALL_REG_ARG); + } } #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) @@ -1830,18 +1953,18 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_com sljit_s32 type) { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_32))); - CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64); + CHECK_ARGUMENT(type >= SLJIT_EQUAL && type <= SLJIT_ORDERED_LESS_EQUAL); CHECK_ARGUMENT(op == SLJIT_MOV || op == SLJIT_MOV32 || (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR)); CHECK_ARGUMENT(!(op & VARIABLE_FLAG_MASK)); - if ((type & 0xff) <= SLJIT_NOT_ZERO) + if (type <= SLJIT_NOT_ZERO) CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z); else - CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) - || ((type & 0xff) == SLJIT_NOT_CARRY && (compiler->last_flags & 0xff) == SLJIT_CARRY) - || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW)); + CHECK_ARGUMENT(type == (compiler->last_flags & 0xff) + || (type == SLJIT_NOT_CARRY && (compiler->last_flags & 0xff) == SLJIT_CARRY) + || (type == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || CHECK_UNORDERED(type, compiler->last_flags)); FUNCTION_CHECK_DST(dst, dstw); @@ -1850,12 +1973,12 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op_flags(struct sljit_com #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " flags%s %s%s, ", - !(op & SLJIT_SET_Z) ? "" : ".z", + fprintf(compiler->verbose, " flags.%s%s%s ", GET_OPCODE(op) < SLJIT_OP2_BASE ? "mov" : op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], - GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_32) ? "32" : "")); + GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_32) ? "32" : ""), + !(op & SLJIT_SET_Z) ? 
"" : ".z"); sljit_verbose_param(compiler, dst, dstw); - fprintf(compiler->verbose, ", %s%s\n", jump_names[type & 0xff], JUMP_POSTFIX(type)); + fprintf(compiler->verbose, ", %s\n", jump_names[type]); } #endif CHECK_RETURN_OK; @@ -1866,28 +1989,31 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_cmov(struct sljit_compile sljit_s32 src, sljit_sw srcw) { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_32))); - CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64); + sljit_s32 cond = type & ~SLJIT_32; + + CHECK_ARGUMENT(cond >= SLJIT_EQUAL && cond <= SLJIT_ORDERED_LESS_EQUAL); CHECK_ARGUMENT(compiler->scratches != -1 && compiler->saveds != -1); - CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_32)); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg)); if (src != SLJIT_IMM) { CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(src)); CHECK_ARGUMENT(srcw == 0); } - if ((type & 0xff) <= SLJIT_NOT_ZERO) + if (cond <= SLJIT_NOT_ZERO) CHECK_ARGUMENT(compiler->last_flags & SLJIT_SET_Z); else - CHECK_ARGUMENT((type & 0xff) == (compiler->last_flags & 0xff) - || ((type & 0xff) == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW)); + CHECK_ARGUMENT(cond == (compiler->last_flags & 0xff) + || (cond == SLJIT_NOT_CARRY && (compiler->last_flags & 0xff) == SLJIT_CARRY) + || (cond == SLJIT_NOT_OVERFLOW && (compiler->last_flags & 0xff) == SLJIT_OVERFLOW) + || CHECK_UNORDERED(cond, compiler->last_flags)); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) if (SLJIT_UNLIKELY(!!compiler->verbose)) { - fprintf(compiler->verbose, " cmov%s %s%s, ", - !(dst_reg & SLJIT_32) ? "" : "32", - jump_names[type & 0xff], JUMP_POSTFIX(type)); - sljit_verbose_reg(compiler, dst_reg & ~SLJIT_32); + fprintf(compiler->verbose, " cmov%s %s, ", + !(type & SLJIT_32) ? 
"" : "32", + jump_names[type & ~SLJIT_32]); + sljit_verbose_reg(compiler, dst_reg); fprintf(compiler->verbose, ", "); sljit_verbose_param(compiler, src, srcw); fprintf(compiler->verbose, "\n"); @@ -1900,28 +2026,123 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem(struct sljit_compiler sljit_s32 reg, sljit_s32 mem, sljit_sw memw) { + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } + #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P); - CHECK_ARGUMENT(!(type & SLJIT_32) || ((type & 0xff) != SLJIT_MOV && (type & 0xff) != SLJIT_MOV_U32 && (type & 0xff) != SLJIT_MOV_P)); - CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST)); - CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST)); - CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0); + sljit_s32 allowed_flags; + + if (type & SLJIT_MEM_UNALIGNED) { + CHECK_ARGUMENT(!(type & (SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32))); + } else if (type & SLJIT_MEM_UNALIGNED_16) { + CHECK_ARGUMENT(!(type & SLJIT_MEM_UNALIGNED_32)); + } else { + CHECK_ARGUMENT((reg & REG_PAIR_MASK) || (type & SLJIT_MEM_UNALIGNED_32)); + } + + allowed_flags = SLJIT_MEM_UNALIGNED; + + switch (type & 0xff) { + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV32: + allowed_flags = SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16; + break; + case SLJIT_MOV: + case SLJIT_MOV_P: + allowed_flags = SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32; + break; + } + + CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | allowed_flags)) == 0); + + if (reg & REG_PAIR_MASK) { + CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_FIRST(reg))); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(REG_PAIR_SECOND(reg))); + CHECK_ARGUMENT(REG_PAIR_FIRST(reg) != REG_PAIR_SECOND(reg)); + } else { + CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P); + CHECK_ARGUMENT(!(type & SLJIT_32) || ((type & 0xff) >= SLJIT_MOV_U8 && (type & 0xff) <= SLJIT_MOV_S16)); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg)); + } FUNCTION_CHECK_SRC_MEM(mem, memw); - CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(reg)); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + if ((type & 0xff) == SLJIT_MOV32) + fprintf(compiler->verbose, " %s32", + (type & SLJIT_MEM_STORE) ? "store" : "load"); + else + fprintf(compiler->verbose, " %s%s%s", + (type & SLJIT_MEM_STORE) ? "store" : "load", + !(type & SLJIT_32) ? 
"" : "32", + op1_names[(type & 0xff) - SLJIT_OP1_BASE]); + + if (type & SLJIT_MEM_UNALIGNED) + printf(".un"); + else if (type & SLJIT_MEM_UNALIGNED_16) + printf(".un16"); + else if (type & SLJIT_MEM_UNALIGNED_32) + printf(".un32"); + + if (reg & REG_PAIR_MASK) { + fprintf(compiler->verbose, " {"); + sljit_verbose_reg(compiler, REG_PAIR_FIRST(reg)); + fprintf(compiler->verbose, ", "); + sljit_verbose_reg(compiler, REG_PAIR_SECOND(reg)); + fprintf(compiler->verbose, "}, "); + } else { + fprintf(compiler->verbose, " "); + sljit_verbose_reg(compiler, reg); + fprintf(compiler->verbose, ", "); + } + sljit_verbose_param(compiler, mem, memw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + CHECK_RETURN_OK; + } + +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT((type & 0xff) >= SLJIT_MOV && (type & 0xff) <= SLJIT_MOV_P); + CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_POST)) == 0); CHECK_ARGUMENT((mem & REG_MASK) != 0 && (mem & REG_MASK) != reg); + + FUNCTION_CHECK_SRC_MEM(mem, memw); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) { - if (sljit_emit_mem(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED) - fprintf(compiler->verbose, " //"); + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + if (type & SLJIT_MEM_SUPP) + CHECK_RETURN_OK; + if (sljit_emit_mem_update(compiler, type | SLJIT_MEM_SUPP, reg, mem, memw) == SLJIT_ERR_UNSUPPORTED) { + fprintf(compiler->verbose, " # mem: unsupported form, no instructions are emitted\n"); + CHECK_RETURN_OK; + } + + if ((type & 0xff) == SLJIT_MOV32) + fprintf(compiler->verbose, " %s32.%s ", + (type & SLJIT_MEM_STORE) ? "store" : "load", + (type & SLJIT_MEM_POST) ? "post" : "pre"); + else + fprintf(compiler->verbose, " %s%s%s.%s ", + (type & SLJIT_MEM_STORE) ? "store" : "load", + !(type & SLJIT_32) ? "" : "32", + op1_names[(type & 0xff) - SLJIT_OP1_BASE], + (type & SLJIT_MEM_POST) ? "post" : "pre"); - fprintf(compiler->verbose, " mem%s.%s%s%s ", - !(type & SLJIT_32) ? "" : "32", - (type & SLJIT_MEM_STORE) ? "st" : "ld", - op1_names[(type & 0xff) - SLJIT_OP1_BASE], - (type & SLJIT_MEM_PRE) ? 
".pre" : ".post"); sljit_verbose_reg(compiler, reg); fprintf(compiler->verbose, ", "); sljit_verbose_param(compiler, mem, memw); @@ -1937,22 +2158,67 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compile { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64); - CHECK_ARGUMENT((type & SLJIT_MEM_PRE) || (type & SLJIT_MEM_POST)); - CHECK_ARGUMENT((type & (SLJIT_MEM_PRE | SLJIT_MEM_POST)) != (SLJIT_MEM_PRE | SLJIT_MEM_POST)); - CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_PRE | SLJIT_MEM_POST)) == 0); + if (type & SLJIT_MEM_UNALIGNED) { + CHECK_ARGUMENT(!(type & (SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32))); + } else if (type & SLJIT_MEM_UNALIGNED_16) { + CHECK_ARGUMENT(!(type & SLJIT_MEM_UNALIGNED_32)); + } else { + CHECK_ARGUMENT(type & SLJIT_MEM_UNALIGNED_32); + CHECK_ARGUMENT(!(type & SLJIT_32)); + } + + CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32))); + CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg)); + FUNCTION_CHECK_SRC_MEM(mem, memw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s.%s", + (type & SLJIT_MEM_STORE) ? "store" : "load", + !(type & SLJIT_32) ? "f64" : "f32"); + + if (type & SLJIT_MEM_UNALIGNED) + printf(".un"); + else if (type & SLJIT_MEM_UNALIGNED_16) + printf(".un16"); + else if (type & SLJIT_MEM_UNALIGNED_32) + printf(".un32"); + + fprintf(compiler->verbose, " "); + sljit_verbose_freg(compiler, freg); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(compiler, mem, memw); + fprintf(compiler->verbose, "\n"); + } +#endif + CHECK_RETURN_OK; +} + +static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ +#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) + CHECK_ARGUMENT((type & 0xff) == SLJIT_MOV_F64); + CHECK_ARGUMENT((type & ~(0xff | SLJIT_32 | SLJIT_MEM_STORE | SLJIT_MEM_SUPP | SLJIT_MEM_POST)) == 0); FUNCTION_CHECK_SRC_MEM(mem, memw); CHECK_ARGUMENT(FUNCTION_CHECK_IS_FREG(freg)); #endif #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - if (!(type & SLJIT_MEM_SUPP) && SLJIT_UNLIKELY(!!compiler->verbose)) { - if (sljit_emit_fmem(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED) - fprintf(compiler->verbose, " //"); - - fprintf(compiler->verbose, " fmem.%s%s%s ", - (type & SLJIT_MEM_STORE) ? "st" : "ld", - !(type & SLJIT_32) ? ".f64" : ".f32", - (type & SLJIT_MEM_PRE) ? ".pre" : ".post"); + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + if (type & SLJIT_MEM_SUPP) + CHECK_RETURN_OK; + if (sljit_emit_fmem_update(compiler, type | SLJIT_MEM_SUPP, freg, mem, memw) == SLJIT_ERR_UNSUPPORTED) { + fprintf(compiler->verbose, " # fmem: unsupported form, no instructions are emitted\n"); + CHECK_RETURN_OK; + } + + fprintf(compiler->verbose, " %s.%s.%s ", + (type & SLJIT_MEM_STORE) ? "store" : "load", + !(type & SLJIT_32) ? "f64" : "f32", + (type & SLJIT_MEM_POST) ? 
"post" : "pre"); + sljit_verbose_freg(compiler, freg); fprintf(compiler->verbose, ", "); sljit_verbose_param(compiler, mem, memw); @@ -1960,6 +2226,7 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fmem(struct sljit_compile } #endif CHECK_RETURN_OK; + } static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) @@ -2012,6 +2279,10 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_co CHECK_RETURN_OK; } +#else /* !SLJIT_ARGUMENT_CHECKS && !SLJIT_VERBOSE */ + +#define SLJIT_SKIP_CHECKS(compiler) + #endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_VERBOSE */ #define SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw) \ @@ -2039,46 +2310,11 @@ static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_put_label(struct sljit_co ADJUST_LOCAL_OFFSET(dst, dstw); \ ADJUST_LOCAL_OFFSET(src, srcw); -static SLJIT_INLINE sljit_s32 emit_mov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) -{ -#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) - /* At the moment the pointer size is always equal to sljit_sw. May be changed in the future. */ - if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P)) - return SLJIT_SUCCESS; -#else - if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P)) - return SLJIT_SUCCESS; -#endif - -#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ - || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - compiler->skip_checks = 1; -#endif - return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw); -} - -#if !(defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_return(compiler, op, src, srcw)); - - FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); - -#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ - || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - compiler->skip_checks = 1; -#endif - return sljit_emit_return_void(compiler); -} - -#endif - #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ || (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \ - || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ - || ((defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) && !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)) + || ((defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) && !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1 && SLJIT_MIPS_REV < 6)) \ + || (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \ + || (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 dst_reg, @@ -2086,33 +2322,57 @@ static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *com { struct sljit_label *label; struct sljit_jump *jump; - sljit_s32 op = (dst_reg & SLJIT_32) ? SLJIT_MOV32 : SLJIT_MOV; + sljit_s32 op = (type & SLJIT_32) ? 
SLJIT_MOV32 : SLJIT_MOV; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - jump = sljit_emit_jump(compiler, type ^ 0x1); + SLJIT_SKIP_CHECKS(compiler); + jump = sljit_emit_jump(compiler, (type & ~SLJIT_32) ^ 0x1); FAIL_IF(!jump); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - FAIL_IF(sljit_emit_op1(compiler, op, dst_reg & ~SLJIT_32, 0, src, srcw)); + SLJIT_SKIP_CHECKS(compiler); + FAIL_IF(sljit_emit_op1(compiler, op, dst_reg, 0, src, srcw)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); label = sljit_emit_label(compiler); FAIL_IF(!label); + sljit_set_label(jump, label); return SLJIT_SUCCESS; } #endif +#if (!(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) || (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)) \ + && !(defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + +static sljit_s32 sljit_emit_mem_unaligned(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + SLJIT_SKIP_CHECKS(compiler); + + if (type & SLJIT_MEM_STORE) + return sljit_emit_op1(compiler, type & (0xff | SLJIT_32), mem, memw, reg, 0); + return sljit_emit_op1(compiler, type & (0xff | SLJIT_32), reg, 0, mem, memw); +} + +#endif /* (!SLJIT_CONFIG_MIPS || SLJIT_MIPS_REV >= 6) && !SLJIT_CONFIG_ARM_V5 */ + +#if (!(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) || (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)) \ + && !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) + +static sljit_s32 sljit_emit_fmem_unaligned(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ + SLJIT_SKIP_CHECKS(compiler); + + if (type & SLJIT_MEM_STORE) + return sljit_emit_fop1(compiler, type & (0xff | SLJIT_32), mem, memw, freg, 0); + return sljit_emit_fop1(compiler, type & (0xff | SLJIT_32), freg, 0, mem, memw); +} + +#endif /* (!SLJIT_CONFIG_MIPS || SLJIT_MIPS_REV >= 6) && !SLJIT_CONFIG_ARM */ + /* CPU description section */ #if (defined SLJIT_32BIT_ARCHITECTURE && SLJIT_32BIT_ARCHITECTURE) @@ -2153,13 +2413,58 @@ static SLJIT_INLINE sljit_s32 sljit_emit_cmov_generic(struct sljit_compiler *com # include "sljitNativePPC_common.c" #elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) # include "sljitNativeMIPS_common.c" -#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) -# include "sljitNativeSPARC_common.c" +#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) +# include "sljitNativeRISCV_common.c" #elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) # include "sljitNativeS390X.c" #endif -#if !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) +static SLJIT_INLINE sljit_s32 emit_mov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) + /* At the moment the pointer size is always equal to sljit_sw. May be changed in the future. 
*/ + if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P)) + return SLJIT_SUCCESS; +#else + if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P)) + return SLJIT_SUCCESS; +#endif + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw); +} + +#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + && !((defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) && defined __SOFTFP__) + +static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + if (src == SLJIT_FR0) + return SLJIT_SUCCESS; + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw); +} + +#endif /* !SLJIT_CONFIG_X86_32 && !(SLJIT_CONFIG_ARM_32 && __SOFTFP__) */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return(compiler, op, src, srcw)); + + if (GET_OPCODE(op) < SLJIT_MOV_F64) { + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + } else { + FAIL_IF(emit_fmov_before_return(compiler, op, src, srcw)); + } + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_return_void(compiler); +} + +#if !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) \ + && !(defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src1, sljit_sw src1w, @@ -2229,20 +2534,33 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler else flags = condition << VARIABLE_FLAG_SHIFT; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); PTR_FAIL_IF(sljit_emit_op2u(compiler, SLJIT_SUB | flags | (type & SLJIT_32), src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, condition | (type & (SLJIT_REWRITABLE_JUMP | SLJIT_32))); } -#endif +#endif /* !SLJIT_CONFIG_MIPS */ + +#if (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + if (type < SLJIT_UNORDERED || type > SLJIT_ORDERED_LESS_EQUAL) + return 0; + + switch (type) { + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + return 0; + } + + return 1; +} + +#endif /* SLJIT_CONFIG_ARM */ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src1, sljit_sw src1w, @@ -2251,61 +2569,65 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compile CHECK_ERROR_PTR(); CHECK_PTR(check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); sljit_emit_fop1(compiler, SLJIT_CMP_F64 | ((type & 0xff) << VARIABLE_FLAG_SHIFT) | (type & SLJIT_32), src1, src1w, src2, src2w); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return 
sljit_emit_jump(compiler, type); } -#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \ - && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ +#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \ && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw) { - SLJIT_UNUSED_ARG(compiler); + CHECK_ERROR(); + CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw)); SLJIT_UNUSED_ARG(type); SLJIT_UNUSED_ARG(reg); SLJIT_UNUSED_ARG(mem); SLJIT_UNUSED_ARG(memw); + return SLJIT_ERR_UNSUPPORTED; +} + +#endif /* !SLJIT_CONFIG_ARM && !SLJIT_CONFIG_PPC */ + +#if !(defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) \ + && !(defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ CHECK_ERROR(); - CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); - return SLJIT_ERR_UNSUPPORTED; + return sljit_emit_fmem_unaligned(compiler, type, freg, mem, memw); } -#endif +#endif /* !SLJIT_CONFIG_ARM_32 && !SLJIT_CONFIG_MIPS */ #if !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ && !(defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) { - SLJIT_UNUSED_ARG(compiler); + CHECK_ERROR(); + CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw)); SLJIT_UNUSED_ARG(type); SLJIT_UNUSED_ARG(freg); SLJIT_UNUSED_ARG(mem); SLJIT_UNUSED_ARG(memw); - CHECK_ERROR(); - CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); - return SLJIT_ERR_UNSUPPORTED; } -#endif +#endif /* !SLJIT_CONFIG_ARM_64 && !SLJIT_CONFIG_PPC */ #if !(defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \ && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) @@ -2316,10 +2638,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *c CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset)); ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + + SLJIT_SKIP_CHECKS(compiler); + if (offset != 0) return sljit_emit_op2(compiler, SLJIT_ADD, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset); return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_SP, 0); @@ -2387,6 +2708,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) return 0; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + SLJIT_UNUSED_ARG(type); + SLJIT_UNREACHABLE(); + return 0; +} + SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code, void *exec_allocator_data) { SLJIT_UNUSED_ARG(code); @@ -2426,6 +2754,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp return SLJIT_ERR_UNSUPPORTED; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + 
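Annotation (not part of the patch): the SLJIT_SKIP_CHECKS(compiler) calls introduced throughout the hunks above replace the repeated `compiler->skip_checks = 1;` blocks that were guarded by SLJIT_ARGUMENT_CHECKS / SLJIT_VERBOSE. The empty expansion for builds without either option appears earlier in this file; the checked-build counterpart is outside these hunks, but presumably it is just the old one-liner, roughly:

/* Hedged sketch, assuming the checked-build definition mirrors the removed
   inline blocks: mark the next API call as internal so its argument checks
   and verbose dump are skipped exactly once. */
#define SLJIT_SKIP_CHECKS(compiler) ((compiler)->skip_checks = 1)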
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); @@ -2436,9 +2771,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp return SLJIT_ERR_UNSUPPORTED; } -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw) { SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); SLJIT_UNREACHABLE(); return SLJIT_ERR_UNSUPPORTED; } @@ -2505,6 +2842,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil return SLJIT_ERR_UNSUPPORTED; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(src_dst); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -2703,6 +3056,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile return SLJIT_ERR_UNSUPPORTED; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(reg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) { SLJIT_UNUSED_ARG(compiler); @@ -2714,6 +3078,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil return SLJIT_ERR_UNSUPPORTED; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(freg); + SLJIT_UNUSED_ARG(mem); + SLJIT_UNUSED_ARG(memw); + SLJIT_UNREACHABLE(); + return SLJIT_ERR_UNSUPPORTED; +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset) { SLJIT_UNUSED_ARG(compiler); diff --git a/thirdparty/pcre2/src/sljit/sljitLir.h b/thirdparty/pcre2/src/sljit/sljitLir.h index 1162658156..c6a0832ef8 100644 --- a/thirdparty/pcre2/src/sljit/sljitLir.h +++ b/thirdparty/pcre2/src/sljit/sljitLir.h @@ -36,26 +36,24 @@ Advantages: - The execution can be continued from any LIR instruction. In other words, it is possible to jump to any label from anywhere, even from - a code fragment, which is compiled later, if both compiled code - shares the same context. See sljit_emit_enter for more details - - Supports self modifying code: target of (conditional) jump and call + a code fragment, which is compiled later, as long as the compiling + context is the same. See sljit_emit_enter for more details. + - Supports self modifying code: target of any jump and call instructions and some constant values can be dynamically modified - during runtime + during runtime. 
See SLJIT_REWRITABLE_JUMP. - although it is not suggested to do it frequently - can be used for inline caching: save an important value once in the instruction stream - - since this feature limits the optimization possibilities, a - special flag must be passed at compile time when these - instructions are emitted - A fixed stack space can be allocated for local variables - The compiler is thread-safe - The compiler is highly configurable through preprocessor macros. You can disable unneeded features (multithreading in single threaded applications), and you can use your own system functions - (including memory allocators). See sljitConfig.h + (including memory allocators). See sljitConfig.h. Disadvantages: - - No automatic register allocation, and temporary results are - not stored on the stack. (hence the name comes) + - The compiler is more like a platform independent assembler, so + there is no built-in variable management. Registers and stack must + be managed manually (the name of the compiler refers to this). In practice: - This approach is very effective for interpreters - One of the saved registers typically points to a stack interface @@ -77,7 +75,7 @@ #include "sljitConfig.h" /* The following header file defines useful macros for fine tuning -sljit based code generators. They are listed in the beginning +SLJIT based code generators. They are listed in the beginning of sljitConfigInternal.h */ #include "sljitConfigInternal.h" @@ -90,6 +88,10 @@ of sljitConfigInternal.h */ extern "C" { #endif +/* Version numbers. */ +#define SLJIT_MAJOR_VERSION 0 +#define SLJIT_MINOR_VERSION 95 + /* --------------------------------------------------------------------- */ /* Error codes */ /* --------------------------------------------------------------------- */ @@ -97,33 +99,31 @@ extern "C" { /* Indicates no error. */ #define SLJIT_SUCCESS 0 /* After the call of sljit_generate_code(), the error code of the compiler - is set to this value to avoid future sljit calls (in debug mode at least). + is set to this value to avoid further code generation. The complier should be freed after sljit_generate_code(). */ #define SLJIT_ERR_COMPILED 1 -/* Cannot allocate non executable memory. */ +/* Cannot allocate non-executable memory. */ #define SLJIT_ERR_ALLOC_FAILED 2 /* Cannot allocate executable memory. - Only for sljit_generate_code() */ + Only sljit_generate_code() returns with this error code. */ #define SLJIT_ERR_EX_ALLOC_FAILED 3 /* Return value for SLJIT_CONFIG_UNSUPPORTED placeholder architecture. */ #define SLJIT_ERR_UNSUPPORTED 4 /* An ivalid argument is passed to any SLJIT function. */ #define SLJIT_ERR_BAD_ARGUMENT 5 -/* Dynamic code modification is not enabled. */ -#define SLJIT_ERR_DYN_CODE_MOD 6 /* --------------------------------------------------------------------- */ /* Registers */ /* --------------------------------------------------------------------- */ /* - Scratch (R) registers: registers whose may not preserve their values + Scratch (R) registers: registers which may not preserve their values across function calls. - Saved (S) registers: registers whose preserve their values across + Saved (S) registers: registers which preserve their values across function calls. - The scratch and saved register sets are overlap. The last scratch register + The scratch and saved register sets overlap. The last scratch register is the first saved register, the one before the last is the second saved register, and so on. 
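Annotation (not part of the patch): the error-code convention documented above means individual emit calls do not have to be checked one by one; a single check of the compiler status around code generation is enough. A minimal usage sketch, assuming the (allocator_data, exec_allocator_data) signature of this sljit version and that sljit_generate_code() returns NULL when any earlier emit call failed:

#include "sljitLir.h"

static void *compile_example(void)
{
	void *code;
	struct sljit_compiler *compiler = sljit_create_compiler(NULL, NULL);

	if (!compiler)
		return NULL;

	/* ... sljit_emit_enter / sljit_emit_op* calls go here; their return
	   values can be ignored thanks to the sticky error code ... */

	if (sljit_get_compiler_error(compiler) != SLJIT_SUCCESS) {
		sljit_free_compiler(compiler);
		return NULL;
	}

	code = sljit_generate_code(compiler);
	sljit_free_compiler(compiler); /* the generated machine code outlives the compiler */
	return code;
}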
@@ -209,7 +209,7 @@ extern "C" { /* The SLJIT_SP provides direct access to the linear stack space allocated by sljit_emit_enter. It can only be used in the following form: SLJIT_MEM1(SLJIT_SP). The immediate offset is extended by the relative stack offset automatically. - The sljit_get_local_base can be used to obtain the absolute offset. */ + The sljit_get_local_base can be used to obtain the real address of a value. */ #define SLJIT_SP (SLJIT_NUMBER_OF_REGISTERS + 1) /* Return with machine word. */ @@ -249,6 +249,10 @@ extern "C" { /* Float registers >= SLJIT_FIRST_SAVED_FLOAT_REG are saved registers. */ #define SLJIT_FIRST_SAVED_FLOAT_REG (SLJIT_FS0 - SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS + 1) +/* Return with floating point arg. */ + +#define SLJIT_RETURN_FREG SLJIT_FR0 + /* --------------------------------------------------------------------- */ /* Argument type definitions */ /* --------------------------------------------------------------------- */ @@ -386,6 +390,7 @@ struct sljit_label { struct sljit_jump { struct sljit_jump *next; sljit_uw addr; + /* Architecture dependent flags. */ sljit_uw flags; union { sljit_uw target; @@ -423,17 +428,17 @@ struct sljit_compiler { struct sljit_memory_fragment *buf; struct sljit_memory_fragment *abuf; - /* Used scratch registers. */ + /* Available scratch registers. */ sljit_s32 scratches; - /* Used saved registers. */ + /* Available saved registers. */ sljit_s32 saveds; - /* Used float scratch registers. */ + /* Available float scratch registers. */ sljit_s32 fscratches; - /* Used float saved registers. */ + /* Available float saved registers. */ sljit_s32 fsaveds; /* Local stack size. */ sljit_s32 local_size; - /* Code size. */ + /* Maximum code size. */ sljit_uw size; /* Relative offset of the executable mapping from the writable mapping. */ sljit_sw executable_offset; @@ -446,8 +451,6 @@ struct sljit_compiler { #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) sljit_s32 args_size; - sljit_s32 locals_offset; - sljit_s32 scratches_offset; #endif #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) @@ -488,8 +491,7 @@ struct sljit_compiler { sljit_uw args_size; #endif -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - sljit_s32 delay_slot; +#if (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) sljit_s32 cache_arg; sljit_sw cache_argw; #endif @@ -517,7 +519,8 @@ struct sljit_compiler { #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) \ || (defined SLJIT_DEBUG && SLJIT_DEBUG) \ || (defined SLJIT_VERBOSE && SLJIT_VERBOSE) - /* Trust arguments when the API function is called. */ + /* Trust arguments when an API function is called. + Used internally for calling API functions. */ sljit_s32 skip_checks; #endif }; @@ -526,7 +529,7 @@ struct sljit_compiler { /* Main functions */ /* --------------------------------------------------------------------- */ -/* Creates an sljit compiler. The allocator_data is required by some +/* Creates an SLJIT compiler. The allocator_data is required by some custom memory managers. This pointer is passed to SLJIT_MALLOC and SLJIT_FREE macros. Most allocators (including the default one) ignores this value, and it is recommended to pass NULL @@ -540,19 +543,19 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void *allo /* Frees everything except the compiled machine code. */ SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler); -/* Returns the current error code. 
If an error is occurred, future sljit - calls which uses the same compiler argument returns early with the same +/* Returns the current error code. If an error occurres, future calls + which uses the same compiler argument returns early with the same error code. Thus there is no need for checking the error after every - call, it is enough to do it before the code is compiled. Removing + call, it is enough to do it after the code is compiled. Removing these checks increases the performance of the compiling process. */ static SLJIT_INLINE sljit_s32 sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; } /* Sets the compiler error code to SLJIT_ERR_ALLOC_FAILED except if an error was detected before. After the error code is set the compiler behaves as if the allocation failure happened - during an sljit function call. This can greatly simplify error - checking, since only the compiler status needs to be checked - after the compilation. */ + during an SLJIT function call. This can greatly simplify error + checking, since it is enough to check the compiler status + after the code is compiled. */ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compiler *compiler); /* @@ -560,8 +563,8 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_compiler_memory_error(struct sljit_compi and <= 128 bytes on 64 bit architectures. The memory area is owned by the compiler, and freed by sljit_free_compiler. The returned pointer is sizeof(sljit_sw) aligned. Excellent for allocating small blocks during - the compiling, and no need to worry about freeing them. The size is - enough to contain at most 16 pointers. If the size is outside of the range, + compiling, and no need to worry about freeing them. The size is enough + to contain at most 16 pointers. If the size is outside of the range, the function will return with NULL. However, this return value does not indicate that there is no more memory (does not set the current error code of the compiler to out-of-memory status). @@ -574,8 +577,8 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *comp #endif /* - Create executable code from the sljit instruction stream. This is the final step - of the code generation so no more instructions can be added after this call. + Create executable code from the instruction stream. This is the final step + of the code generation so no more instructions can be emitted after this call. */ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler); @@ -606,13 +609,14 @@ static SLJIT_INLINE sljit_sw sljit_get_executable_offset(struct sljit_compiler * static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler *compiler) { return compiler->executable_size; } /* Returns with non-zero if the feature or limitation type passed as its - argument is present on the current CPU. + argument is present on the current CPU. The return value is one, if a + feature is fully supported, and it is two, if partially supported. Some features (e.g. floating point operations) require hardware (CPU) support while others (e.g. move with update) are emulated if not available. - However even if a feature is emulated, specialized code paths can be faster - than the emulation. Some limitations are emulated as well so their general - case is supported but it has extra performance costs. */ + However, even when a feature is emulated, specialized code paths may be + faster than the emulation. 
Some limitations are emulated as well so their + general case is supported but it has extra performance costs. */ /* [Not emulated] Floating-point support is available. */ #define SLJIT_HAS_FPU 0 @@ -622,10 +626,14 @@ static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler #define SLJIT_HAS_ZERO_REGISTER 2 /* [Emulated] Count leading zero is supported. */ #define SLJIT_HAS_CLZ 3 +/* [Emulated] Count trailing zero is supported. */ +#define SLJIT_HAS_CTZ 4 +/* [Emulated] Rotate left/right is supported. */ +#define SLJIT_HAS_ROT 5 /* [Emulated] Conditional move is supported. */ -#define SLJIT_HAS_CMOV 4 -/* [Emulated] Conditional move is supported. */ -#define SLJIT_HAS_PREFETCH 5 +#define SLJIT_HAS_CMOV 6 +/* [Emulated] Prefetch instruction is available (emulated as a nop). */ +#define SLJIT_HAS_PREFETCH 7 #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) /* [Not emulated] SSE2 support is available on x86. */ @@ -634,8 +642,23 @@ static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type); -/* Instruction generation. Returns with any error code. If there is no - error, they return with SLJIT_SUCCESS. */ +/* If type is between SLJIT_ORDERED_EQUAL and SLJIT_ORDERED_LESS_EQUAL, + sljit_cmp_info returns one, if the cpu supports the passed floating + point comparison type. + + If type is SLJIT_UNORDERED or SLJIT_ORDERED, sljit_cmp_info returns + one, if the cpu supports checking the unordered comparison result + regardless of the comparison type passed to the comparison instruction. + The returned value is always one, if there is at least one type between + SLJIT_ORDERED_EQUAL and SLJIT_ORDERED_LESS_EQUAL where sljit_cmp_info + returns with a zero value. + + Otherwise it returns zero. */ +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type); + +/* The following functions generate machine code. If there is no + error, they return with SLJIT_SUCCESS, otherwise they return + with an error code. */ /* The executable code is a function from the viewpoint of the C @@ -643,30 +666,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) Binary Interface) of the platform, which specify the purpose of machine registers and stack handling among other things. The sljit_emit_enter function emits the necessary instructions for - setting up a new context for the executable code and moves function - arguments to the saved registers. Furthermore the options argument + setting up a new context for the executable code. This is often + called as function prologue. Furthermore the options argument can be used to pass configuration options to the compiler. The available options are listed before sljit_emit_enter. - The function argument list is the combination of SLJIT_ARGx - (SLJIT_DEF_ARG1) macros. Currently maximum 4 arguments are - supported. The first integer argument is loaded into SLJIT_S0, - the second one is loaded into SLJIT_S1, and so on. Similarly, - the first floating point argument is loaded into SLJIT_FR0, - the second one is loaded into SLJIT_FR1, and so on. Furthermore - the register set used by the function must be declared as well. - The number of scratch and saved registers used by the function - must be passed to sljit_emit_enter. Only R registers between R0 + The function argument list is specified by the SLJIT_ARGSx + (SLJIT_ARGS0 .. SLJIT_ARGS4) macros. Currently maximum four + arguments are supported. 
See the description of SLJIT_ARGSx + macros about argument passing. Furthermore the register set + used by the function must be declared as well. The number of + scratch and saved registers available to the function must + be passed to sljit_emit_enter. Only R registers between R0 and "scratches" argument can be used later. E.g. if "scratches" - is set to 2, the scratch register set will be limited to SLJIT_R0 - and SLJIT_R1. The S registers and the floating point registers - ("fscratches" and "fsaveds") are specified in a similar manner. - The sljit_emit_enter is also capable of allocating a stack space - for local variables. The "local_size" argument contains the size - in bytes of this local area and its staring address is stored - in SLJIT_SP. The memory area between SLJIT_SP (inclusive) and - SLJIT_SP + local_size (exclusive) can be modified freely until - the function returns. The stack space is not initialized. + is set to two, the scratch register set will be limited to + SLJIT_R0 and SLJIT_R1. The S registers and the floating point + registers ("fscratches" and "fsaveds") are specified in a + similar manner. The sljit_emit_enter is also capable of + allocating a stack space for local data. The "local_size" + argument contains the size in bytes of this local area, and + it can be accessed using SLJIT_MEM1(SLJIT_SP). The memory + area between SLJIT_SP (inclusive) and SLJIT_SP + local_size + (exclusive) can be modified freely until the function returns. + The stack space is not initialized to zero. Note: the following conditions must met: 0 <= scratches <= SLJIT_NUMBER_OF_REGISTERS @@ -683,9 +705,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) overwrites the previous context. */ -/* The compiled function uses cdecl calling - * convention instead of SLJIT_FUNC. */ -#define SLJIT_ENTER_CDECL 0x00000001 +/* Saved registers between SLJIT_S0 and SLJIT_S(n - 1) (inclusive) + are not saved / restored on function enter / return. Instead, + these registers can be used to pass / return data (such as + global / local context pointers) across function calls. The + value of n must be between 1 and 3. This option is only + supported by SLJIT_ENTER_REG_ARG calling convention. */ +#define SLJIT_ENTER_KEEP(n) (n) + +/* The compiled function uses an SLJIT specific register argument + calling convention. This is a lightweight function call type where + both the caller and the called functions must be compiled by + SLJIT. The type argument of the call must be SLJIT_CALL_REG_ARG + and all arguments must be stored in scratch registers. */ +#define SLJIT_ENTER_REG_ARG 0x00000004 /* The local_size must be >= 0 and <= SLJIT_MAX_LOCAL_SIZE. */ #define SLJIT_MAX_LOCAL_SIZE 65536 @@ -694,12 +727,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size); -/* The machine code has a context (which contains the local stack space size, - number of used registers, etc.) which initialized by sljit_emit_enter. Several - functions (such as sljit_emit_return) requres this context to be able to generate - the appropriate code. However, some code fragments (like inline cache) may have - no normal entry point so their context is unknown for the compiler. Their context - can be provided to the compiler by the sljit_set_context function. 
+/* The SLJIT compiler has a current context (which contains the local + stack space size, number of used registers, etc.) which is initialized + by sljit_emit_enter. Several functions (such as sljit_emit_return) + requires this context to be able to generate the appropriate code. + However, some code fragments (compiled separately) may have no + normal entry point so their context is unknown for the compiler. + + The sljit_set_context and sljit_emit_enter have the same arguments, + but sljit_set_context does not generate any machine code. Note: every call of sljit_emit_enter and sljit_set_context overwrites the previous context. */ @@ -708,16 +744,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size); -/* Return from machine code. The sljit_emit_return_void function does not return with - any value. The sljit_emit_return function returns with a single value which stores - the result of a data move instruction. The instruction is specified by the op - argument, and must be between SLJIT_MOV and SLJIT_MOV_P (see sljit_emit_op1). */ +/* Return to the caller function. The sljit_emit_return_void function + does not return with any value. The sljit_emit_return function returns + with a single value loaded from its source operand. The load operation + can be between SLJIT_MOV and SLJIT_MOV_P (see sljit_emit_op1) and + SLJIT_MOV_F32/SLJIT_MOV_F64 (see sljit_emit_fop1) depending on the + return value specified by sljit_emit_enter/sljit_set_context. */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler); SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw); +/* Restores the saved registers and free the stack area, then the execution + continues from the address specified by the source operand. This + operation is similar to sljit_emit_return, but it ignores the return + address. The code where the exection continues should use the same context + as the caller function (see sljit_set_context). A word (pointer) value + can be passed in the SLJIT_RETURN_REG register. This function can be used + to jump to exception handlers. */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw); + /* Generating entry and exit points for fast call functions (see SLJIT_FAST_CALL). Both sljit_emit_fast_enter and SLJIT_FAST_RETURN operations preserve the values of all registers and stack frame. The return address is stored in the @@ -726,9 +775,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp Fast calls are cheap operations (usually only a single call instruction is emitted) but they do not preserve any registers. However the callee function - can freely use / update any registers and stack values which can be + can freely use / update any registers and the local area which can be efficiently exploited by various optimizations. Registers can be saved - manually by the callee function if needed. + and restored manually if needed. 
Although returning to different address by SLJIT_FAST_RETURN is possible, this address usually cannot be predicted by the return address predictor of @@ -743,16 +792,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * /* Source and destination operands for arithmetical instructions imm - a simple immediate value (cannot be used as a destination) - reg - any of the registers (immediate argument must be 0) - [imm] - absolute immediate memory address + reg - any of the available registers (immediate argument must be 0) + [imm] - absolute memory address [reg+imm] - indirect memory address [reg+(reg<<imm)] - indirect indexed memory address (shift must be between 0 and 3) - useful for (byte, half, int, sljit_sw) array access - (fully supported by both x86 and ARM architectures, and cheap operation on others) + useful for accessing arrays (fully supported by both x86 and + ARM architectures, and cheap operation on others) */ /* - IMPORTANT NOTE: memory access MUST be naturally aligned unless + IMPORTANT NOTE: memory accesses MUST be naturally aligned unless SLJIT_UNALIGNED macro is defined and its value is 1. length | alignment @@ -792,8 +841,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * Write-back is supported except for one instruction: 32 bit signed load with [reg+imm] addressing mode on 64 bit. mips: [reg+imm], -65536 <= imm <= 65535 - sparc: [reg+imm], -4096 <= imm <= 4095 - [reg+reg] is supported + Write-back is not supported + riscv: [reg+imm], -2048 <= imm <= 2047 + Write-back is not supported s390x: [reg+imm], -2^19 <= imm < 2^19 [reg+reg] is supported Write-back is not supported @@ -805,20 +855,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * #define SLJIT_MEM1(r1) (SLJIT_MEM | (r1)) #define SLJIT_MEM2(r1, r2) (SLJIT_MEM | (r1) | ((r2) << 8)) #define SLJIT_IMM 0x40 +#define SLJIT_REG_PAIR(r1, r2) ((r1) | ((r2) << 8)) /* Sets 32 bit operation mode on 64 bit CPUs. This option is ignored on 32 bit CPUs. When this option is set for an arithmetic operation, only - the lower 32 bit of the input registers are used, and the CPU status + the lower 32 bits of the input registers are used, and the CPU status flags are set according to the 32 bit result. Although the higher 32 bit of the input and the result registers are not defined by SLJIT, it might be defined by the CPU architecture (e.g. MIPS). To satisfy these CPU requirements all source registers must be the result of those operations where this option was also set. Memory loads read 32 bit values rather than 64 bit ones. In other words 32 bit and 64 bit operations cannot be - mixed. The only exception is SLJIT_MOV32 whose source register can hold + mixed. The only exception is SLJIT_MOV32 which source register can hold any 32 or 64 bit value, and it is converted to a 32 bit compatible format - first. This conversion is free (no instructions are emitted) on most CPUs. - A 32 bit value can also be converted to a 64 bit value by SLJIT_MOV_S32 + first. When the source and destination registers are the same, this + conversion is free (no instructions are emitted) on most CPUs. A 32 bit + value can also be converted to a 64 bit value by SLJIT_MOV_S32 (sign extension) or SLJIT_MOV_U32 (zero extension). 
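Annotation (not part of the patch): a hedged sketch of the SLJIT_32 rule described above, keeping 32 bit and 64 bit computations separate and widening explicitly. It assumes at least three scratch registers were requested in sljit_emit_enter; return values are ignored as allowed by the error-code convention.

static void add_mixed_width(struct sljit_compiler *compiler)
{
	/* 32 bit addition: only the low 32 bits of R0 and R1 are used */
	sljit_emit_op2(compiler, SLJIT_ADD32, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_R1, 0);
	/* sign-extend the 32 bit result before mixing it with 64 bit values */
	sljit_emit_op1(compiler, SLJIT_MOV_S32, SLJIT_R0, 0, SLJIT_R0, 0);
	/* now a plain 64 bit addition is well defined */
	sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_R0, 0);
}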
As for floating-point operations, this option sets 32 bit single @@ -835,18 +887,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * SLJIT_ADD32 == (SLJIT_ADD | SLJIT_32) */ #define SLJIT_32 0x100 -/* Many CPUs (x86, ARM, PPC) have status flags which can be set according +/* Many CPUs (x86, ARM, PPC) have status flag bits which can be set according to the result of an operation. Other CPUs (MIPS) do not have status - flags, and results must be stored in registers. To cover both architecture - types efficiently only two flags are defined by SLJIT: + flag bits, and results must be stored in registers. To cover both + architecture types efficiently only two flags are defined by SLJIT: * Zero (equal) flag: it is set if the result is zero - * Variable flag: its value is defined by the last arithmetic operation + * Variable flag: its value is defined by the arithmetic operation SLJIT instructions can set any or both of these flags. The value of - these flags is undefined if the instruction does not specify their value. - The description of each instruction contains the list of allowed flag - types. + these flags is undefined if the instruction does not specify their + value. The description of each instruction contains the list of + allowed flag types. + + Note: the logical or operation can be used to set flags. Example: SLJIT_ADD can set the Z, OVERFLOW, CARRY flags hence @@ -867,32 +921,40 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * Sets the variable flag if unsigned overflow (carry) occurs, clears it otherwise. - If an instruction (e.g. SLJIT_MOV) does not modify flags the flags are - unchanged. + Certain instructions (e.g. SLJIT_MOV) does not modify flags, so + status flags are unchanged. - Using these flags can reduce the number of emitted instructions. E.g. a - fast loop can be implemented by decreasing a counter register and set the - zero flag to jump back if the counter register has not reached zero. + Example: - Motivation: although CPUs can set a large number of flags, usually their - values are ignored or only one of them is used. Emulating a large number - of flags on systems without flag register is complicated so SLJIT - instructions must specify the flag they want to use and only that flag - will be emulated. The last arithmetic instruction can be repeated if + sljit_op2(..., SLJIT_ADD | SLJIT_SET_Z, ...) + sljit_op1(..., SLJIT_MOV, ...) + Zero flag is set according to the result of SLJIT_ADD. + + sljit_op2(..., SLJIT_ADD | SLJIT_SET_Z, ...) + sljit_op2(..., SLJIT_ADD, ...) + Zero flag has unknown value. + + These flags can be used for code optimization. E.g. a fast loop can be + implemented by decreasing a counter register and set the zero flag + using a single instruction. The zero register can be used by a + conditional jump to restart the loop. A single comparison can set a + zero and less flags to check if a value is less, equal, or greater + than another value. + + Motivation: although some CPUs can set a large number of flag bits, + usually their values are ignored or only a few of them are used. Emulating + a large number of flags on systems without a flag register is complicated + so SLJIT instructions must specify the flag they want to use and only + that flag is computed. The last arithmetic instruction can be repeated if multiple flags need to be checked. */ /* Set Zero status flag. */ #define SLJIT_SET_Z 0x0200 /* Set the variable status flag if condition is true. - See comparison types. 
*/ + See comparison types (e.g. SLJIT_SET_LESS, SLJIT_SET_F_EQUAL). */ #define SLJIT_SET(condition) ((condition) << 10) -/* Notes: - - you cannot postpone conditional jump instructions except if noted that - the instruction does not set flags (See: SLJIT_KEEP_FLAGS). - - flag combinations: '|' means 'logical or'. */ - /* Starting index of opcodes for sljit_emit_op0. */ #define SLJIT_OP0_BASE 0 @@ -943,10 +1005,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * /* Flags: - (does not modify flags) ENDBR32 instruction for x86-32 and ENDBR64 instruction for x86-64 when Intel Control-flow Enforcement Technology (CET) is enabled. - No instruction for other architectures. */ + No instructions are emitted for other architectures. */ #define SLJIT_ENDBR (SLJIT_OP0_BASE + 8) /* Flags: - (may destroy flags) - Skip stack frames before return. */ + Skip stack frames before return when Intel Control-flow + Enforcement Technology (CET) is enabled. No instructions + are emitted for other architectures. */ #define SLJIT_SKIP_FRAMES_BEFORE_RETURN (SLJIT_OP0_BASE + 9) SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op); @@ -990,9 +1054,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile /* Flags: - (does not modify flags) */ #define SLJIT_MOV32 (SLJIT_OP1_BASE + 7) /* Flags: - (does not modify flags) - Note: load a pointer sized data, useful on x32 (a 32 bit mode on x86-64 - where all x64 features are available, e.g. 16 register) or similar - compiling modes */ + Note: loads a pointer sized data, useful on x32 mode (a 64 bit mode + on x86-64 which uses 32 bit pointers) or similar compiling modes */ #define SLJIT_MOV_P (SLJIT_OP1_BASE + 8) /* Flags: Z Note: immediate source argument is not supported */ @@ -1003,6 +1066,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile Note: immediate source argument is not supported */ #define SLJIT_CLZ (SLJIT_OP1_BASE + 10) #define SLJIT_CLZ32 (SLJIT_CLZ | SLJIT_32) +/* Count trailing zeroes + Flags: - (may destroy flags) + Note: immediate source argument is not supported */ +#define SLJIT_CTZ (SLJIT_OP1_BASE + 11) +#define SLJIT_CTZ32 (SLJIT_CTZ | SLJIT_32) SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw dstw, @@ -1019,7 +1087,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile #define SLJIT_ADDC32 (SLJIT_ADDC | SLJIT_32) /* Flags: Z | LESS | GREATER_EQUAL | GREATER | LESS_EQUAL SIG_LESS | SIG_GREATER_EQUAL | SIG_GREATER - SIG_LESS_EQUAL | CARRY */ + SIG_LESS_EQUAL | OVERFLOW | CARRY */ #define SLJIT_SUB (SLJIT_OP2_BASE + 2) #define SLJIT_SUB32 (SLJIT_SUB | SLJIT_32) /* Flags: CARRY */ @@ -1046,31 +1114,100 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile #define SLJIT_SHL (SLJIT_OP2_BASE + 8) #define SLJIT_SHL32 (SLJIT_SHL | SLJIT_32) /* Flags: Z + Same as SLJIT_SHL, except the the second operand is + always masked by the length of the shift operation. */ +#define SLJIT_MSHL (SLJIT_OP2_BASE + 9) +#define SLJIT_MSHL32 (SLJIT_MSHL | SLJIT_32) +/* Flags: Z Let bit_length be the length of the shift operation: 32 or 64. If src2 is immediate, src2w is masked by (bit_length - 1). Otherwise, if the content of src2 is outside the range from 0 to bit_length - 1, the result is undefined. 
*/ -#define SLJIT_LSHR (SLJIT_OP2_BASE + 9) +#define SLJIT_LSHR (SLJIT_OP2_BASE + 10) #define SLJIT_LSHR32 (SLJIT_LSHR | SLJIT_32) /* Flags: Z + Same as SLJIT_LSHR, except the the second operand is + always masked by the length of the shift operation. */ +#define SLJIT_MLSHR (SLJIT_OP2_BASE + 11) +#define SLJIT_MLSHR32 (SLJIT_MLSHR | SLJIT_32) +/* Flags: Z Let bit_length be the length of the shift operation: 32 or 64. If src2 is immediate, src2w is masked by (bit_length - 1). Otherwise, if the content of src2 is outside the range from 0 to bit_length - 1, the result is undefined. */ -#define SLJIT_ASHR (SLJIT_OP2_BASE + 10) +#define SLJIT_ASHR (SLJIT_OP2_BASE + 12) #define SLJIT_ASHR32 (SLJIT_ASHR | SLJIT_32) +/* Flags: Z + Same as SLJIT_ASHR, except the the second operand is + always masked by the length of the shift operation. */ +#define SLJIT_MASHR (SLJIT_OP2_BASE + 13) +#define SLJIT_MASHR32 (SLJIT_MASHR | SLJIT_32) +/* Flags: - (may destroy flags) + Let bit_length be the length of the rotate operation: 32 or 64. + The second operand is always masked by (bit_length - 1). */ +#define SLJIT_ROTL (SLJIT_OP2_BASE + 14) +#define SLJIT_ROTL32 (SLJIT_ROTL | SLJIT_32) +/* Flags: - (may destroy flags) + Let bit_length be the length of the rotate operation: 32 or 64. + The second operand is always masked by (bit_length - 1). */ +#define SLJIT_ROTR (SLJIT_OP2_BASE + 15) +#define SLJIT_ROTR32 (SLJIT_ROTR | SLJIT_32) SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw dstw, sljit_s32 src1, sljit_sw src1w, sljit_s32 src2, sljit_sw src2w); -/* The sljit_emit_op2u function is the same as sljit_emit_op2 except the result is discarded. */ +/* The sljit_emit_op2u function is the same as sljit_emit_op2 + except the result is discarded. */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src1, sljit_sw src1w, sljit_s32 src2, sljit_sw src2w); +/* Emit a left or right shift operation, where the bits shifted + in comes from a separate source operand. All operands are + interpreted as unsigned integers. + + In the followings the value_mask variable is 31 for 32 bit + operations and word_size - 1 otherwise. + + op must be one of the following operations: + SLJIT_SHL or SLJIT_SHL32: + src_dst <<= src2 + src_dst |= ((src1 >> 1) >> (src2 ^ value_mask)) + SLJIT_MSHL or SLJIT_MSHL32: + src2 &= value_mask + perform the SLJIT_SHL or SLJIT_SHL32 operation + SLJIT_LSHR or SLJIT_LSHR32: + src_dst >>= src2 + src_dst |= ((src1 << 1) << (src2 ^ value_mask)) + SLJIT_MLSHR or SLJIT_MLSHR32: + src2 &= value_mask + perform the SLJIT_LSHR or SLJIT_LSHR32 operation + + op can be combined (or'ed) with SLJIT_SHIFT_INTO_NON_ZERO + + src_dst must be a register which content is updated after + the operation is completed + src1 / src1w contains the bits which shifted into src_dst + src2 / src2w contains the shift amount + + Note: a rotate operation can be performed if src_dst and + src1 are set to the same register + + Flags: - (may destroy flags) */ + +/* The src2 contains a non-zero value. Improves the generated + code on certain architectures, which provides a small + performance improvement. */ +#define SLJIT_SHIFT_INTO_NON_ZERO 0x200 + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w); + /* Starting index of opcodes for sljit_emit_op2. 
*/ #define SLJIT_OP_SRC_BASE 128 @@ -1116,8 +1253,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp #define SLJIT_MOV_F64 (SLJIT_FOP1_BASE + 0) #define SLJIT_MOV_F32 (SLJIT_MOV_F64 | SLJIT_32) /* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE] - SRC/DST TYPE can be: D - double, S - single, W - signed word, I - signed int - Rounding mode when the destination is W or I: round towards zero. */ + SRC/DST TYPE can be: F64, F32, S32, SW + Rounding mode when the destination is SW or S32: round towards zero. */ /* Flags: - (may destroy flags) */ #define SLJIT_CONV_F64_FROM_F32 (SLJIT_FOP1_BASE + 1) #define SLJIT_CONV_F32_FROM_F64 (SLJIT_CONV_F64_FROM_F32 | SLJIT_32) @@ -1133,7 +1270,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp /* Flags: - (may destroy flags) */ #define SLJIT_CONV_F64_FROM_S32 (SLJIT_FOP1_BASE + 5) #define SLJIT_CONV_F32_FROM_S32 (SLJIT_CONV_F64_FROM_S32 | SLJIT_32) -/* Note: dst is the left and src is the right operand for SLJIT_CMPD. +/* Note: dst is the left and src is the right operand for SLJIT_CMP_F32/64. Flags: EQUAL_F | LESS_F | GREATER_EQUAL_F | GREATER_F | LESS_EQUAL_F */ #define SLJIT_CMP_F64 (SLJIT_FOP1_BASE + 6) #define SLJIT_CMP_F32 (SLJIT_CMP_F64 | SLJIT_32) @@ -1202,46 +1339,75 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi #define SLJIT_SET_OVERFLOW SLJIT_SET(SLJIT_OVERFLOW) #define SLJIT_NOT_OVERFLOW 11 -/* Unlike other flags, sljit_emit_jump may destroy this flag. */ +/* Unlike other flags, sljit_emit_jump may destroy the carry flag. */ #define SLJIT_CARRY 12 #define SLJIT_SET_CARRY SLJIT_SET(SLJIT_CARRY) #define SLJIT_NOT_CARRY 13 -/* Floating point comparison types. */ -#define SLJIT_EQUAL_F64 14 -#define SLJIT_EQUAL_F32 (SLJIT_EQUAL_F64 | SLJIT_32) -#define SLJIT_SET_EQUAL_F SLJIT_SET(SLJIT_EQUAL_F64) -#define SLJIT_NOT_EQUAL_F64 15 -#define SLJIT_NOT_EQUAL_F32 (SLJIT_NOT_EQUAL_F64 | SLJIT_32) -#define SLJIT_SET_NOT_EQUAL_F SLJIT_SET(SLJIT_NOT_EQUAL_F64) -#define SLJIT_LESS_F64 16 -#define SLJIT_LESS_F32 (SLJIT_LESS_F64 | SLJIT_32) -#define SLJIT_SET_LESS_F SLJIT_SET(SLJIT_LESS_F64) -#define SLJIT_GREATER_EQUAL_F64 17 -#define SLJIT_GREATER_EQUAL_F32 (SLJIT_GREATER_EQUAL_F64 | SLJIT_32) -#define SLJIT_SET_GREATER_EQUAL_F SLJIT_SET(SLJIT_GREATER_EQUAL_F64) -#define SLJIT_GREATER_F64 18 -#define SLJIT_GREATER_F32 (SLJIT_GREATER_F64 | SLJIT_32) -#define SLJIT_SET_GREATER_F SLJIT_SET(SLJIT_GREATER_F64) -#define SLJIT_LESS_EQUAL_F64 19 -#define SLJIT_LESS_EQUAL_F32 (SLJIT_LESS_EQUAL_F64 | SLJIT_32) -#define SLJIT_SET_LESS_EQUAL_F SLJIT_SET(SLJIT_LESS_EQUAL_F64) -#define SLJIT_UNORDERED_F64 20 -#define SLJIT_UNORDERED_F32 (SLJIT_UNORDERED_F64 | SLJIT_32) -#define SLJIT_SET_UNORDERED_F SLJIT_SET(SLJIT_UNORDERED_F64) -#define SLJIT_ORDERED_F64 21 -#define SLJIT_ORDERED_F32 (SLJIT_ORDERED_F64 | SLJIT_32) -#define SLJIT_SET_ORDERED_F SLJIT_SET(SLJIT_ORDERED_F64) +/* Basic floating point comparison types. + + Note: when the comparison result is unordered, their behaviour is unspecified. 
*/ + +#define SLJIT_F_EQUAL 14 +#define SLJIT_SET_F_EQUAL SLJIT_SET(SLJIT_F_EQUAL) +#define SLJIT_F_NOT_EQUAL 15 +#define SLJIT_SET_F_NOT_EQUAL SLJIT_SET(SLJIT_F_NOT_EQUAL) +#define SLJIT_F_LESS 16 +#define SLJIT_SET_F_LESS SLJIT_SET(SLJIT_F_LESS) +#define SLJIT_F_GREATER_EQUAL 17 +#define SLJIT_SET_F_GREATER_EQUAL SLJIT_SET(SLJIT_F_GREATER_EQUAL) +#define SLJIT_F_GREATER 18 +#define SLJIT_SET_F_GREATER SLJIT_SET(SLJIT_F_GREATER) +#define SLJIT_F_LESS_EQUAL 19 +#define SLJIT_SET_F_LESS_EQUAL SLJIT_SET(SLJIT_F_LESS_EQUAL) + +/* Jumps when either argument contains a NaN value. */ +#define SLJIT_UNORDERED 20 +#define SLJIT_SET_UNORDERED SLJIT_SET(SLJIT_UNORDERED) +/* Jumps when neither argument contains a NaN value. */ +#define SLJIT_ORDERED 21 +#define SLJIT_SET_ORDERED SLJIT_SET(SLJIT_ORDERED) + +/* Ordered / unordered floating point comparison types. + + Note: each comparison type has an ordered and unordered form. Some + architectures supports only either of them (see: sljit_cmp_info). */ + +#define SLJIT_ORDERED_EQUAL 22 +#define SLJIT_SET_ORDERED_EQUAL SLJIT_SET(SLJIT_ORDERED_EQUAL) +#define SLJIT_UNORDERED_OR_NOT_EQUAL 23 +#define SLJIT_SET_UNORDERED_OR_NOT_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_NOT_EQUAL) +#define SLJIT_ORDERED_LESS 24 +#define SLJIT_SET_ORDERED_LESS SLJIT_SET(SLJIT_ORDERED_LESS) +#define SLJIT_UNORDERED_OR_GREATER_EQUAL 25 +#define SLJIT_SET_UNORDERED_OR_GREATER_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_GREATER_EQUAL) +#define SLJIT_ORDERED_GREATER 26 +#define SLJIT_SET_ORDERED_GREATER SLJIT_SET(SLJIT_ORDERED_GREATER) +#define SLJIT_UNORDERED_OR_LESS_EQUAL 27 +#define SLJIT_SET_UNORDERED_OR_LESS_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_LESS_EQUAL) + +#define SLJIT_UNORDERED_OR_EQUAL 28 +#define SLJIT_SET_UNORDERED_OR_EQUAL SLJIT_SET(SLJIT_UNORDERED_OR_EQUAL) +#define SLJIT_ORDERED_NOT_EQUAL 29 +#define SLJIT_SET_ORDERED_NOT_EQUAL SLJIT_SET(SLJIT_ORDERED_NOT_EQUAL) +#define SLJIT_UNORDERED_OR_LESS 30 +#define SLJIT_SET_UNORDERED_OR_LESS SLJIT_SET(SLJIT_UNORDERED_OR_LESS) +#define SLJIT_ORDERED_GREATER_EQUAL 31 +#define SLJIT_SET_ORDERED_GREATER_EQUAL SLJIT_SET(SLJIT_ORDERED_GREATER_EQUAL) +#define SLJIT_UNORDERED_OR_GREATER 32 +#define SLJIT_SET_UNORDERED_OR_GREATER SLJIT_SET(SLJIT_UNORDERED_OR_GREATER) +#define SLJIT_ORDERED_LESS_EQUAL 33 +#define SLJIT_SET_ORDERED_LESS_EQUAL SLJIT_SET(SLJIT_ORDERED_LESS_EQUAL) /* Unconditional jump types. */ -#define SLJIT_JUMP 22 - /* Fast calling method. See sljit_emit_fast_enter / SLJIT_FAST_RETURN. */ -#define SLJIT_FAST_CALL 23 - /* Called function must be declared with the SLJIT_FUNC attribute. */ -#define SLJIT_CALL 24 - /* Called function must be declared with cdecl attribute. - This is the default attribute for C functions. */ -#define SLJIT_CALL_CDECL 25 +#define SLJIT_JUMP 34 +/* Fast calling method. See sljit_emit_fast_enter / SLJIT_FAST_RETURN. */ +#define SLJIT_FAST_CALL 35 +/* Default C calling convention. */ +#define SLJIT_CALL 36 +/* Called function must be compiled by SLJIT. + See SLJIT_ENTER_REG_ARG option. */ +#define SLJIT_CALL_REG_ARG 37 /* The target can be changed during runtime (see: sljit_set_jump_addr). */ #define SLJIT_REWRITABLE_JUMP 0x1000 @@ -1249,11 +1415,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi the called function returns to the caller of the current function. The stack usage is reduced before the call, but it is not necessarily reduced to zero. In the latter case the compiler needs to allocate space for some - arguments and the return register must be kept as well. 
- - This feature is highly experimental and not supported on SPARC platform - at the moment. */ -#define SLJIT_CALL_RETURN 0x2000 + arguments and the return address must be stored on the stack as well. */ +#define SLJIT_CALL_RETURN 0x2000 /* Emit a jump instruction. The destination is not set, only the type of the jump. type must be between SLJIT_EQUAL and SLJIT_FAST_CALL @@ -1263,18 +1426,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type); /* Emit a C compiler (ABI) compatible function call. - type must be SLJIT_CALL or SLJIT_CALL_CDECL - type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP and SLJIT_CALL_RETURN - arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros + type must be SLJIT_CALL or SLJIT_CALL_REG_ARG + type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP and/or SLJIT_CALL_RETURN + arg_types can be specified by SLJIT_ARGSx (SLJIT_ARG_RETURN / SLJIT_ARG_VALUE) macros Flags: destroy all flags. */ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types); /* Basic arithmetic comparison. In most architectures it is implemented as - an compare operation followed by a sljit_emit_jump. However some - architectures (i.e: ARM64 or MIPS) may employ special optimizations here. - It is suggested to use this comparison form when appropriate. - type must be between SLJIT_EQUAL and SLJIT_I_SIG_LESS_EQUAL + a compare operation followed by a sljit_emit_jump. However some + architectures (i.e: ARM64 or MIPS) may employ special optimizations + here. It is suggested to use this comparison form when appropriate. + type must be between SLJIT_EQUAL and SLJIT_SIG_LESS_EQUAL type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP Flags: may destroy flags. */ @@ -1283,15 +1446,14 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler sljit_s32 src2, sljit_sw src2w); /* Basic floating point comparison. In most architectures it is implemented as - an SLJIT_FCMP operation (setting appropriate flags) followed by a + a SLJIT_CMP_F32/64 operation (setting appropriate flags) followed by a sljit_emit_jump. However some architectures (i.e: MIPS) may employ special optimizations here. It is suggested to use this comparison form when appropriate. - type must be between SLJIT_EQUAL_F64 and SLJIT_ORDERED_F32 + type must be between SLJIT_F_EQUAL and SLJIT_ORDERED_LESS_EQUAL type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP Flags: destroy flags. - Note: if either operand is NaN, the behaviour is undefined for - types up to SLJIT_S_LESS_EQUAL. */ + Note: when an operand is NaN the behaviour depends on the comparison type. */ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src1, sljit_sw src1w, sljit_s32 src2, sljit_sw src2w); @@ -1312,22 +1474,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi /* Emit a C compiler (ABI) compatible function call. 
Direct form: set src to SLJIT_IMM() and srcw to the address Indirect form: any other valid addressing mode - type must be SLJIT_CALL or SLJIT_CALL_CDECL + type must be SLJIT_CALL or SLJIT_CALL_REG_ARG type can be combined (or'ed) with SLJIT_CALL_RETURN - arg_types is the combination of SLJIT_RET / SLJIT_ARGx (SLJIT_DEF_RET / SLJIT_DEF_ARGx) macros + arg_types can be specified by SLJIT_ARGSx (SLJIT_ARG_RETURN / SLJIT_ARG_VALUE) macros Flags: destroy all flags. */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types, sljit_s32 src, sljit_sw srcw); -/* Perform the operation using the conditional flags as the second argument. - Type must always be between SLJIT_EQUAL and SLJIT_ORDERED_F64. The value - represented by the type is 1, if the condition represented by the type - is fulfilled, and 0 otherwise. +/* Perform an operation using the conditional flags as the second argument. + Type must always be between SLJIT_EQUAL and SLJIT_ORDERED_LESS_EQUAL. + The value represented by the type is 1, if the condition represented + by the type is fulfilled, and 0 otherwise. - If op == SLJIT_MOV, SLJIT_MOV32: + When op is SLJIT_MOV or SLJIT_MOV32: Set dst to the value represented by the type (0 or 1). Flags: - (does not modify flags) - If op == SLJIT_OR, op == SLJIT_AND, op == SLJIT_XOR + When op is SLJIT_AND, SLJIT_AND32, SLJIT_OR, SLJIT_OR32, SLJIT_XOR, or SLJIT_XOR32 Performs the binary operation using dst as the first, and the value represented by type as the second argument. Result is written into dst. Flags: Z (may destroy flags) */ @@ -1339,69 +1501,139 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co if the condition is satisfied. Unlike other arithmetic operations this instruction does not support memory access. - type must be between SLJIT_EQUAL and SLJIT_ORDERED_F64 - dst_reg must be a valid register and it can be combined - with SLJIT_32 to perform a 32 bit arithmetic operation - src must be register or immediate (SLJIT_IMM) + type must be between SLJIT_EQUAL and SLJIT_ORDERED_LESS_EQUAL + type can be combined (or'ed) with SLJIT_32 + dst_reg must be a valid register + src must be a valid register or immediate (SLJIT_IMM) Flags: - (does not modify flags) */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 dst_reg, sljit_s32 src, sljit_sw srcw); +/* The following flags are used by sljit_emit_mem(), sljit_emit_mem_update(), + sljit_emit_fmem(), and sljit_emit_fmem_update(). */ + +/* Memory load operation. This is the default. */ +#define SLJIT_MEM_LOAD 0x000000 +/* Memory store operation. */ +#define SLJIT_MEM_STORE 0x000200 + /* The following flags are used by sljit_emit_mem() and sljit_emit_fmem(). */ +/* Load or stora data from an unaligned (byte aligned) address. */ +#define SLJIT_MEM_UNALIGNED 0x000400 +/* Load or stora data from a 16 bit aligned address. */ +#define SLJIT_MEM_UNALIGNED_16 0x000800 +/* Load or stora data from a 32 bit aligned address. */ +#define SLJIT_MEM_UNALIGNED_32 0x001000 + +/* The following flags are used by sljit_emit_mem_update(), + and sljit_emit_fmem_update(). */ + +/* Base register is updated before the memory access (default). */ +#define SLJIT_MEM_PRE 0x000000 +/* Base register is updated after the memory access. */ +#define SLJIT_MEM_POST 0x000400 + /* When SLJIT_MEM_SUPP is passed, no instructions are emitted. 
Instead the function returns with SLJIT_SUCCESS if the instruction form is supported and SLJIT_ERR_UNSUPPORTED otherwise. This flag allows runtime checking of available instruction forms. */ -#define SLJIT_MEM_SUPP 0x0200 -/* Memory load operation. This is the default. */ -#define SLJIT_MEM_LOAD 0x0000 -/* Memory store operation. */ -#define SLJIT_MEM_STORE 0x0400 -/* Base register is updated before the memory access. */ -#define SLJIT_MEM_PRE 0x0800 -/* Base register is updated after the memory access. */ -#define SLJIT_MEM_POST 0x1000 - -/* Emit a single memory load or store with update instruction. When the - requested instruction form is not supported by the CPU, it returns - with SLJIT_ERR_UNSUPPORTED instead of emulating the instruction. This - allows specializing tight loops based on the supported instruction - forms (see SLJIT_MEM_SUPP flag). +#define SLJIT_MEM_SUPP 0x000800 + +/* The sljit_emit_mem emits instructions for various memory operations: + + When SLJIT_MEM_UNALIGNED / SLJIT_MEM_UNALIGNED_16 / + SLJIT_MEM_UNALIGNED_32 is set in type argument: + Emit instructions for unaligned memory loads or stores. When + SLJIT_UNALIGNED is not defined, the only way to access unaligned + memory data is using sljit_emit_mem. Otherwise all operations (e.g. + sljit_emit_op1/2, or sljit_emit_fop1/2) supports unaligned access. + In general, the performance of unaligned memory accesses are often + lower than aligned and should be avoided. + + When a pair of registers is passed in reg argument: + Emit instructions for moving data between a register pair and + memory. The register pair can be specified by the SLJIT_REG_PAIR + macro. The first register is loaded from or stored into the + location specified by the mem/memw arguments, and the end address + of this operation is the starting address of the data transfer + between the second register and memory. The type argument must + be SLJIT_MOV. The SLJIT_MEM_UNALIGNED* options are allowed for + this operation. type must be between SLJIT_MOV and SLJIT_MOV_P and can be - combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE - or SLJIT_MEM_POST must be specified. - reg is the source or destination register, and must be - different from the base register of the mem operand - mem must be a SLJIT_MEM1() or SLJIT_MEM2() operand + combined (or'ed) with SLJIT_MEM_* flags + reg is a register or register pair, which is the source or + destination of the operation + mem must be a memory operand Flags: - (does not modify flags) */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw); +/* Emit a single memory load or store with update instruction. + When the requested instruction form is not supported by the CPU, + it returns with SLJIT_ERR_UNSUPPORTED instead of emulating the + instruction. This allows specializing tight loops based on + the supported instruction forms (see SLJIT_MEM_SUPP flag). + Absolute address (SLJIT_MEM0) forms are never supported + and the base (first) register specified by the mem argument + must not be SLJIT_SP and must also be different from the + register specified by the reg argument. 
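      For example, a word-copy loop might probe for a post-update load and
      fall back to a plain load plus an explicit add when the form is not
      available (a minimal sketch; the registers and the word-sized stride
      are illustrative assumptions, not requirements of this interface):

        if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_POST,
                SLJIT_R0, SLJIT_MEM1(SLJIT_R1), (sljit_sw)sizeof(sljit_sw)) == SLJIT_SUCCESS)
            sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_POST,
                SLJIT_R0, SLJIT_MEM1(SLJIT_R1), (sljit_sw)sizeof(sljit_sw));
        else {
            sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R1), 0);
            sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0,
                SLJIT_IMM, (sljit_sw)sizeof(sljit_sw));
        }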
+ + type must be between SLJIT_MOV and SLJIT_MOV_P and can be + combined (or'ed) with SLJIT_MEM_* flags + reg is the source or destination register of the operation + mem must be a memory operand + + Flags: - (does not modify flags) */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw); + /* Same as sljit_emit_mem except the followings: + Loading or storing a pair of registers is not supported. + type must be SLJIT_MOV_F64 or SLJIT_MOV_F32 and can be - combined with SLJIT_MEM_* flags. Either SLJIT_MEM_PRE - or SLJIT_MEM_POST must be specified. - freg is the source or destination floating point register */ + combined (or'ed) with SLJIT_MEM_* flags. + freg is the source or destination floating point register + of the operation + mem must be a memory operand + + Flags: - (does not modify flags) */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw); -/* Copies the base address of SLJIT_SP + offset to dst. The offset can be - anything to negate the effect of relative addressing. For example if an - array of sljit_sw values is stored on the stack from offset 0x40, and R0 - contains the offset of an array item plus 0x120, this item can be - overwritten by two SLJIT instructions: +/* Same as sljit_emit_mem_update except the followings: + + type must be SLJIT_MOV_F64 or SLJIT_MOV_F32 and can be + combined (or'ed) with SLJIT_MEM_* flags + freg is the source or destination floating point register + of the operation + mem must be a memory operand + + Flags: - (does not modify flags) */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw); + +/* Copies the base address of SLJIT_SP + offset to dst. The offset can + represent the starting address of a value in the local data (stack). + The offset is not limited by the local data limits, it can be any value. + For example if an array of bytes are stored on the stack from + offset 0x40, and R0 contains the offset of an array item plus 0x120, + this item can be changed by two SLJIT instructions: sljit_get_local_base(compiler, SLJIT_R1, 0, 0x40 - 0x120); - sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0, SLJIT_IMM, 0x5); + sljit_emit_op1(compiler, SLJIT_MOV_U8, SLJIT_MEM2(SLJIT_R1, SLJIT_R0), 0, SLJIT_IMM, 0x5); Flags: - (may destroy flags) */ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset); @@ -1430,15 +1662,67 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset); /* --------------------------------------------------------------------- */ -/* Miscellaneous utility functions */ +/* CPU specific functions */ /* --------------------------------------------------------------------- */ -#define SLJIT_MAJOR_VERSION 0 -#define SLJIT_MINOR_VERSION 94 +/* The following function is a helper function for sljit_emit_op_custom. + It returns with the real machine register index ( >=0 ) of any SLJIT_R, + SLJIT_S and SLJIT_SP registers. + + Note: it returns with -1 for virtual registers (only on x86-32). 
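      For example, together with sljit_emit_op_custom (declared below), the
      returned index can be folded into a raw instruction word. An
      illustrative sketch only, assuming an ARM32 target where the encoding
      below is a plain mov rX, rX:

        sljit_s32 r = sljit_get_register_index(SLJIT_R0);
        sljit_u32 ins = 0xe1a00000 | ((sljit_u32)r << 12) | (sljit_u32)r;
        sljit_emit_op_custom(compiler, &ins, 4);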
*/ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg); + +/* The following function is a helper function for sljit_emit_op_custom. + It returns with the real machine register ( >= 0 ) index of any SLJIT_FR, + and SLJIT_FS register. + + Note: the index is always an even number on ARM-32, MIPS. */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg); + +/* Any instruction can be inserted into the instruction stream by + sljit_emit_op_custom. It has a similar purpose as inline assembly. + The size parameter must match to the instruction size of the target + architecture: + + x86: 0 < size <= 15. The instruction argument can be byte aligned. + Thumb2: if size == 2, the instruction argument must be 2 byte aligned. + if size == 4, the instruction argument must be 4 byte aligned. + Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_u32 size); + +/* Flags were set by a 32 bit operation. */ +#define SLJIT_CURRENT_FLAGS_32 SLJIT_32 + +/* Flags were set by an ADD or ADDC operations. */ +#define SLJIT_CURRENT_FLAGS_ADD 0x01 +/* Flags were set by a SUB, SUBC, or NEG operation. */ +#define SLJIT_CURRENT_FLAGS_SUB 0x02 + +/* Flags were set by sljit_emit_op2u with SLJIT_SUB opcode. + Must be combined with SLJIT_CURRENT_FLAGS_SUB. */ +#define SLJIT_CURRENT_FLAGS_COMPARE 0x04 + +/* Define the currently available CPU status flags. It is usually used after + an sljit_emit_label or sljit_emit_op_custom operations to define which CPU + status flags are available. + + The current_flags must be a valid combination of SLJIT_SET_* and + SLJIT_CURRENT_FLAGS_* constants. */ + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, + sljit_s32 current_flags); + +/* --------------------------------------------------------------------- */ +/* Miscellaneous utility functions */ +/* --------------------------------------------------------------------- */ /* Get the human readable name of the platform. Can be useful on platforms - like ARM, where ARM and Thumb2 functions can be mixed, and - it is useful to know the type of the code generator. */ + like ARM, where ARM and Thumb2 functions can be mixed, and it is useful + to know the type of the code generator. */ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void); /* Portable helper function to get an offset of a member. */ @@ -1532,60 +1816,6 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void); #endif -/* --------------------------------------------------------------------- */ -/* CPU specific functions */ -/* --------------------------------------------------------------------- */ - -/* The following function is a helper function for sljit_emit_op_custom. - It returns with the real machine register index ( >=0 ) of any SLJIT_R, - SLJIT_S and SLJIT_SP registers. - - Note: it returns with -1 for virtual registers (only on x86-32). */ - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg); - -/* The following function is a helper function for sljit_emit_op_custom. - It returns with the real machine register index of any SLJIT_FLOAT register. - - Note: the index is always an even number on ARM (except ARM-64), MIPS, and SPARC. 
*/ - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg); - -/* Any instruction can be inserted into the instruction stream by - sljit_emit_op_custom. It has a similar purpose as inline assembly. - The size parameter must match to the instruction size of the target - architecture: - - x86: 0 < size <= 15. The instruction argument can be byte aligned. - Thumb2: if size == 2, the instruction argument must be 2 byte aligned. - if size == 4, the instruction argument must be 4 byte aligned. - Otherwise: size must be 4 and instruction argument must be 4 byte aligned. */ - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_u32 size); - -/* Flags were set by a 32 bit operation. */ -#define SLJIT_CURRENT_FLAGS_32 SLJIT_32 - -/* Flags were set by an ADD or ADDC operations. */ -#define SLJIT_CURRENT_FLAGS_ADD 0x01 -/* Flags were set by a SUB, SUBC, or NEG operation. */ -#define SLJIT_CURRENT_FLAGS_SUB 0x02 - -/* Flags were set by sljit_emit_op2u with SLJIT_SUB opcode. - Must be combined with SLJIT_CURRENT_FLAGS_SUB. */ -#define SLJIT_CURRENT_FLAGS_COMPARE 0x04 - -/* Define the currently available CPU status flags. It is usually used after - an sljit_emit_label or sljit_emit_op_custom operations to define which CPU - status flags are available. - - The current_flags must be a valid combination of SLJIT_SET_* and - SLJIT_CURRENT_FLAGS_* constants. */ - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_current_flags(struct sljit_compiler *compiler, - sljit_s32 current_flags); - #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/thirdparty/pcre2/src/sljit/sljitNativeARM_32.c b/thirdparty/pcre2/src/sljit/sljitNativeARM_32.c index 7b87f5907a..54b8ade063 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeARM_32.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeARM_32.c @@ -100,6 +100,8 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define CMP 0xe1400000 #define BKPT 0xe1200070 #define EOR 0xe0200000 +#define LDR 0xe5100000 +#define LDR_POST 0xe4100000 #define MOV 0xe1a00000 #define MUL 0xe0000090 #define MVN 0xe1e00000 @@ -107,10 +109,12 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define ORR 0xe1800000 #define PUSH 0xe92d0000 #define POP 0xe8bd0000 +#define RBIT 0xe6ff0f30 #define RSB 0xe0600000 #define RSC 0xe0e00000 #define SBC 0xe0c00000 #define SMULL 0xe0c00090 +#define STR 0xe5000000 #define SUB 0xe0400000 #define TST 0xe1000000 #define UMULL 0xe0800090 @@ -564,6 +568,7 @@ static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw execut static sljit_uw get_imm(sljit_uw imm); static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm); +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg); static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_offset, sljit_uw new_constant, sljit_s32 flush_cache) { @@ -955,12 +960,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) #endif case SLJIT_HAS_CLZ: + case SLJIT_HAS_ROT: case SLJIT_HAS_CMOV: #if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) + case SLJIT_HAS_CTZ: case SLJIT_HAS_PREFETCH: #endif return 1; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + case SLJIT_HAS_CTZ: + return 2; +#endif + default: return 0; } @@ -1049,7 +1061,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct 
sljit_compiler *compi sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { sljit_uw imm, offset; - sljit_s32 i, tmp, size, word_arg_count, saved_arg_count; + sljit_s32 i, tmp, size, word_arg_count; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); #ifdef __SOFTFP__ sljit_u32 float_arg_count; #else @@ -1065,7 +1078,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi imm = 0; tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) imm |= (sljit_uw)1 << reg_map[i]; for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) @@ -1082,7 +1095,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi FAIL_IF(push_inst(compiler, 0xe52d0004 | RD(TMP_REG2))); /* Stack must be aligned to 8 bytes: */ - size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1); if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { if ((size & SSIZE_OF(sw)) != 0) { @@ -1103,6 +1116,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi local_size = ((size + local_size + 0x7) & ~0x7) - size; compiler->local_size = local_size; + if (options & SLJIT_ENTER_REG_ARG) + arg_types = 0; + arg_types >>= SLJIT_ARG_SHIFT; word_arg_count = 0; saved_arg_count = 0; @@ -1148,8 +1164,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi if (offset < 4 * sizeof(sljit_sw)) FAIL_IF(push_inst(compiler, MOV | RD(tmp) | (offset >> 2))); else - FAIL_IF(push_inst(compiler, data_transfer_insts[WORD_SIZE | LOAD_DATA] | 0x800000 - | RN(SLJIT_SP) | RD(tmp) | (offset + (sljit_uw)size - 4 * sizeof(sljit_sw)))); + FAIL_IF(push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(tmp) | (offset + (sljit_uw)size - 4 * sizeof(sljit_sw)))); break; } @@ -1217,7 +1232,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1); if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)) size += SSIZE_OF(sw); @@ -1231,8 +1246,11 @@ static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm) sljit_uw imm2 = get_imm(imm); if (imm2 == 0) { - FAIL_IF(load_immediate(compiler, TMP_REG2, imm)); - imm2 = RM(TMP_REG2); + imm2 = (imm & ~(sljit_uw)0x3ff) >> 10; + imm = (imm & 0x3ff) >> 2; + + FAIL_IF(push_inst(compiler, ADD | SRC2_IMM | RD(SLJIT_SP) | RN(SLJIT_SP) | 0xb00 | imm2)); + return push_inst(compiler, ADD | SRC2_IMM | RD(SLJIT_SP) | RN(SLJIT_SP) | 0xf00 | (imm & 0xff)); } return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | imm2); @@ -1241,10 +1259,11 @@ static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm) static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size) { sljit_s32 local_size, fscratches, fsaveds, i, tmp; + sljit_s32 restored_reg = 0; sljit_s32 lr_dst = TMP_PC; - sljit_uw reg_list; + sljit_uw reg_list = 0; - SLJIT_ASSERT(reg_map[TMP_REG2] == 14); + SLJIT_ASSERT(reg_map[TMP_REG2] == 14 && frame_size <= 128); local_size = compiler->local_size; fscratches = compiler->fscratches; 
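The emit_add_sp fallback above splits an adjustment that get_imm cannot encode into two rotate-encoded immediates; a short worked example of the arithmetic (illustrative values, assuming the usual frame size below 0x40000 with the low two bits clear):

/* imm  = 0x1abc4
   high = (imm & ~0x3ff) >> 10 = 0x6a  -> rotate field 0xb00, i.e. 0x6a << 10 = 0x1a800
   low  = (imm & 0x3ff)  >> 2  = 0xf1  -> rotate field 0xf00, i.e. 0xf1 << 2  = 0x3c4
   0x1a800 + 0x3c4 = 0x1abc4, so the two ADDs adjust SLJIT_SP by the full amount. */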
@@ -1269,45 +1288,84 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit if (frame_size < 0) { lr_dst = TMP_REG2; frame_size = 0; - } else if (frame_size > 0) + } else if (frame_size > 0) { + SLJIT_ASSERT(frame_size == 1 || (frame_size & 0x7) == 0); lr_dst = 0; + frame_size &= ~0x7; + } - reg_list = 0; if (lr_dst != 0) reg_list |= (sljit_uw)1 << reg_map[lr_dst]; tmp = SLJIT_S0 - compiler->saveds; - for (i = SLJIT_S0; i > tmp; i--) - reg_list |= (sljit_uw)1 << reg_map[i]; + i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + if (tmp < i) { + restored_reg = i; + do { + reg_list |= (sljit_uw)1 << reg_map[i]; + } while (--i > tmp); + } - for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) - reg_list |= (sljit_uw)1 << reg_map[i]; + i = compiler->scratches; + if (i >= SLJIT_FIRST_SAVED_REG) { + restored_reg = i; + do { + reg_list |= (sljit_uw)1 << reg_map[i]; + } while (--i >= SLJIT_FIRST_SAVED_REG); + } + + if (lr_dst == TMP_REG2 && reg_list == 0) { + restored_reg = TMP_REG2; + lr_dst = 0; + } if (lr_dst == 0 && (reg_list & (reg_list - 1)) == 0) { /* The local_size does not include the saved registers. */ - local_size += SSIZE_OF(sw); + tmp = 0; + if (reg_list != 0) { + tmp = 2; + if (local_size <= 0xfff) { + if (local_size == 0) { + SLJIT_ASSERT(restored_reg != TMP_REG2); + if (frame_size == 0) + return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | 0x800008); + if (frame_size > 2 * SSIZE_OF(sw)) + return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | (sljit_uw)(frame_size - (2 * SSIZE_OF(sw)))); + } - if (reg_list != 0) - local_size += SSIZE_OF(sw); + FAIL_IF(push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(restored_reg) | (sljit_uw)local_size)); + tmp = 1; + } else if (frame_size == 0) { + frame_size = (restored_reg == TMP_REG2) ? SSIZE_OF(sw) : 2 * SSIZE_OF(sw); + tmp = 3; + } + + /* Place for the saved register. */ + if (restored_reg != TMP_REG2) + local_size += SSIZE_OF(sw); + } + + /* Place for the lr register. */ + local_size += SSIZE_OF(sw); if (frame_size > local_size) FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | (sljit_uw)(frame_size - local_size))); else if (frame_size < local_size) FAIL_IF(emit_add_sp(compiler, (sljit_uw)(local_size - frame_size))); - if (reg_list == 0) + if (tmp <= 1) return SLJIT_SUCCESS; - if (compiler->saveds > 0) { - SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[SLJIT_S0])); - lr_dst = SLJIT_S0; - } else { - SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[SLJIT_FIRST_SAVED_REG])); - lr_dst = SLJIT_FIRST_SAVED_REG; + if (tmp == 2) { + frame_size -= SSIZE_OF(sw); + if (restored_reg != TMP_REG2) + frame_size -= SSIZE_OF(sw); + + return push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(restored_reg) | (sljit_uw)frame_size); } - return push_inst(compiler, data_transfer_insts[WORD_SIZE | LOAD_DATA] | 0x800000 - | RN(SLJIT_SP) | RD(lr_dst) | (sljit_uw)(frame_size - 2 * SSIZE_OF(sw))); + tmp = (restored_reg == TMP_REG2) ? 
0x800004 : 0x800008; + return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(restored_reg) | (sljit_uw)tmp); } if (local_size > 0) @@ -1320,13 +1378,18 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit SLJIT_ASSERT(lr_dst != 0); SLJIT_ASSERT(reg_list == (sljit_uw)1 << reg_map[lr_dst]); - return push_inst(compiler, 0xe49d0004 | RD(lr_dst)); + return push_inst(compiler, LDR_POST | RN(SLJIT_SP) | RD(lr_dst) | 0x800004); } FAIL_IF(push_inst(compiler, POP | reg_list)); + if (frame_size > 0) return push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | ((sljit_uw)frame_size - sizeof(sljit_sw))); - return SLJIT_SUCCESS; + + if (lr_dst != 0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | sizeof(sljit_sw)); } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) @@ -1337,28 +1400,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler return emit_stack_frame_release(compiler, 0); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src))); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + /* --------------------------------------------------------------------- */ /* Operators */ /* --------------------------------------------------------------------- */ -#define EMIT_SHIFT_INS_AND_RETURN(opcode) \ - SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \ - if (compiler->shift_imm != 0x20) { \ - SLJIT_ASSERT(src1 == TMP_REG1); \ - SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \ - \ - if (compiler->shift_imm != 0) \ - return push_inst(compiler, MOV | (flags & SET_FLAGS) | \ - RD(dst) | (compiler->shift_imm << 7) | (opcode << 5) | RM(src2)); \ - return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); \ - } \ - return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) \ - | RM8((flags & ARGS_SWAPPED) ? src1 : src2) | (sljit_uw)(opcode << 5) \ - | 0x10 | RM((flags & ARGS_SWAPPED) ? 
src2 : src1)); - static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, sljit_uw dst, sljit_uw src1, sljit_uw src2) { + sljit_s32 is_masked; + sljit_uw shift_type; + switch (GET_OPCODE(op)) { case SLJIT_MOV: SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); @@ -1413,11 +1486,24 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src2)); case SLJIT_CLZ: - SLJIT_ASSERT(!(flags & INV_IMM)); - SLJIT_ASSERT(!(src2 & SRC2_IMM)); + SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2))); return SLJIT_SUCCESS; + case SLJIT_CTZ: + SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + FAIL_IF(push_inst(compiler, RSB | SRC2_IMM | RD(TMP_REG1) | RN(src2) | 0)); + FAIL_IF(push_inst(compiler, AND | RD(TMP_REG2) | RN(src2) | RM(TMP_REG1))); + FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(TMP_REG2))); + FAIL_IF(push_inst(compiler, CMP | SET_FLAGS | SRC2_IMM | RN(dst) | 32)); + return push_inst(compiler, (EOR ^ 0xf0000000) | SRC2_IMM | RD(dst) | RN(dst) | 0x1f); +#else /* !SLJIT_CONFIG_ARM_V5 */ + FAIL_IF(push_inst(compiler, RBIT | RD(dst) | RM(src2))); + return push_inst(compiler, CLZ | RD(dst) | RM(dst)); +#endif /* SLJIT_CONFIG_ARM_V5 */ + case SLJIT_ADD: SLJIT_ASSERT(!(flags & INV_IMM)); @@ -1471,17 +1557,61 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl return push_inst(compiler, EOR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2))); case SLJIT_SHL: - EMIT_SHIFT_INS_AND_RETURN(0); + case SLJIT_MSHL: + shift_type = 0; + is_masked = GET_OPCODE(op) == SLJIT_MSHL; + break; case SLJIT_LSHR: - EMIT_SHIFT_INS_AND_RETURN(1); + case SLJIT_MLSHR: + shift_type = 1; + is_masked = GET_OPCODE(op) == SLJIT_MLSHR; + break; case SLJIT_ASHR: - EMIT_SHIFT_INS_AND_RETURN(2); + case SLJIT_MASHR: + shift_type = 2; + is_masked = GET_OPCODE(op) == SLJIT_MASHR; + break; + + case SLJIT_ROTL: + if (compiler->shift_imm == 0x20) { + FAIL_IF(push_inst(compiler, RSB | SRC2_IMM | RD(TMP_REG2) | RN(src2) | 0)); + src2 = TMP_REG2; + } else + compiler->shift_imm = (sljit_uw)(-(sljit_sw)compiler->shift_imm) & 0x1f; + /* fallthrough */ + + case SLJIT_ROTR: + shift_type = 3; + is_masked = 0; + break; + + default: + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; } - SLJIT_UNREACHABLE(); - return SLJIT_SUCCESS; + SLJIT_ASSERT(!(flags & ARGS_SWAPPED) && !(flags & INV_IMM) && !(src2 & SRC2_IMM)); + + if (compiler->shift_imm != 0x20) { + SLJIT_ASSERT(src1 == TMP_REG1); + + if (compiler->shift_imm != 0) + return push_inst(compiler, MOV | (flags & SET_FLAGS) | + RD(dst) | (compiler->shift_imm << 7) | (shift_type << 5) | RM(src2)); + return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); + } + + SLJIT_ASSERT(src1 != TMP_REG2); + + if (is_masked) { + FAIL_IF(push_inst(compiler, AND | RD(TMP_REG2) | RN(src2) | SRC2_IMM | 0x1f)); + src2 = TMP_REG2; + } + + return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) + | RM8(src2) | (sljit_uw)(shift_type << 5) | 0x10 | RM(src1)); } #undef EMIT_SHIFT_INS_AND_RETURN @@ -1670,27 +1800,32 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, #endif } -static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, 
sljit_s32 reg, +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg) { - sljit_uw imm, offset_reg; - sljit_uw is_type1_transfer = IS_TYPE1_TRANSFER(flags); + sljit_uw imm, offset_reg, tmp; + sljit_sw mask = IS_TYPE1_TRANSFER(flags) ? 0xfff : 0xff; + sljit_sw sign = IS_TYPE1_TRANSFER(flags) ? 0x1000 : 0x100; - SLJIT_ASSERT (arg & SLJIT_MEM); - SLJIT_ASSERT((arg & REG_MASK) != tmp_reg); + SLJIT_ASSERT(arg & SLJIT_MEM); + SLJIT_ASSERT((arg & REG_MASK) != tmp_reg || (arg == SLJIT_MEM1(tmp_reg) && argw >= -mask && argw <= mask)); - if (!(arg & REG_MASK)) { - if (is_type1_transfer) { - FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw & ~(sljit_uw)0xfff)); - argw &= 0xfff; - } - else { - FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw & ~(sljit_uw)0xff)); - argw &= 0xff; + if (SLJIT_UNLIKELY(!(arg & REG_MASK))) { + tmp = (sljit_uw)(argw & (sign | mask)); + tmp = (sljit_uw)((argw + (tmp <= (sljit_uw)sign ? 0 : sign)) & ~mask); + + FAIL_IF(load_immediate(compiler, tmp_reg, tmp)); + + argw -= (sljit_sw)tmp; + tmp = 1; + + if (argw < 0) { + argw = -argw; + tmp = 0; } - return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, - is_type1_transfer ? argw : TYPE2_TRANSFER_IMM(argw))); + return push_inst(compiler, EMIT_DATA_TRANSFER(flags, tmp, reg, tmp_reg, + (mask == 0xff) ? TYPE2_TRANSFER_IMM(argw) : argw)); } if (arg & OFFS_REG_MASK) { @@ -1698,72 +1833,62 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit arg &= REG_MASK; argw &= 0x3; - if (argw != 0 && !is_type1_transfer) { + if (argw != 0 && (mask == 0xff)) { FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | ((sljit_uw)argw << 7))); return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, TYPE2_TRANSFER_IMM(0))); } /* Bit 25: RM is offset. */ return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, - RM(offset_reg) | (is_type1_transfer ? (1 << 25) : 0) | ((sljit_uw)argw << 7))); + RM(offset_reg) | (mask == 0xff ? 0 : (1 << 25)) | ((sljit_uw)argw << 7))); } arg &= REG_MASK; - if (is_type1_transfer) { - if (argw > 0xfff) { - imm = get_imm((sljit_uw)argw & ~(sljit_uw)0xfff); - if (imm) { - FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm)); - argw = argw & 0xfff; - arg = tmp_reg; - } - } - else if (argw < -0xfff) { - imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0xfff); - if (imm) { - FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm)); - argw = -(-argw & 0xfff); - arg = tmp_reg; - } + if (argw > mask) { + tmp = (sljit_uw)(argw & (sign | mask)); + tmp = (sljit_uw)((argw + (tmp <= (sljit_uw)sign ? 0 : sign)) & ~mask); + imm = get_imm(tmp); + + if (imm) { + FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm)); + argw -= (sljit_sw)tmp; + arg = tmp_reg; + + SLJIT_ASSERT(argw >= -mask && argw <= mask); } + } else if (argw < -mask) { + tmp = (sljit_uw)(-argw & (sign | mask)); + tmp = (sljit_uw)((-argw + (tmp <= (sljit_uw)sign ? 
0 : sign)) & ~mask); + imm = get_imm(tmp); - if (argw >= 0 && argw <= 0xfff) - return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw)); + if (imm) { + FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm)); + argw += (sljit_sw)tmp; + arg = tmp_reg; - if (argw < 0 && argw >= -0xfff) - return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, -argw)); - } - else { - if (argw > 0xff) { - imm = get_imm((sljit_uw)argw & ~(sljit_uw)0xff); - if (imm) { - FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm)); - argw = argw & 0xff; - arg = tmp_reg; - } + SLJIT_ASSERT(argw >= -mask && argw <= mask); } - else if (argw < -0xff) { - imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0xff); - if (imm) { - FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm)); - argw = -(-argw & 0xff); - arg = tmp_reg; - } + } + + if (argw <= mask && argw >= -mask) { + if (argw >= 0) { + if (mask == 0xff) + argw = TYPE2_TRANSFER_IMM(argw); + return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw)); } - if (argw >= 0 && argw <= 0xff) - return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, TYPE2_TRANSFER_IMM(argw))); + argw = -argw; - if (argw < 0 && argw >= -0xff) { - argw = -argw; - return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, TYPE2_TRANSFER_IMM(argw))); - } + if (mask == 0xff) + argw = TYPE2_TRANSFER_IMM(argw); + + return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, argw)); } FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw)); return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, - RM(tmp_reg) | (is_type1_transfer ? (1 << 25) : 0))); + RM(tmp_reg) | (mask == 0xff ? 0 : (1 << 25)))); } static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags, @@ -1961,15 +2086,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile saved_reg_list[saved_reg_count++] = 1; if (saved_reg_count > 0) { - FAIL_IF(push_inst(compiler, 0xe52d0000 | (saved_reg_count >= 3 ? 16 : 8) + FAIL_IF(push_inst(compiler, STR | 0x2d0000 | (saved_reg_count >= 3 ? 16 : 8) | (saved_reg_list[0] << 12) /* str rX, [sp, #-8/-16]! */)); if (saved_reg_count >= 2) { SLJIT_ASSERT(saved_reg_list[1] < 8); - FAIL_IF(push_inst(compiler, 0xe58d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */)); + FAIL_IF(push_inst(compiler, STR | 0x8d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */)); } if (saved_reg_count >= 3) { SLJIT_ASSERT(saved_reg_list[2] < 8); - FAIL_IF(push_inst(compiler, 0xe58d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */)); + FAIL_IF(push_inst(compiler, STR | 0x8d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */)); } } @@ -1983,13 +2108,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile if (saved_reg_count > 0) { if (saved_reg_count >= 3) { SLJIT_ASSERT(saved_reg_list[2] < 8); - FAIL_IF(push_inst(compiler, 0xe59d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */)); + FAIL_IF(push_inst(compiler, LDR | 0x8d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */)); } if (saved_reg_count >= 2) { SLJIT_ASSERT(saved_reg_list[1] < 8); - FAIL_IF(push_inst(compiler, 0xe59d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */)); + FAIL_IF(push_inst(compiler, LDR | 0x8d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */)); } - return push_inst(compiler, 0xe49d0000 | (sljit_uw)(saved_reg_count >= 3 ? 16 : 8) + return push_inst(compiler, (LDR ^ (1 << 24)) | 0x8d0000 | (sljit_uw)(saved_reg_count >= 3 ? 
16 : 8) | (saved_reg_list[0] << 12) /* ldr rX, [sp], #8/16 */); } return SLJIT_SUCCESS; @@ -2034,6 +2159,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); case SLJIT_CLZ: + case SLJIT_CTZ: return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw); } @@ -2069,13 +2195,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w); case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: + case SLJIT_ROTL: + case SLJIT_ROTR: if (src2 & SLJIT_IMM) { compiler->shift_imm = src2w & 0x1f; return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w); - } - else { + } else { compiler->shift_imm = 0x20; return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); } @@ -2091,13 +2221,67 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_left; + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + op = GET_OPCODE(op); + is_left = (op == SLJIT_SHL || op == SLJIT_MSHL); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + /* Shift type of ROR is 3. */ + if (src2 & SLJIT_IMM) { + src2w &= 0x1f; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, src2, src2w, TMP_REG2)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { + FAIL_IF(push_inst(compiler, MOV | RD(src_dst) | RM(src_dst) | ((sljit_uw)(is_left ? 0 : 1) << 5) | ((sljit_uw)src2w << 7))); + src2w = (src2w ^ 0x1f) + 1; + return push_inst(compiler, ORR | RD(src_dst) | RN(src_dst) | RM(src1) | ((sljit_uw)(is_left ? 1 : 0) << 5) | ((sljit_uw)src2w << 7)); + } + + if (op == SLJIT_MSHL || op == SLJIT_MLSHR) { + FAIL_IF(push_inst(compiler, AND | SRC2_IMM | RD(TMP_REG2) | RN(src2) | 0x1f)); + src2 = TMP_REG2; + } + + FAIL_IF(push_inst(compiler, MOV | RD(src_dst) | RM8(src2) | ((sljit_uw)(is_left ? 0 : 1) << 5) | 0x10 | RM(src_dst))); + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src1) | ((sljit_uw)(is_left ? 1 : 0) << 5) | (1 << 7))); + FAIL_IF(push_inst(compiler, EOR | SRC2_IMM | RD(TMP_REG2) | RN(src2) | 0x1f)); + return push_inst(compiler, ORR | RD(src_dst) | RN(src_dst) | RM(TMP_REG1) | ((sljit_uw)(is_left ? 
1 : 0) << 5) | 0x10 | RM8(TMP_REG2)); +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -2370,7 +2554,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil return SLJIT_SUCCESS; } -#undef FPU_LOAD #undef EMIT_FPU_DATA_TRANSFER /* --------------------------------------------------------------------- */ @@ -2400,11 +2583,15 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) { switch (type) { case SLJIT_EQUAL: - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ return 0x00000000; case SLJIT_NOT_EQUAL: - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */ return 0x10000000; case SLJIT_CARRY: @@ -2413,7 +2600,6 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_LESS: - case SLJIT_LESS_F64: return 0x30000000; case SLJIT_NOT_CARRY: @@ -2422,27 +2608,33 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_GREATER_EQUAL: - case SLJIT_GREATER_EQUAL_F64: return 0x20000000; case SLJIT_GREATER: - case SLJIT_GREATER_F64: + case SLJIT_UNORDERED_OR_GREATER: return 0x80000000; case SLJIT_LESS_EQUAL: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: return 0x90000000; case SLJIT_SIG_LESS: + case SLJIT_UNORDERED_OR_LESS: return 0xb0000000; case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: return 0xa0000000; case SLJIT_SIG_GREATER: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: return 0xc0000000; case SLJIT_SIG_LESS_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: return 0xd0000000; case SLJIT_OVERFLOW: @@ -2450,7 +2642,7 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x10000000; /* fallthrough */ - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return 0x60000000; case SLJIT_NOT_OVERFLOW: @@ -2458,11 +2650,18 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x00000000; /* fallthrough */ - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return 0x70000000; + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + return 0x40000000; + + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + return 0x50000000; + default: - SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL); + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG); return 0xe0000000; } } @@ -2639,7 +2838,7 @@ static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit } FAIL_IF(push_inst(compiler, MOV | (offset << 10) | (word_arg_offset >> 2))); } else - FAIL_IF(push_inst(compiler, data_transfer_insts[WORD_SIZE] | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (offset - 4 * sizeof(sljit_sw)))); + FAIL_IF(push_inst(compiler, STR | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (offset - 4 * sizeof(sljit_sw)))); } break; } @@ -2718,51 +2917,48 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); #ifdef __SOFTFP__ - PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space)); - SLJIT_ASSERT((extra_space & 0x7) == 0); - - if ((type & SLJIT_CALL_RETURN) && extra_space == 0) - type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); + if ((type & 0xff) != 
SLJIT_CALL_REG_ARG) { + PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space)); + SLJIT_ASSERT((extra_space & 0x7) == 0); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + if ((type & SLJIT_CALL_RETURN) && extra_space == 0) + type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); - jump = sljit_emit_jump(compiler, type); - PTR_FAIL_IF(jump == NULL); + SLJIT_SKIP_CHECKS(compiler); + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); - if (extra_space > 0) { - if (type & SLJIT_CALL_RETURN) - PTR_FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, - TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw)))); + if (extra_space > 0) { + if (type & SLJIT_CALL_RETURN) + PTR_FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, + TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw)))); - PTR_FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space)); + PTR_FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space)); - if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(push_inst(compiler, BX | RM(TMP_REG2))); - return jump; + if (type & SLJIT_CALL_RETURN) { + PTR_FAIL_IF(push_inst(compiler, BX | RM(TMP_REG2))); + return jump; + } } + + SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); + PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); + return jump; } +#endif /* __SOFTFP__ */ - SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); - PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); - return jump; -#else /* !__SOFTFP__ */ if (type & SLJIT_CALL_RETURN) { PTR_FAIL_IF(emit_stack_frame_release(compiler, -1)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } - PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif +#ifndef __SOFTFP__ + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); +#endif /* !__SOFTFP__ */ + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); -#endif /* __SOFTFP__ */ } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) @@ -2822,55 +3018,79 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi src = TMP_REG1; } - if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0)) { + if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) { FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src))); src = TMP_REG1; } #ifdef __SOFTFP__ - FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space)); - SLJIT_ASSERT((extra_space & 0x7) == 0); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) { + FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space)); + SLJIT_ASSERT((extra_space & 0x7) == 0); - if ((type & SLJIT_CALL_RETURN) && extra_space == 0) - type = SLJIT_JUMP; + if ((type & SLJIT_CALL_RETURN) && extra_space == 0) + type = SLJIT_JUMP; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); - FAIL_IF(sljit_emit_ijump(compiler, 
type, src, srcw)); + if (extra_space > 0) { + if (type & SLJIT_CALL_RETURN) + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, + TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw)))); - if (extra_space > 0) { - if (type & SLJIT_CALL_RETURN) - FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, - TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw)))); + FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space)); - FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space)); + if (type & SLJIT_CALL_RETURN) + return push_inst(compiler, BX | RM(TMP_REG2)); + } - if (type & SLJIT_CALL_RETURN) - return push_inst(compiler, BX | RM(TMP_REG2)); + SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); + return softfloat_post_call_with_args(compiler, arg_types); } +#endif /* __SOFTFP__ */ - SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); - return softfloat_post_call_with_args(compiler, arg_types); -#else /* !__SOFTFP__ */ if (type & SLJIT_CALL_RETURN) { FAIL_IF(emit_stack_frame_release(compiler, -1)); type = SLJIT_JUMP; } - FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif +#ifndef __SOFTFP__ + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); +#endif /* !__SOFTFP__ */ + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); -#endif /* __SOFTFP__ */ } +#ifdef __SOFTFP__ + +static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + if (compiler->options & SLJIT_ENTER_REG_ARG) { + if (src == SLJIT_FR0) + return SLJIT_SUCCESS; + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw); + } + + if (FAST_IS_REG(src)) { + if (op & SLJIT_32) + return push_inst(compiler, VMOV | (1 << 20) | RD(SLJIT_R0) | VN(src)); + return push_inst(compiler, VMOV2 | (1 << 20) | RD(SLJIT_R0) | RN(SLJIT_R1) | VM(src)); + } + + SLJIT_SKIP_CHECKS(compiler); + + if (op & SLJIT_32) + return sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, src, srcw); + return sljit_emit_mem(compiler, SLJIT_MOV, SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), src, srcw); +} + +#endif /* __SOFTFP__ */ + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw dstw, sljit_s32 type) @@ -2883,7 +3103,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co ADJUST_LOCAL_OFFSET(dst, dstw); op = GET_OPCODE(op); - cc = get_cc(compiler, type & 0xff); + cc = get_cc(compiler, type); dst_reg = FAST_IS_REG(dst) ? 
dst : TMP_REG1; if (op < SLJIT_ADD) { @@ -2921,9 +3141,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); - dst_reg &= ~SLJIT_32; - - cc = get_cc(compiler, type & 0xff); + cc = get_cc(compiler, type & ~SLJIT_32); if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { tmp = get_imm((sljit_uw)srcw); @@ -2949,16 +3167,285 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src)) & ~COND_MASK) | cc); } +static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s32 max_offset) +{ + sljit_s32 arg = *mem; + sljit_sw argw = *memw; + sljit_uw imm, tmp; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_sw mask = max_offset >= 0xf00 ? 0xfff : 0xff; + sljit_sw sign = max_offset >= 0xf00 ? 0x1000 : 0x100; +#else /* !SLJIT_CONFIG_ARM_V5 */ + sljit_sw mask = 0xfff; + sljit_sw sign = 0x1000; + + SLJIT_ASSERT(max_offset >= 0xf00); +#endif /* SLJIT_CONFIG_ARM_V5 */ + + *mem = TMP_REG1; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + *memw = 0; + return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((sljit_uw)(argw & 0x3) << 7)); + } + + arg &= REG_MASK; + + if (arg) { + if (argw <= max_offset && argw >= -mask) { + *mem = arg; + return SLJIT_SUCCESS; + } + + if (argw >= 0) { + tmp = (sljit_uw)(argw & (sign | mask)); + tmp = (sljit_uw)((argw + ((tmp <= (sljit_uw)max_offset || tmp == (sljit_uw)sign) ? 0 : sign)) & ~mask); + imm = get_imm(tmp); + + if (imm) { + *memw = argw - (sljit_sw)tmp; + SLJIT_ASSERT(*memw >= -mask && *memw <= max_offset); + + return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg) | imm); + } + } else { + tmp = (sljit_uw)(-argw & (sign | mask)); + tmp = (sljit_uw)((-argw + ((tmp <= (sljit_uw)((sign << 1) - max_offset - 1)) ? 0 : sign)) & ~mask); + imm = get_imm(tmp); + + if (imm) { + *memw = argw + (sljit_sw)tmp; + SLJIT_ASSERT(*memw >= -mask && *memw <= max_offset); + + return push_inst(compiler, SUB | RD(TMP_REG1) | RN(arg) | imm); + } + } + } + + tmp = (sljit_uw)(argw & (sign | mask)); + tmp = (sljit_uw)((argw + ((tmp <= (sljit_uw)max_offset || tmp == (sljit_uw)sign) ? 
0 : sign)) & ~mask); + *memw = argw - (sljit_sw)tmp; + + FAIL_IF(load_immediate(compiler, TMP_REG1, tmp)); + + if (arg == 0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADD | RD(TMP_REG1) | RN(TMP_REG1) | RM(arg)); +} + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + +static sljit_s32 sljit_emit_mem_unaligned(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags, steps, tmp_reg; + sljit_uw add, shift; + + switch (type & 0xff) { + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: + flags = BYTE_SIZE; + if (!(type & SLJIT_MEM_STORE)) + flags |= LOAD_DATA; + if ((type & 0xff) == SLJIT_MOV_S8) + flags |= SIGNED; + + return emit_op_mem(compiler, flags, reg, mem, memw, TMP_REG1); + + case SLJIT_MOV_U16: + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 1)); + flags = BYTE_SIZE; + steps = 1; + break; + + case SLJIT_MOV_S16: + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xff - 1)); + flags = BYTE_SIZE | SIGNED; + steps = 1; + break; + + default: + if (type & SLJIT_MEM_UNALIGNED_32) { + flags = WORD_SIZE; + if (!(type & SLJIT_MEM_STORE)) + flags |= LOAD_DATA; + + return emit_op_mem(compiler, flags, reg, mem, memw, TMP_REG1); + } + + if (!(type & SLJIT_MEM_UNALIGNED_16)) { + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 3)); + flags = BYTE_SIZE; + steps = 3; + break; + } + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xff - 2)); + + add = 1; + if (memw < 0) { + add = 0; + memw = -memw; + } + + tmp_reg = reg; + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE, add, reg, mem, TYPE2_TRANSFER_IMM(memw)))); + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(reg) | (16 << 7) | (2 << 4))); + } else { + if (reg == mem) { + SLJIT_ASSERT(reg != TMP_REG1); + tmp_reg = TMP_REG1; + } + + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE | LOAD_DATA, add, tmp_reg, mem, TYPE2_TRANSFER_IMM(memw)))); + } + + if (!add) { + memw -= 2; + if (memw <= 0) { + memw = -memw; + add = 1; + } + } else + memw += 2; + + if (type & SLJIT_MEM_STORE) + return push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw))); + + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE | LOAD_DATA, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw)))); + return push_inst(compiler, ORR | RD(reg) | RN(tmp_reg) | RM(TMP_REG2) | (16 << 7)); + } + + SLJIT_ASSERT(steps > 0); + + add = 1; + if (memw < 0) { + add = 0; + memw = -memw; + } + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE, add, reg, mem, memw))); + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(reg) | (8 << 7) | (2 << 4))); + + while (1) { + if (!add) { + memw -= 1; + if (memw == 0) + add = 1; + } else + memw += 1; + + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE, add, TMP_REG2, mem, memw))); + + if (--steps == 0) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(TMP_REG2) | (8 << 7) | (2 << 4))); + } + } + + tmp_reg = reg; + + if (reg == mem) { + SLJIT_ASSERT(reg != TMP_REG1); + tmp_reg = TMP_REG1; + } + + shift = 8; + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE | LOAD_DATA, add, tmp_reg, mem, memw))); + + do { + if (!add) { + memw -= 1; + if (memw == 0) + add = 1; + } else + memw += 1; + + if (steps > 1) { + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE | LOAD_DATA, add, TMP_REG2, mem, memw))); + FAIL_IF(push_inst(compiler, ORR | RD(tmp_reg) | RN(tmp_reg) | RM(TMP_REG2) | (shift << 7))); 
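/* The byte just OR-ed in occupies bits [shift + 7:shift]; step the shift
   so the next (or final) piece is merged above it. */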
+ shift += 8; + } + } while (--steps != 0); + + flags |= LOAD_DATA; + + if (flags & SIGNED) + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(flags, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw)))); + else + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(flags, add, TMP_REG2, mem, memw))); + + return push_inst(compiler, ORR | RD(reg) | RN(tmp_reg) | RM(TMP_REG2) | (shift << 7)); +} + +#endif /* SLJIT_CONFIG_ARM_V5 */ + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw) { sljit_s32 flags; - sljit_uw is_type1_transfer, inst; CHECK_ERROR(); CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + if (!(reg & REG_PAIR_MASK)) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + ADJUST_LOCAL_OFFSET(mem, memw); +#endif /* SLJIT_CONFIG_ARM_V5 */ + + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + } + + ADJUST_LOCAL_OFFSET(mem, memw); + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16)) { + FAIL_IF(update_mem_addr(compiler, &mem, &memw, (type & SLJIT_MEM_UNALIGNED_16) ? 0xfff - 6 : 0xfff - 7)); + + if (!(type & SLJIT_MEM_STORE) && REG_PAIR_FIRST(reg) == (mem & REG_MASK)) { + FAIL_IF(sljit_emit_mem_unaligned(compiler, type, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw))); + return sljit_emit_mem_unaligned(compiler, type, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw); + } + + FAIL_IF(sljit_emit_mem_unaligned(compiler, type, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw)); + return sljit_emit_mem_unaligned(compiler, type, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw)); + } +#endif /* SLJIT_CONFIG_ARM_V5 */ + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4)); + + flags = WORD_SIZE; + + if (!(type & SLJIT_MEM_STORE)) { + if (REG_PAIR_FIRST(reg) == (mem & REG_MASK)) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw), TMP_REG1)); + return emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw, TMP_REG1); + } + + flags = WORD_SIZE | LOAD_DATA; + } + + FAIL_IF(emit_op_mem(compiler, flags, REG_PAIR_FIRST(reg), SLJIT_MEM1(mem), memw, TMP_REG1)); + return emit_op_mem(compiler, flags, REG_PAIR_SECOND(reg), SLJIT_MEM1(mem), memw + SSIZE_OF(sw), TMP_REG1); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags; + sljit_uw is_type1_transfer, inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw)); + is_type1_transfer = 1; switch (type & 0xff) { @@ -2999,16 +3486,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { if (!is_type1_transfer && memw != 0) return SLJIT_ERR_UNSUPPORTED; - } - else { + } else { if (is_type1_transfer) { if (memw > 4095 || memw < -4095) return SLJIT_ERR_UNSUPPORTED; - } - else { - if (memw > 255 || memw < -255) - return SLJIT_ERR_UNSUPPORTED; - } + } else if (memw > 255 || memw < -255) + return SLJIT_ERR_UNSUPPORTED; } if (type & SLJIT_MEM_SUPP) @@ -3022,20 +3505,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile if (is_type1_transfer) inst |= (1 << 25); - if (type & SLJIT_MEM_PRE) - inst |= (1 << 21); - else + if (type & SLJIT_MEM_POST) inst ^= (1 << 24); + else + inst |= (1 << 21); return 
push_inst(compiler, inst); } inst = EMIT_DATA_TRANSFER(flags, 0, reg, mem & REG_MASK, 0); - if (type & SLJIT_MEM_PRE) - inst |= (1 << 21); - else + if (type & SLJIT_MEM_POST) inst ^= (1 << 24); + else + inst |= (1 << 21); if (is_type1_transfer) { if (memw >= 0) @@ -3054,6 +3537,103 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile return push_inst(compiler, inst | TYPE2_TRANSFER_IMM((sljit_uw)memw)); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_s32 max_offset; + sljit_s32 dst; +#endif /* SLJIT_CONFIG_ARM_V5 */ + + CHECK_ERROR(); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + + if (type & SLJIT_MEM_UNALIGNED_32) + return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 0 : FPU_LOAD), freg, mem, memw); + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | RD(TMP_REG2))); + + if (type & SLJIT_32) + return sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_UNALIGNED_16), TMP_REG2, mem, memw); + + max_offset = 0xfff - 7; + if (type & SLJIT_MEM_UNALIGNED_16) + max_offset++; + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, max_offset)); + mem |= SLJIT_MEM; + + FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_UNALIGNED_16), TMP_REG2, mem, memw)); + + FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | 0x80 | RD(TMP_REG2))); + return sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_UNALIGNED_16), TMP_REG2, mem, memw + 4); + } + + max_offset = (type & SLJIT_32) ? 0xfff - 3 : 0xfff - 7; + if (type & SLJIT_MEM_UNALIGNED_16) + max_offset++; + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, max_offset)); + + dst = TMP_REG1; + + /* Stack offset adjustment is not needed because dst + is not stored on the stack when mem is SLJIT_SP. 
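      When mem had to be materialized into TMP_REG1, SLJIT_R3 is borrowed as
      the destination below; it is pushed and popped around the loads only if
      it is live as a scratch register (compiler->scratches >= 4), and since
      the address is already held in TMP_REG1 the temporary SLJIT_SP change
      cannot invalidate memw.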
*/ + + if (mem == TMP_REG1) { + dst = SLJIT_R3; + + if (compiler->scratches >= 4) + FAIL_IF(push_inst(compiler, STR | (1 << 21) | RN(SLJIT_SP) | RD(SLJIT_R3) | 8)); + } + + mem |= SLJIT_MEM; + + FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | (type & SLJIT_MEM_UNALIGNED_16), dst, mem, memw)); + FAIL_IF(push_inst(compiler, VMOV | VN(freg) | RD(dst))); + + if (!(type & SLJIT_32)) { + FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | (type & SLJIT_MEM_UNALIGNED_16), dst, mem, memw + 4)); + FAIL_IF(push_inst(compiler, VMOV | VN(freg) | 0x80 | RD(dst))); + } + + if (dst == SLJIT_R3 && compiler->scratches >= 4) + FAIL_IF(push_inst(compiler, (LDR ^ (0x1 << 24)) | (0x1 << 23) | RN(SLJIT_SP) | RD(SLJIT_R3) | 8)); + return SLJIT_SUCCESS; +#else /* !SLJIT_CONFIG_ARM_V5 */ + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | RD(TMP_REG2))); + + if (type & SLJIT_32) + return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1); + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4)); + mem |= SLJIT_MEM; + + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1)); + FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | 0x80 | RD(TMP_REG2))); + return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw + 4, TMP_REG1); + } + + if (type & SLJIT_32) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1)); + return push_inst(compiler, VMOV | VN(freg) | RD(TMP_REG2)); + } + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4)); + mem |= SLJIT_MEM; + + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1)); + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, mem, memw + 4, TMP_REG1)); + return push_inst(compiler, VMOV2 | VM(freg) | RD(TMP_REG2) | RN(TMP_REG1)); +#endif /* SLJIT_CONFIG_ARM_V5 */ +} + +#undef FPU_LOAD + SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; diff --git a/thirdparty/pcre2/src/sljit/sljitNativeARM_64.c b/thirdparty/pcre2/src/sljit/sljitNativeARM_64.c index 96453b4abe..89f747e7c8 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeARM_64.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeARM_64.c @@ -86,6 +86,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define CSINC 0x9a800400 #define EOR 0xca000000 #define EORI 0xd2000000 +#define EXTR 0x93c00000 #define FABS 0x1e60c000 #define FADD 0x1e602800 #define FCMP 0x1e602000 @@ -98,6 +99,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define FSUB 0x1e603800 #define LDRI 0xf9400000 #define LDRI_F64 0xfd400000 +#define LDRI_POST 0xf8400400 #define LDP 0xa9400000 #define LDP_F64 0x6d400000 #define LDP_POST 0xa8c00000 @@ -112,7 +114,9 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define ORN 0xaa200000 #define ORR 0xaa000000 #define ORRI 0xb2000000 +#define RBIT 0xdac00000 #define RET 0xd65f0000 +#define RORV 0x9ac02c00 #define SBC 0xda000000 #define SBFM 0x93000000 #define SCVTF 0x9e620000 @@ -137,8 +141,6 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define UDIV 0x9ac00800 #define UMULH 0x9bc03c00 -/* dest_reg is the absolute name of the register - Useful for reordering instructions in the delay slot. 
*/ static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins) { sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); @@ -296,8 +298,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } next_addr = compute_next_addr(label, jump, const_, put_label); } - code_ptr ++; - word_count ++; + code_ptr++; + word_count++; } while (buf_ptr < buf_end); buf = buf->next; @@ -391,6 +393,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) #endif case SLJIT_HAS_CLZ: + case SLJIT_HAS_CTZ: + case SLJIT_HAS_ROT: case SLJIT_HAS_CMOV: case SLJIT_HAS_PREFETCH: return 1; @@ -631,6 +635,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s switch (op) { case SLJIT_MUL: case SLJIT_CLZ: + case SLJIT_CTZ: case SLJIT_ADDC: case SLJIT_SUBC: /* No form with immediate operand (except imm 0, which @@ -701,36 +706,50 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg))); goto set_flags; case SLJIT_SHL: + case SLJIT_MSHL: if (flags & ARG1_IMM) break; + if (flags & INT_OP) { imm &= 0x1f; - FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) - | (((sljit_ins)-imm & 0x1f) << 16) | ((31 - (sljit_ins)imm) << 10))); - } - else { + inst_bits = (((sljit_ins)-imm & 0x1f) << 16) | ((31 - (sljit_ins)imm) << 10); + } else { imm &= 0x3f; - FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) - | (((sljit_ins)-imm & 0x3f) << 16) | ((63 - (sljit_ins)imm) << 10))); + inst_bits = ((sljit_ins)1 << 22) | (((sljit_ins)-imm & 0x3f) << 16) | ((63 - (sljit_ins)imm) << 10); } + + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits)); goto set_flags; case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: if (flags & ARG1_IMM) break; - if (op == SLJIT_ASHR) + + if (op >= SLJIT_ASHR) inv_bits |= 1 << 30; + if (flags & INT_OP) { imm &= 0x1f; - FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) - | ((sljit_ins)imm << 16) | (31 << 10))); - } - else { + inst_bits = ((sljit_ins)imm << 16) | (31 << 10); + } else { imm &= 0x3f; - FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) - | (1 << 22) | ((sljit_ins)imm << 16) | (63 << 10))); + inst_bits = ((sljit_ins)1 << 22) | ((sljit_ins)imm << 16) | (63 << 10); } + + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits)); goto set_flags; + case SLJIT_ROTL: + case SLJIT_ROTR: + if (flags & ARG1_IMM) + break; + + if (op == SLJIT_ROTL) + imm = -imm; + + imm &= (flags & INT_OP) ? 
0x1f : 0x3f; + return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(dst) | RN(arg1) | RM(arg1) | ((sljit_ins)imm << 10)); default: SLJIT_UNREACHABLE(); break; @@ -796,6 +815,10 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s case SLJIT_CLZ: SLJIT_ASSERT(arg1 == TMP_REG1); return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2)); + case SLJIT_CTZ: + SLJIT_ASSERT(arg1 == TMP_REG1); + FAIL_IF(push_inst(compiler, (RBIT ^ inv_bits) | RD(dst) | RN(arg2))); + return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(dst)); case SLJIT_ADD: compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; CHECK_FLAGS(1 << 29); @@ -834,14 +857,23 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); break; /* Set flags. */ case SLJIT_SHL: + case SLJIT_MSHL: FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); break; /* Set flags. */ case SLJIT_LSHR: + case SLJIT_MLSHR: FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); break; /* Set flags. */ case SLJIT_ASHR: + case SLJIT_MASHR: FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2))); break; /* Set flags. */ + case SLJIT_ROTL: + FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(arg2))); + arg2 = TMP_REG2; + /* fallthrough */ + case SLJIT_ROTR: + return push_inst(compiler, (RORV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)); default: SLJIT_UNREACHABLE(); return SLJIT_SUCCESS; @@ -895,21 +927,37 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, s return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10)); } - if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) { - if ((argw >> shift) <= 0xfff) - return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift))); + if ((argw & ((1 << shift) - 1)) == 0) { + if (argw >= 0) { + if ((argw >> shift) <= 0xfff) + return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift))); - if (argw <= 0xffffff) { - FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10))); + if (argw <= 0xffffff) { + FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10))); - argw = ((argw & 0xfff) >> shift); + argw = ((argw & 0xfff) >> shift); + return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10)); + } + } else if (argw < -256 && argw >= -0xfff000) { + FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)(-argw + 0xfff) >> 12) << 10))); + argw = ((0x1000 + argw) & 0xfff) >> shift; return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10)); } } - if (argw <= 255 && argw >= -256) + if (argw <= 0xff && argw >= -0x100) return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | (((sljit_ins)argw & 0x1ff) << 12)); + if (argw >= 0) { + if (argw <= 0xfff0ff && ((argw + 0x100) & 0xfff) <= 0x1ff) { + FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10))); + return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12)); + } + } else if (argw >= -0xfff100 && ((-argw + 0xff) & 0xfff) <= 0x1ff) { + FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | 
(((sljit_ins)-argw >> 12) << 10))); + return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12)); + } + FAIL_IF(load_immediate(compiler, tmp_reg, argw)); return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg)); @@ -924,14 +972,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { sljit_s32 prev, fprev, saved_regs_size, i, tmp; - sljit_s32 word_arg_count = 0; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); sljit_ins offs; CHECK_ERROR(); CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2); + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 2); saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, SSIZE_OF(f64)); local_size = (local_size + saved_regs_size + 0xf) & ~0xf; @@ -954,7 +1002,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi prev = -1; tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) { if (prev == -1) { prev = i; continue; @@ -1003,23 +1051,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi if (prev != -1) FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5) | ((fprev == -1) ? (1 << 10) : 0))); - arg_types >>= SLJIT_ARG_SHIFT; #ifdef _WIN32 if (local_size > 4096) FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22))); #endif /* _WIN32 */ - tmp = 0; - while (arg_types > 0) { - if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { - if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0 - tmp) | RN(TMP_ZERO) | RM(SLJIT_R0 + word_arg_count))); + if (!(options & SLJIT_ENTER_REG_ARG)) { + arg_types >>= SLJIT_ARG_SHIFT; + saved_arg_count = 0; + tmp = SLJIT_R0; + + while (arg_types) { + if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { + if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { + FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0 - saved_arg_count) | RN(TMP_ZERO) | RM(tmp))); + saved_arg_count++; + } tmp++; } - word_arg_count++; + arg_types >>= SLJIT_ARG_SHIFT; } - arg_types >>= SLJIT_ARG_SHIFT; } #ifdef _WIN32 @@ -1100,26 +1152,34 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2); + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 2); saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, SSIZE_OF(f64)); compiler->local_size = (local_size + saved_regs_size + 0xf) & ~0xf; return SLJIT_SUCCESS; } -static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to) { sljit_s32 local_size, prev, fprev, i, tmp; sljit_ins offs; local_size = compiler->local_size; - if (local_size > 512 && local_size <= 512 + 496) { - 
FAIL_IF(push_inst(compiler, LDP_POST | RT(TMP_FP) | RT2(TMP_LR) - | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << (15 - 3)))); - local_size = 512; - } else - FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP))); + if (!is_return_to) { + if (local_size > 512 && local_size <= 512 + 496) { + FAIL_IF(push_inst(compiler, LDP_POST | RT(TMP_FP) | RT2(TMP_LR) + | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << (15 - 3)))); + local_size = 512; + } else + FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP))); + } else { + if (local_size > 512 && local_size <= 512 + 248) { + FAIL_IF(push_inst(compiler, LDRI_POST | RT(TMP_FP) | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << 12))); + local_size = 512; + } else + FAIL_IF(push_inst(compiler, LDRI | RT(TMP_FP) | RN(SLJIT_SP) | 0)); + } if (local_size > 512) { local_size -= 512; @@ -1137,7 +1197,7 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) prev = -1; tmp = SLJIT_S0 - compiler->saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) { if (prev == -1) { prev = i; continue; @@ -1195,11 +1255,34 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler CHECK_ERROR(); CHECK(check_sljit_emit_return_void(compiler)); - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, 0)); return push_inst(compiler, RET | RN(TMP_LR)); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(src))); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + /* --------------------------------------------------------------------- */ /* Operators */ /* --------------------------------------------------------------------- */ @@ -1392,13 +1475,84 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_ins inv_bits, imm; + sljit_s32 is_left; + sljit_sw mask; + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, (is_left ? 
SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + inv_bits = (op & SLJIT_32) ? W_OP : 0; + mask = inv_bits ? 0x1f : 0x3f; + + if (src2 & SLJIT_IMM) { + src2w &= mask; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG2, src2, src2w, TMP_REG2)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { + if (is_left) + src2w = (src2w ^ mask) + 1; + + return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(src_dst) + | RN(is_left ? src_dst : src1) | RM(is_left ? src1 : src_dst) | ((sljit_ins)src2w << 10)); + } + + FAIL_IF(push_inst(compiler, ((is_left ? LSLV : LSRV) ^ inv_bits) | RD(src_dst) | RN(src_dst) | RM(src2))); + + if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) { + /* Shift left/right by 1. */ + if (is_left) + imm = (sljit_ins)(inv_bits ? ((1 << 16) | (31 << 10)) : ((1 << 16) | (63 << 10) | (1 << 22))); + else + imm = (sljit_ins)(inv_bits ? ((31 << 16) | (30 << 10)) : ((63 << 16) | (62 << 10) | (1 << 22))); + + FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(TMP_REG1) | RN(src1) | imm)); + + /* Set imm to mask. */ + imm = (sljit_ins)(inv_bits ? (4 << 10) : ((5 << 10) | (1 << 22))); + FAIL_IF(push_inst(compiler, (EORI ^ inv_bits) | RD(TMP_REG2) | RN(src2) | imm)); + + src1 = TMP_REG1; + } else + FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(src2))); + + FAIL_IF(push_inst(compiler, ((is_left ? LSRV : LSLV) ^ inv_bits) | RD(TMP_REG1) | RN(src1) | RM(TMP_REG2))); + return push_inst(compiler, (ORR ^ inv_bits) | RD(src_dst) | RN(src_dst) | RM(TMP_REG1)); +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -1550,10 +1704,9 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1); src = TMP_REG1; } else if (src & SLJIT_IMM) { -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) srcw = (sljit_s32)srcw; -#endif + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); src = TMP_REG1; } @@ -1699,11 +1852,15 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) { switch (type) { case SLJIT_EQUAL: - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ return 0x1; case SLJIT_NOT_EQUAL: - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. 
*/ return 0x0; case SLJIT_CARRY: @@ -1712,7 +1869,6 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_LESS: - case SLJIT_LESS_F64: return 0x2; case SLJIT_NOT_CARRY: @@ -1721,27 +1877,33 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_GREATER_EQUAL: - case SLJIT_GREATER_EQUAL_F64: return 0x3; case SLJIT_GREATER: - case SLJIT_GREATER_F64: + case SLJIT_UNORDERED_OR_GREATER: return 0x9; case SLJIT_LESS_EQUAL: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: return 0x8; case SLJIT_SIG_LESS: + case SLJIT_UNORDERED_OR_LESS: return 0xa; case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: return 0xb; case SLJIT_SIG_GREATER: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: return 0xd; case SLJIT_SIG_LESS_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: return 0xc; case SLJIT_OVERFLOW: @@ -1749,7 +1911,7 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x0; /* fallthrough */ - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return 0x7; case SLJIT_NOT_OVERFLOW: @@ -1757,9 +1919,16 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x1; /* fallthrough */ - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return 0x6; + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + return 0x5; + + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + return 0x4; + default: SLJIT_UNREACHABLE(); return 0xe; @@ -1816,15 +1985,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(emit_stack_frame_release(compiler)); + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); } @@ -1869,10 +2034,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi CHECK_ERROR(); CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); if (!(src & SLJIT_IMM)) { if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); src = TMP_REG1; } @@ -1897,28 +2062,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi SLJIT_UNUSED_ARG(arg_types); CHECK_ERROR(); CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); src = TMP_REG1; } if (type & SLJIT_CALL_RETURN) { - if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(src))); src = TMP_REG1; } - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, 0)); type = SLJIT_JUMP; } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); } @@ 
-1933,7 +2094,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); ADJUST_LOCAL_OFFSET(dst, dstw); - cc = get_cc(compiler, type & 0xff); + cc = get_cc(compiler, type); dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; if (GET_OPCODE(op) < SLJIT_ADD) { @@ -1974,22 +2135,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil sljit_s32 dst_reg, sljit_s32 src, sljit_sw srcw) { - sljit_ins inv_bits = (dst_reg & SLJIT_32) ? W_OP : 0; + sljit_ins inv_bits = (type & SLJIT_32) ? W_OP : 0; sljit_ins cc; CHECK_ERROR(); CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { - if (dst_reg & SLJIT_32) + if (type & SLJIT_32) srcw = (sljit_s32)srcw; FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); src = TMP_REG1; srcw = 0; } - cc = get_cc(compiler, type & 0xff); - dst_reg &= ~SLJIT_32; + cc = get_cc(compiler, type & ~SLJIT_32); return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src)); } @@ -1998,11 +2158,82 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile sljit_s32 reg, sljit_s32 mem, sljit_sw memw) { - sljit_u32 sign = 0, inst; + sljit_u32 inst; CHECK_ERROR(); CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + ADJUST_LOCAL_OFFSET(mem, memw); + + if (!(mem & REG_MASK)) { + FAIL_IF(load_immediate(compiler, TMP_REG1, memw & ~0x1f8)); + + mem = SLJIT_MEM1(TMP_REG1); + memw &= 0x1f8; + } else if (mem & OFFS_REG_MASK) { + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(OFFS_REG(mem)) | ((sljit_ins)(memw & 0x3) << 10))); + + mem = SLJIT_MEM1(TMP_REG1); + memw = 0; + } else if ((memw & 0x7) != 0 || memw > 0x1f8 || memw < -0x200) { + inst = ADDI; + + if (memw < 0) { + /* Remains negative for integer min. */ + memw = -memw; + inst = SUBI; + } else if ((memw & 0x7) == 0 && memw <= 0x7ff0) { + if (!(type & SLJIT_MEM_STORE) && (mem & REG_MASK) == REG_PAIR_FIRST(reg)) { + FAIL_IF(push_inst(compiler, LDRI | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7))); + return push_inst(compiler, LDRI | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7)); + } + + inst = (type & SLJIT_MEM_STORE) ? STRI : LDRI; + + FAIL_IF(push_inst(compiler, inst | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7))); + return push_inst(compiler, inst | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7)); + } + + if ((sljit_uw)memw <= 0xfff) { + FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(mem & REG_MASK) | ((sljit_ins)memw << 10))); + memw = 0; + } else if ((sljit_uw)memw <= 0xffffff) { + FAIL_IF(push_inst(compiler, inst | (1 << 22) | RD(TMP_REG1) | RN(mem & REG_MASK) | (((sljit_ins)memw >> 12) << 10))); + + if ((memw & 0xe07) != 0) { + FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(TMP_REG1) | (((sljit_ins)memw & 0xfff) << 10))); + memw = 0; + } else { + memw &= 0xfff; + } + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, memw)); + FAIL_IF(push_inst(compiler, (inst == ADDI ? 
ADD : SUB) | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(TMP_REG1))); + memw = 0; + } + + mem = SLJIT_MEM1(TMP_REG1); + + if (inst == SUBI) + memw = -memw; + } + + SLJIT_ASSERT((memw & 0x7) == 0 && memw <= 0x1f8 && memw >= -0x200); + return push_inst(compiler, ((type & SLJIT_MEM_STORE) ? STP : LDP) | RT(REG_PAIR_FIRST(reg)) | RT2(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x3f8) << 12)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_u32 sign = 0, inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw)); + if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256)) return SLJIT_ERR_UNSUPPORTED; @@ -2042,20 +2273,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile if (!(type & SLJIT_MEM_STORE)) inst |= sign ? 0x00800000 : 0x00400000; - if (type & SLJIT_MEM_PRE) + if (!(type & SLJIT_MEM_POST)) inst |= 0x800; return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12)); } -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) { sljit_u32 inst; CHECK_ERROR(); - CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw)); if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256)) return SLJIT_ERR_UNSUPPORTED; @@ -2071,7 +2302,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil if (!(type & SLJIT_MEM_STORE)) inst |= 0x00400000; - if (type & SLJIT_MEM_PRE) + if (!(type & SLJIT_MEM_POST)) inst |= 0x800; return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12)); diff --git a/thirdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c b/thirdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c index ed21ea7daa..7d6bac077e 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c @@ -100,7 +100,6 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define ADDS 0x1800 #define ADDSI3 0x1c00 #define ADDSI8 0x3000 -#define ADD_W 0xeb000000 #define ADDWI 0xf2000000 #define ADD_SP 0x4485 #define ADD_SP_I 0xb000 @@ -131,6 +130,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define IT 0xbf00 #define LDR_SP 0x9800 #define LDR 0xf8d00000 +#define LDRD 0xe9500000 #define LDRI 0xf8500800 #define LSLS 0x4080 #define LSLSI 0x0000 @@ -160,6 +160,10 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define POP_W 0xe8bd0000 #define PUSH 0xb400 #define PUSH_W 0xe92d0000 +#define RBIT 0xfa90f0a0 +#define RORS 0x41c0 +#define ROR_W 0xfa60f000 +#define ROR_WI 0xea4f0030 #define RSB_WI 0xf1c00000 #define RSBSI 0x4240 #define SBCI 0xf1600000 @@ -167,6 +171,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define SBC_W 0xeb600000 #define SDIV 0xfb90f0f0 #define SMULL 0xfb800000 +#define STRD 0xe9400000 #define STR_SP 0x9000 #define SUBS 0x1a00 #define SUBSI3 0x1e00 @@ -434,8 +439,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } next_addr = compute_next_addr(label, jump, const_, put_label); } - code_ptr ++; - half_count ++; + code_ptr++; + 
half_count++; } while (buf_ptr < buf_end); buf = buf->next; @@ -491,6 +496,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) #endif case SLJIT_HAS_CLZ: + case SLJIT_HAS_CTZ: + case SLJIT_HAS_ROT: case SLJIT_HAS_CMOV: case SLJIT_HAS_PREFETCH: return 1; @@ -592,7 +599,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s arg1 must be register, imm arg2 must be register, imm */ sljit_s32 reg; - sljit_uw imm, nimm; + sljit_uw imm, imm2; if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) { /* Both are immediates, no temporaries are used. */ @@ -607,6 +614,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s switch (flags & 0xffff) { case SLJIT_CLZ: + case SLJIT_CTZ: case SLJIT_MUL: /* No form with immediate operand. */ break; @@ -621,31 +629,31 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s break; case SLJIT_ADD: compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; - nimm = NEGATE(imm); + imm2 = NEGATE(imm); if (IS_2_LO_REGS(reg, dst)) { if (imm <= 0x7) return push_inst16(compiler, ADDSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); - if (nimm <= 0x7) - return push_inst16(compiler, SUBSI3 | IMM3(nimm) | RD3(dst) | RN3(reg)); + if (imm2 <= 0x7) + return push_inst16(compiler, SUBSI3 | IMM3(imm2) | RD3(dst) | RN3(reg)); if (reg == dst) { if (imm <= 0xff) return push_inst16(compiler, ADDSI8 | IMM8(imm) | RDN3(dst)); - if (nimm <= 0xff) - return push_inst16(compiler, SUBSI8 | IMM8(nimm) | RDN3(dst)); + if (imm2 <= 0xff) + return push_inst16(compiler, SUBSI8 | IMM8(imm2) | RDN3(dst)); } } if (!(flags & SET_FLAGS)) { if (imm <= 0xfff) return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm)); - if (nimm <= 0xfff) - return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(nimm)); + if (imm2 <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm2)); } - nimm = get_imm(imm); - if (nimm != INVALID_IMM) - return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); - nimm = get_imm(NEGATE(imm)); - if (nimm != INVALID_IMM) - return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + imm2 = get_imm(imm); + if (imm2 != INVALID_IMM) + return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2); + imm = get_imm(NEGATE(imm)); + if (imm != INVALID_IMM) + return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_ADDC: compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; @@ -666,39 +674,39 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s if (flags & UNUSED_RETURN) { if (imm <= 0xff && reg_map[reg] <= 7) return push_inst16(compiler, CMPI | IMM8(imm) | RDN3(reg)); - nimm = get_imm(imm); - if (nimm != INVALID_IMM) - return push_inst32(compiler, CMPI_W | RN4(reg) | nimm); - nimm = get_imm(NEGATE(imm)); - if (nimm != INVALID_IMM) - return push_inst32(compiler, CMNI_W | RN4(reg) | nimm); + imm2 = get_imm(imm); + if (imm2 != INVALID_IMM) + return push_inst32(compiler, CMPI_W | RN4(reg) | imm2); + imm = get_imm(NEGATE(imm)); + if (imm != INVALID_IMM) + return push_inst32(compiler, CMNI_W | RN4(reg) | imm); break; } - nimm = NEGATE(imm); + imm2 = NEGATE(imm); if (IS_2_LO_REGS(reg, dst)) { if (imm <= 0x7) return push_inst16(compiler, SUBSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); - if (nimm <= 0x7) - return push_inst16(compiler, ADDSI3 | IMM3(nimm) | 
RD3(dst) | RN3(reg)); + if (imm2 <= 0x7) + return push_inst16(compiler, ADDSI3 | IMM3(imm2) | RD3(dst) | RN3(reg)); if (reg == dst) { if (imm <= 0xff) return push_inst16(compiler, SUBSI8 | IMM8(imm) | RDN3(dst)); - if (nimm <= 0xff) - return push_inst16(compiler, ADDSI8 | IMM8(nimm) | RDN3(dst)); + if (imm2 <= 0xff) + return push_inst16(compiler, ADDSI8 | IMM8(imm2) | RDN3(dst)); } } if (!(flags & SET_FLAGS)) { if (imm <= 0xfff) return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm)); - if (nimm <= 0xfff) - return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(nimm)); + if (imm2 <= 0xfff) + return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm2)); } - nimm = get_imm(imm); - if (nimm != INVALID_IMM) - return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); - nimm = get_imm(NEGATE(imm)); - if (nimm != INVALID_IMM) - return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + imm2 = get_imm(imm); + if (imm2 != INVALID_IMM) + return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2); + imm = get_imm(NEGATE(imm)); + if (imm != INVALID_IMM) + return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_SUBC: compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB; @@ -709,17 +717,17 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_AND: - nimm = get_imm(imm); - if (nimm != INVALID_IMM) - return push_inst32(compiler, ((flags & UNUSED_RETURN) ? TSTI : ANDI) | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + imm2 = get_imm(imm); + if (imm2 != INVALID_IMM) + return push_inst32(compiler, ((flags & UNUSED_RETURN) ? 
TSTI : ANDI) | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2); imm = get_imm(~imm); if (imm != INVALID_IMM) return push_inst32(compiler, BICI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_OR: - nimm = get_imm(imm); - if (nimm != INVALID_IMM) - return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | nimm); + imm2 = get_imm(imm); + if (imm2 != INVALID_IMM) + return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2); imm = get_imm(~imm); if (imm != INVALID_IMM) return push_inst32(compiler, ORNI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); @@ -730,11 +738,17 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s return push_inst32(compiler, EORI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); break; case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: + case SLJIT_ROTL: + case SLJIT_ROTR: if (flags & ARG1_IMM) break; imm &= 0x1f; + if (imm == 0) { if (!(flags & SET_FLAGS)) return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); @@ -742,19 +756,28 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); } + switch (flags & 0xffff) { case SLJIT_SHL: + case SLJIT_MSHL: if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, LSLSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, LSL_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); case SLJIT_LSHR: + case SLJIT_MLSHR: if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, LSRSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, LSR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); - default: /* SLJIT_ASHR */ + case SLJIT_ASHR: + case SLJIT_MASHR: if (IS_2_LO_REGS(dst, reg)) return push_inst16(compiler, ASRSI | RD3(dst) | RN3(reg) | (imm << 6)); return push_inst32(compiler, ASR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); + case SLJIT_ROTL: + imm = (imm ^ 0x1f) + 1; + /* fallthrough */ + default: /* SLJIT_ROTR */ + return push_inst32(compiler, ROR_WI | RD4(dst) | RM4(reg) | IMM5(imm)); } default: SLJIT_UNREACHABLE(); @@ -813,8 +836,11 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(arg2)); case SLJIT_CLZ: SLJIT_ASSERT(arg1 == TMP_REG2); - FAIL_IF(push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2))); - return SLJIT_SUCCESS; + return push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2)); + case SLJIT_CTZ: + SLJIT_ASSERT(arg1 == TMP_REG2); + FAIL_IF(push_inst32(compiler, RBIT | RN4(arg2) | RD4(dst) | RM4(arg2))); + return push_inst32(compiler, CLZ | RN4(dst) | RD4(dst) | RM4(dst)); case SLJIT_ADD: compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; if (IS_3_LO_REGS(dst, arg1, arg2)) @@ -864,18 +890,38 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, EORS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, EOR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_MSHL: + FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f)); + arg2 = TMP_REG2; + /* fallthrough */ case SLJIT_SHL: if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, LSLS | 
RD3(dst) | RN3(arg2)); return push_inst32(compiler, LSL_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_MLSHR: + FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f)); + arg2 = TMP_REG2; + /* fallthrough */ case SLJIT_LSHR: if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, LSRS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, LSR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_MASHR: + FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(arg2) | 0x1f)); + arg2 = TMP_REG2; + /* fallthrough */ case SLJIT_ASHR: if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2)) return push_inst16(compiler, ASRS | RD3(dst) | RN3(arg2)); return push_inst32(compiler, ASR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_ROTL: + FAIL_IF(push_inst32(compiler, RSB_WI | RD4(TMP_REG2) | RN4(arg2) | 0)); + arg2 = TMP_REG2; + /* fallthrough */ + case SLJIT_ROTR: + if (dst == (sljit_s32)arg1 && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, RORS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, ROR_W | RD4(dst) | RN4(arg1) | RM4(arg2)); } SLJIT_UNREACHABLE(); @@ -890,8 +936,8 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s #define HALF_SIZE 0x08 #define PRELOAD 0x0c -#define IS_WORD_SIZE(flags) (!(flags & (BYTE_SIZE | HALF_SIZE))) -#define OFFSET_CHECK(imm, shift) (!(argw & ~(imm << shift))) +#define IS_WORD_SIZE(flags) (!((flags) & (BYTE_SIZE | HALF_SIZE))) +#define ALIGN_CHECK(argw, imm, shift) (!((argw) & ~((imm) << (shift)))) /* 1st letter: @@ -990,16 +1036,15 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg) { sljit_s32 other_r; - sljit_uw tmp; + sljit_uw imm, tmp; SLJIT_ASSERT(arg & SLJIT_MEM); - SLJIT_ASSERT((arg & REG_MASK) != tmp_reg); - arg &= ~SLJIT_MEM; + SLJIT_ASSERT((arg & REG_MASK) != tmp_reg || (arg == SLJIT_MEM1(tmp_reg) && argw >= -0xff && argw <= 0xfff)); if (SLJIT_UNLIKELY(!(arg & REG_MASK))) { - tmp = get_imm((sljit_uw)argw & ~(sljit_uw)0xfff); - if (tmp != INVALID_IMM) { - FAIL_IF(push_inst32(compiler, MOV_WI | RD4(tmp_reg) | tmp)); + imm = get_imm((sljit_uw)argw & ~(sljit_uw)0xfff); + if (imm != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(tmp_reg) | imm)); return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(tmp_reg) | (argw & 0xfff)); } @@ -1012,51 +1057,59 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { argw &= 0x3; other_r = OFFS_REG(arg); - arg &= 0xf; + arg &= REG_MASK; if (!argw && IS_3_LO_REGS(reg, arg, other_r)) return push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(other_r)); return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(other_r) | ((sljit_ins)argw << 4)); } + arg &= REG_MASK; + if (argw > 0xfff) { - tmp = get_imm((sljit_uw)argw & ~(sljit_uw)0xfff); - if (tmp != INVALID_IMM) { - push_inst32(compiler, ADD_WI | RD4(tmp_reg) | RN4(arg) | tmp); + imm = get_imm((sljit_uw)(argw & ~0xfff)); + if (imm != INVALID_IMM) { + push_inst32(compiler, ADD_WI | RD4(tmp_reg) | RN4(arg) | imm); arg = tmp_reg; argw = argw & 0xfff; } } else if (argw < -0xff) { - tmp = get_imm((sljit_uw)-argw & ~(sljit_uw)0xff); - if (tmp != INVALID_IMM) { - push_inst32(compiler, SUB_WI | RD4(tmp_reg) | RN4(arg) | tmp); + tmp = (sljit_uw)((-argw + 0xfff) & ~0xfff); + SLJIT_ASSERT(tmp >= 
(sljit_uw)-argw); + imm = get_imm(tmp); + + if (imm != INVALID_IMM) { + push_inst32(compiler, SUB_WI | RD4(tmp_reg) | RN4(arg) | imm); arg = tmp_reg; - argw = -(-argw & 0xff); + argw += (sljit_sw)tmp; + + SLJIT_ASSERT(argw >= 0 && argw <= 0xfff); } } + /* 16 bit instruction forms. */ if (IS_2_LO_REGS(reg, arg) && sljit_mem16_imm5[flags]) { tmp = 3; if (IS_WORD_SIZE(flags)) { - if (OFFSET_CHECK(0x1f, 2)) + if (ALIGN_CHECK(argw, 0x1f, 2)) tmp = 2; } else if (flags & BYTE_SIZE) { - if (OFFSET_CHECK(0x1f, 0)) + if (ALIGN_CHECK(argw, 0x1f, 0)) tmp = 0; } else { SLJIT_ASSERT(flags & HALF_SIZE); - if (OFFSET_CHECK(0x1f, 1)) + if (ALIGN_CHECK(argw, 0x1f, 1)) tmp = 1; } if (tmp < 3) return push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | ((sljit_ins)argw << (6 - tmp))); } - else if (SLJIT_UNLIKELY(arg == SLJIT_SP) && IS_WORD_SIZE(flags) && OFFSET_CHECK(0xff, 2) && reg_map[reg] <= 7) { + else if (SLJIT_UNLIKELY(arg == SLJIT_SP) && IS_WORD_SIZE(flags) && ALIGN_CHECK(argw, 0xff, 2) && reg_map[reg] <= 7) { /* SP based immediate. */ return push_inst16(compiler, STR_SP | (sljit_ins)((flags & STORE) ? 0 : 0x800) | RDN3(reg) | ((sljit_ins)argw >> 2)); } @@ -1074,6 +1127,9 @@ static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp_reg)); } +#undef ALIGN_CHECK +#undef IS_WORD_SIZE + /* --------------------------------------------------------------------- */ /* Entry, exit */ /* --------------------------------------------------------------------- */ @@ -1082,7 +1138,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_s32 size, i, tmp, word_arg_count, saved_arg_count; + sljit_s32 size, i, tmp, word_arg_count; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); sljit_uw offset; sljit_uw imm = 0; #ifdef __SOFTFP__ @@ -1098,7 +1155,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) imm |= (sljit_uw)1 << reg_map[i]; for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) @@ -1110,7 +1167,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi : push_inst16(compiler, PUSH | (1 << 8) | imm)); /* Stack must be aligned to 8 bytes: (LR, R4) */ - size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1); if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { if ((size & SSIZE_OF(sw)) != 0) { @@ -1131,6 +1188,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi local_size = ((size + local_size + 0x7) & ~0x7) - size; compiler->local_size = local_size; + if (options & SLJIT_ENTER_REG_ARG) + arg_types = 0; + arg_types >>= SLJIT_ARG_SHIFT; word_arg_count = 0; saved_arg_count = 0; @@ -1173,13 +1233,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi else break; - SLJIT_ASSERT(reg_map[tmp] <= 7); - if (offset < 4 * sizeof(sljit_sw)) - FAIL_IF(push_inst16(compiler, MOV | RD3(tmp) | (offset << 1))); - else + FAIL_IF(push_inst16(compiler, MOV | ((sljit_ins)reg_map[tmp] & 0x7) | 
(((sljit_ins)reg_map[tmp] & 0x8) << 4) | (offset << 1))); + else if (reg_map[tmp] <= 7) FAIL_IF(push_inst16(compiler, LDR_SP | RDN3(tmp) | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2))); + else + FAIL_IF(push_inst32(compiler, LDR | RT4(tmp) | RN4(SLJIT_SP) + | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw))))); break; } @@ -1293,7 +1354,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1); if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)) size += SSIZE_OF(sw); @@ -1325,8 +1386,9 @@ static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm) static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size) { sljit_s32 local_size, fscratches, fsaveds, i, tmp; + sljit_s32 restored_reg = 0; sljit_s32 lr_dst = TMP_PC; - sljit_uw reg_list; + sljit_uw reg_list = 0; SLJIT_ASSERT(reg_map[TMP_REG2] == 14 && frame_size <= 128); @@ -1353,46 +1415,88 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit if (frame_size < 0) { lr_dst = TMP_REG2; frame_size = 0; - } else if (frame_size > 0) + } else if (frame_size > 0) { + SLJIT_ASSERT(frame_size == 1 || (frame_size & 0x7) == 0); lr_dst = 0; + frame_size &= ~0x7; + } - reg_list = 0; tmp = SLJIT_S0 - compiler->saveds; - for (i = SLJIT_S0; i > tmp; i--) - reg_list |= (sljit_uw)1 << reg_map[i]; + i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + if (tmp < i) { + restored_reg = i; + do { + reg_list |= (sljit_uw)1 << reg_map[i]; + } while (--i > tmp); + } + + i = compiler->scratches; + if (i >= SLJIT_FIRST_SAVED_REG) { + restored_reg = i; + do { + reg_list |= (sljit_uw)1 << reg_map[i]; + } while (--i >= SLJIT_FIRST_SAVED_REG); + } - for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) - reg_list |= (sljit_uw)1 << reg_map[i]; + if (lr_dst == TMP_REG2 && reg_list == 0) { + reg_list |= (sljit_uw)1 << reg_map[TMP_REG2]; + restored_reg = TMP_REG2; + lr_dst = 0; + } if (lr_dst == 0 && (reg_list & (reg_list - 1)) == 0) { /* The local_size does not include the saved registers. */ - local_size += SSIZE_OF(sw); + tmp = 0; + if (reg_list != 0) { + tmp = 2; + if (local_size <= 0xfff) { + if (local_size == 0) { + SLJIT_ASSERT(restored_reg != TMP_REG2); + if (frame_size == 0) + return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | 0x308); + if (frame_size > 2 * SSIZE_OF(sw)) + return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | 0x100 | (sljit_ins)(frame_size - (2 * SSIZE_OF(sw)))); + } + + if (reg_map[restored_reg] <= 7 && local_size <= 0x3fc) + FAIL_IF(push_inst16(compiler, STR_SP | 0x800 | RDN3(restored_reg) | (sljit_ins)(local_size >> 2))); + else + FAIL_IF(push_inst32(compiler, LDR | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)local_size)); + tmp = 1; + } else if (frame_size == 0) { + frame_size = (restored_reg == TMP_REG2) ? SSIZE_OF(sw) : 2 * SSIZE_OF(sw); + tmp = 3; + } + + /* Place for the saved register. */ + if (restored_reg != TMP_REG2) + local_size += SSIZE_OF(sw); + } - if (reg_list != 0) - local_size += SSIZE_OF(sw); + /* Place for the lr register. 
*/ + local_size += SSIZE_OF(sw); if (frame_size > local_size) - FAIL_IF(push_inst16(compiler, SUB_SP_I | ((sljit_uw)(frame_size - local_size) >> 2))); + FAIL_IF(push_inst16(compiler, SUB_SP_I | ((sljit_ins)(frame_size - local_size) >> 2))); else if (frame_size < local_size) FAIL_IF(emit_add_sp(compiler, (sljit_uw)(local_size - frame_size))); - if (reg_list == 0) + if (tmp <= 1) return SLJIT_SUCCESS; - if (compiler->saveds > 0) { - SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[SLJIT_S0])); - lr_dst = SLJIT_S0; - } else { - SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[SLJIT_FIRST_SAVED_REG])); - lr_dst = SLJIT_FIRST_SAVED_REG; - } + if (tmp == 2) { + frame_size -= SSIZE_OF(sw); + if (restored_reg != TMP_REG2) + frame_size -= SSIZE_OF(sw); - frame_size -= 2 * SSIZE_OF(sw); + if (reg_map[restored_reg] <= 7) + return push_inst16(compiler, STR_SP | 0x800 | RDN3(restored_reg) | (sljit_ins)(frame_size >> 2)); - if (reg_map[lr_dst] <= 7) - return push_inst16(compiler, STR_SP | 0x800 | RDN3(lr_dst) | (sljit_uw)(frame_size >> 2)); + return push_inst32(compiler, LDR | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)frame_size); + } - return push_inst32(compiler, LDR | RT4(lr_dst) | RN4(SLJIT_SP) | (sljit_uw)frame_size); + tmp = (restored_reg == TMP_REG2) ? 0x304 : 0x308; + return push_inst32(compiler, LDRI | RT4(restored_reg) | RN4(SLJIT_SP) | (sljit_ins)tmp); } if (local_size > 0) @@ -1407,12 +1511,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit FAIL_IF(push_inst16(compiler, POP | reg_list)); } else { - if (lr_dst != 0) { - if (reg_list == 0) - return push_inst32(compiler, 0xf85d0b04 | RT4(lr_dst)); - + if (lr_dst != 0) reg_list |= (sljit_uw)1 << reg_map[lr_dst]; - } /* At least two registers must be set for POP_W instruction. 
*/ SLJIT_ASSERT((reg_list & (reg_list - 1)) != 0); @@ -1421,8 +1521,12 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit } if (frame_size > 0) - return push_inst16(compiler, SUB_SP_I | (((sljit_uw)frame_size - sizeof(sljit_sw)) >> 2)); - return SLJIT_SUCCESS; + return push_inst16(compiler, SUB_SP_I | (((sljit_ins)frame_size - sizeof(sljit_sw)) >> 2)); + + if (lr_dst != 0) + return SLJIT_SUCCESS; + + return push_inst16(compiler, ADD_SP_I | 1); } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) @@ -1433,6 +1537,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler return emit_stack_frame_release(compiler, 0); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1)); + src = TMP_REG1; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, src))); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + /* --------------------------------------------------------------------- */ /* Operators */ /* --------------------------------------------------------------------- */ @@ -1685,13 +1811,75 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_left; + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + op = GET_OPCODE(op); + is_left = (op == SLJIT_SHL || op == SLJIT_MSHL); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (src2 & SLJIT_IMM) { + src2w &= 0x1f; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src2, src2w, TMP_REG2)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { + if (reg_map[src_dst] <= 7) + FAIL_IF(push_inst16(compiler, (is_left ? LSLSI : LSRSI) | RD3(src_dst) | RN3(src_dst) | ((sljit_ins)src2w << 6))); + else + FAIL_IF(push_inst32(compiler, (is_left ? 
LSL_WI : LSR_WI) | RD4(src_dst) | RM4(src_dst) | IMM5(src2w))); + + src2w = (src2w ^ 0x1f) + 1; + return push_inst32(compiler, ORR_W | RD4(src_dst) | RN4(src_dst) | RM4(src1) | (is_left ? 0x10 : 0x0) | IMM5(src2w)); + } + + if (op == SLJIT_MSHL || op == SLJIT_MLSHR) { + FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(src2) | 0x1f)); + src2 = TMP_REG2; + } + + if (IS_2_LO_REGS(src_dst, src2)) + FAIL_IF(push_inst16(compiler, (is_left ? LSLS : LSRS) | RD3(src_dst) | RN3(src2))); + else + FAIL_IF(push_inst32(compiler, (is_left ? LSL_W : LSR_W) | RD4(src_dst) | RN4(src_dst) | RM4(src2))); + + FAIL_IF(push_inst32(compiler, (is_left ? LSR_WI : LSL_WI) | RD4(TMP_REG1) | RM4(src1) | (1 << 6))); + FAIL_IF(push_inst32(compiler, EORI | RD4(TMP_REG2) | RN4(src2) | 0x1f)); + FAIL_IF(push_inst32(compiler, (is_left ? LSR_W : LSL_W) | RD4(TMP_REG1) | RN4(TMP_REG1) | RM4(TMP_REG2))); + return push_inst32(compiler, ORR_W | RD4(src_dst) | RN4(src_dst) | RM4(TMP_REG1)); +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -1955,8 +2143,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil return emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw); } -#undef FPU_LOAD - /* --------------------------------------------------------------------- */ /* Other instructions */ /* --------------------------------------------------------------------- */ @@ -1984,11 +2170,15 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) { switch (type) { case SLJIT_EQUAL: - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ return 0x0; case SLJIT_NOT_EQUAL: - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. 
*/ return 0x1; case SLJIT_CARRY: @@ -1997,7 +2187,6 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_LESS: - case SLJIT_LESS_F64: return 0x3; case SLJIT_NOT_CARRY: @@ -2006,27 +2195,33 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) /* fallthrough */ case SLJIT_GREATER_EQUAL: - case SLJIT_GREATER_EQUAL_F64: return 0x2; case SLJIT_GREATER: - case SLJIT_GREATER_F64: + case SLJIT_UNORDERED_OR_GREATER: return 0x8; case SLJIT_LESS_EQUAL: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: return 0x9; case SLJIT_SIG_LESS: + case SLJIT_UNORDERED_OR_LESS: return 0xb; case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: return 0xa; case SLJIT_SIG_GREATER: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: return 0xc; case SLJIT_SIG_LESS_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: return 0xd; case SLJIT_OVERFLOW: @@ -2034,7 +2229,7 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x1; /* fallthrough */ - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return 0x6; case SLJIT_NOT_OVERFLOW: @@ -2042,9 +2237,16 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type) return 0x0; /* fallthrough */ - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return 0x7; + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + return 0x4; + + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + return 0x5; + default: /* SLJIT_JUMP */ SLJIT_UNREACHABLE(); return 0xe; @@ -2289,52 +2491,49 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); #ifdef __SOFTFP__ - PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space)); - SLJIT_ASSERT((extra_space & 0x7) == 0); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) { + PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space)); + SLJIT_ASSERT((extra_space & 0x7) == 0); - if ((type & SLJIT_CALL_RETURN) && extra_space == 0) - type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); + if ((type & SLJIT_CALL_RETURN) && extra_space == 0) + type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); + jump = sljit_emit_jump(compiler, type); + PTR_FAIL_IF(jump == NULL); - jump = sljit_emit_jump(compiler, type); - PTR_FAIL_IF(jump == NULL); + if (extra_space > 0) { + if (type & SLJIT_CALL_RETURN) + PTR_FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2) + | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw)))); - if (extra_space > 0) { - if (type & SLJIT_CALL_RETURN) - PTR_FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2) - | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw)))); + PTR_FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2))); - PTR_FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2))); - - if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(push_inst16(compiler, BX | RN3(TMP_REG2))); - return jump; + if (type & SLJIT_CALL_RETURN) { + PTR_FAIL_IF(push_inst16(compiler, BX | RN3(TMP_REG2))); + return jump; + } } + + SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); + PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); + return jump; } +#endif /* __SOFTFP__ */ - SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); - PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types)); - return jump; 
-#else if (type & SLJIT_CALL_RETURN) { /* ldmia sp!, {..., lr} */ PTR_FAIL_IF(emit_stack_frame_release(compiler, -1)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } - PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif +#ifndef __SOFTFP__ + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); +#endif /* !__SOFTFP__ */ + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); -#endif } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) @@ -2385,56 +2584,80 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi src = TMP_REG1; } - if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0)) { + if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) { FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG1, src))); src = TMP_REG1; } #ifdef __SOFTFP__ - FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space)); - SLJIT_ASSERT((extra_space & 0x7) == 0); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) { + FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space)); + SLJIT_ASSERT((extra_space & 0x7) == 0); - if ((type & SLJIT_CALL_RETURN) && extra_space == 0) - type = SLJIT_JUMP; + if ((type & SLJIT_CALL_RETURN) && extra_space == 0) + type = SLJIT_JUMP; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); - FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + if (extra_space > 0) { + if (type & SLJIT_CALL_RETURN) + FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2) + | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw)))); - if (extra_space > 0) { - if (type & SLJIT_CALL_RETURN) - FAIL_IF(push_inst32(compiler, LDR | RT4(TMP_REG2) - | RN4(SLJIT_SP) | (extra_space - sizeof(sljit_sw)))); + FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2))); - FAIL_IF(push_inst16(compiler, ADD_SP_I | (extra_space >> 2))); + if (type & SLJIT_CALL_RETURN) + return push_inst16(compiler, BX | RN3(TMP_REG2)); + } - if (type & SLJIT_CALL_RETURN) - return push_inst16(compiler, BX | RN3(TMP_REG2)); + SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); + return softfloat_post_call_with_args(compiler, arg_types); } +#endif /* __SOFTFP__ */ - SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN)); - return softfloat_post_call_with_args(compiler, arg_types); -#else /* !__SOFTFP__ */ if (type & SLJIT_CALL_RETURN) { /* ldmia sp!, {..., lr} */ FAIL_IF(emit_stack_frame_release(compiler, -1)); type = SLJIT_JUMP; } - FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif +#ifndef __SOFTFP__ + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + FAIL_IF(hardfloat_call_with_args(compiler, arg_types)); +#endif /* !__SOFTFP__ */ + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); -#endif /* __SOFTFP__ */ } +#ifdef __SOFTFP__ + +static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, 
sljit_sw srcw) +{ + if (compiler->options & SLJIT_ENTER_REG_ARG) { + if (src == SLJIT_FR0) + return SLJIT_SUCCESS; + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw); + } + + if (FAST_IS_REG(src)) { + if (op & SLJIT_32) + return push_inst32(compiler, VMOV | (1 << 20) | DN4(src) | RT4(SLJIT_R0)); + return push_inst32(compiler, VMOV2 | (1 << 20) | DM4(src) | RT4(SLJIT_R0) | RN4(SLJIT_R1)); + } + + SLJIT_SKIP_CHECKS(compiler); + + if (op & SLJIT_32) + return sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, src, srcw); + return sljit_emit_mem(compiler, SLJIT_MOV, SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1), src, srcw); +} + +#endif /* __SOFTFP__ */ + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw dstw, sljit_s32 type) @@ -2447,7 +2670,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co ADJUST_LOCAL_OFFSET(dst, dstw); op = GET_OPCODE(op); - cc = get_cc(compiler, type & 0xff); + cc = get_cc(compiler, type); dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; if (op < SLJIT_ADD) { @@ -2497,9 +2720,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); - dst_reg &= ~SLJIT_32; - - cc = get_cc(compiler, type & 0xff); + cc = get_cc(compiler, type & ~SLJIT_32); if (!(src & SLJIT_IMM)) { FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); @@ -2541,11 +2762,186 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile sljit_s32 mem, sljit_sw memw) { sljit_s32 flags; - sljit_ins inst; + sljit_uw imm, tmp; CHECK_ERROR(); CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + if (type & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32)) { + if ((mem & REG_MASK) == 0) { + if ((memw & 0xfff) >= (0x1000 - SSIZE_OF(sw))) { + imm = get_imm((sljit_uw)((memw + 0x1000) & ~0xfff)); + + if (imm != INVALID_IMM) + memw = (memw & 0xfff) - 0x1000; + } else { + imm = get_imm((sljit_uw)(memw & ~0xfff)); + + if (imm != INVALID_IMM) + memw &= 0xff; + } + + if (imm == INVALID_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + memw = 0; + } else + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(TMP_REG1) | imm)); + + mem = SLJIT_MEM1(TMP_REG1); + } else if (mem & OFFS_REG_MASK) { + FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6))); + memw = 0; + mem = SLJIT_MEM1(TMP_REG1); + } else if (memw < -0xff) { + /* Zero value can be included in the first case. 
*/ + if ((-memw & 0xfff) <= SSIZE_OF(sw)) + tmp = (sljit_uw)((-memw + 0x7ff) & ~0x7ff); + else + tmp = (sljit_uw)((-memw + 0xfff) & ~0xfff); + + SLJIT_ASSERT(tmp >= (sljit_uw)-memw); + imm = get_imm(tmp); + + if (imm != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm)); + memw += (sljit_sw)tmp; + SLJIT_ASSERT(memw >= 0 && memw <= 0xfff - SSIZE_OF(sw)); + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK))); + memw = 0; + } + + mem = SLJIT_MEM1(TMP_REG1); + } else if (memw >= (0x1000 - SSIZE_OF(sw))) { + if ((memw & 0xfff) >= (0x1000 - SSIZE_OF(sw))) { + imm = get_imm((sljit_uw)((memw + 0x1000) & ~0xfff)); + + if (imm != INVALID_IMM) + memw = (memw & 0xfff) - 0x1000; + } else { + imm = get_imm((sljit_uw)(memw & ~0xfff)); + + if (imm != INVALID_IMM) + memw &= 0xfff; + } + + if (imm != INVALID_IMM) { + SLJIT_ASSERT(memw >= -0xff && memw <= 0xfff); + FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm)); + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK))); + memw = 0; + } + + mem = SLJIT_MEM1(TMP_REG1); + } + + flags = WORD_SIZE; + + SLJIT_ASSERT(memw <= 0xfff - SSIZE_OF(sw) && memw >= -0xff); + + if (type & SLJIT_MEM_STORE) { + flags |= STORE; + } else if (REG_PAIR_FIRST(reg) == (mem & REG_MASK)) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, REG_PAIR_SECOND(reg), mem, memw + SSIZE_OF(sw), TMP_REG2)); + return emit_op_mem(compiler, WORD_SIZE, REG_PAIR_FIRST(reg), mem, memw, TMP_REG2); + } + + FAIL_IF(emit_op_mem(compiler, flags, REG_PAIR_FIRST(reg), mem, memw, TMP_REG2)); + return emit_op_mem(compiler, flags, REG_PAIR_SECOND(reg), mem, memw + SSIZE_OF(sw), TMP_REG2); + } + + flags = 1 << 23; + + if ((mem & REG_MASK) == 0) { + tmp = (sljit_uw)(memw & 0x7fc); + imm = get_imm((sljit_uw)((memw + (tmp <= 0x400 ? 0 : 0x400)) & ~0x3fc)); + + if (imm == INVALID_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + memw = 0; + } else { + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(TMP_REG1) | imm)); + memw = (memw & 0x3fc) >> 2; + + if (tmp > 0x400) { + memw = 0x100 - memw; + flags = 0; + } + + SLJIT_ASSERT(memw >= 0 && memw <= 0xff); + } + + mem = SLJIT_MEM1(TMP_REG1); + } else if (mem & OFFS_REG_MASK) { + FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6))); + memw = 0; + mem = SLJIT_MEM1(TMP_REG1); + } else if (memw < 0) { + if ((-memw & ~0x3fc) == 0) { + flags = 0; + memw = -memw >> 2; + } else { + tmp = (sljit_uw)(-memw & 0x7fc); + imm = get_imm((sljit_uw)((-memw + (tmp <= 0x400 ? 0 : 0x400)) & ~0x3fc)); + + if (imm != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm)); + memw = (-memw & 0x3fc) >> 2; + + if (tmp <= 0x400) + flags = 0; + else + memw = 0x100 - memw; + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK))); + memw = 0; + } + + mem = SLJIT_MEM1(TMP_REG1); + } + } else if ((memw & ~0x3fc) != 0) { + tmp = (sljit_uw)(memw & 0x7fc); + imm = get_imm((sljit_uw)((memw + (tmp <= 0x400 ? 
0 : 0x400)) & ~0x3fc)); + + if (imm != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(mem & REG_MASK) | imm)); + memw = (memw & 0x3fc) >> 2; + + if (tmp > 0x400) { + memw = 0x100 - memw; + flags = 0; + } + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw)); + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem & REG_MASK))); + memw = 0; + } + + mem = SLJIT_MEM1(TMP_REG1); + } else + memw >>= 2; + + SLJIT_ASSERT(memw >= 0 && memw <= 0xff); + return push_inst32(compiler, ((type & SLJIT_MEM_STORE) ? STRD : LDRD) | (sljit_ins)flags | RN4(mem & REG_MASK) | RT4(REG_PAIR_FIRST(reg)) | RD4(REG_PAIR_SECOND(reg)) | (sljit_ins)memw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags; + sljit_ins inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw)); + if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -255)) return SLJIT_ERR_UNSUPPORTED; @@ -2583,7 +2979,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile inst = sljit_mem32[flags] | 0x900; - if (type & SLJIT_MEM_PRE) + if (!(type & SLJIT_MEM_POST)) inst |= 0x400; if (memw >= 0) @@ -2594,6 +2990,106 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile return push_inst32(compiler, inst | RT4(reg) | RN4(mem & REG_MASK) | (sljit_ins)memw); } +static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s32 max_offset) +{ + sljit_s32 arg = *mem; + sljit_sw argw = *memw; + sljit_uw imm; + + *mem = TMP_REG1; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + *memw = 0; + return push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(arg & REG_MASK) | RM4(OFFS_REG(arg)) | ((sljit_uw)(argw & 0x3) << 6)); + } + + arg &= REG_MASK; + + if (arg) { + if (argw <= max_offset && argw >= -0xff) { + *mem = arg; + return SLJIT_SUCCESS; + } + + if (argw < 0) { + imm = get_imm((sljit_uw)(-argw & ~0xff)); + + if (imm) { + *memw = -(-argw & 0xff); + return push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg) | imm); + } + } else if ((argw & 0xfff) <= max_offset) { + imm = get_imm((sljit_uw)(argw & ~0xfff)); + + if (imm) { + *memw = argw & 0xfff; + return push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg) | imm); + } + } else { + imm = get_imm((sljit_uw)((argw | 0xfff) + 1)); + + if (imm) { + *memw = (argw & 0xfff) - 0x1000; + return push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg) | imm); + } + } + } + + imm = (sljit_uw)(argw & ~0xfff); + + if ((argw & 0xfff) > max_offset) { + imm += 0x1000; + *memw = (argw & 0xfff) - 0x1000; + } else + *memw = argw & 0xfff; + + FAIL_IF(load_immediate(compiler, TMP_REG1, imm)); + + if (arg == 0) + return SLJIT_SUCCESS; + + return push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, arg)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + + if (type & SLJIT_MEM_UNALIGNED_32) + return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 
0 : FPU_LOAD), freg, mem, memw); + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | DN4(freg) | RT4(TMP_REG2))); + + if (type & SLJIT_32) + return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1); + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4)); + mem |= SLJIT_MEM; + + FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1)); + FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | DN4(freg) | 0x80 | RT4(TMP_REG2))); + return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw + 4, TMP_REG1); + } + + if (type & SLJIT_32) { + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1)); + return push_inst32(compiler, VMOV | DN4(freg) | RT4(TMP_REG2)); + } + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4)); + mem |= SLJIT_MEM; + + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1)); + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, mem, memw + 4, TMP_REG1)); + return push_inst32(compiler, VMOV2 | DM4(freg) | RT4(TMP_REG2) | RN4(TMP_REG1)); +} + +#undef FPU_LOAD + SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; diff --git a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_32.c b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_32.c index 1a06b17d12..e6853c98f6 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_32.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_32.c @@ -38,383 +38,6 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS; } -#define EMIT_LOGICAL(op_imm, op_norm) \ - if (flags & SRC2_IMM) { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ - } \ - else { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ - } - -#define EMIT_SHIFT(op_imm, op_v) \ - if (flags & SRC2_IMM) { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ - } \ - else { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \ - } - -static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, - sljit_s32 dst, sljit_s32 src1, sljit_sw src2) -{ - sljit_s32 is_overflow, is_carry, is_handled; - - switch (GET_OPCODE(op)) { - case SLJIT_MOV: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (dst != src2) - return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst)); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U8: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) - return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); - SLJIT_ASSERT(dst == 
src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_S8: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); -#else /* SLJIT_MIPS_REV < 1 */ - FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst))); - return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 1 */ - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U16: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) - return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_S16: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); -#else /* SLJIT_MIPS_REV < 1 */ - FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst))); - return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 1 */ - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_NOT: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); - return SLJIT_SUCCESS; - - case SLJIT_CLZ: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst))); -#else /* SLJIT_MIPS_REV < 1 */ - if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) { - FAIL_IF(push_inst(compiler, SRL | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); - return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); - } - /* Nearly all instructions are unmovable in the following sequence. */ - FAIL_IF(push_inst(compiler, ADDU | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); - /* Check zero. */ - FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(dst) | IMM(-1), DR(dst))); - /* Loop for searching the highest bit. 
*/ - FAIL_IF(push_inst(compiler, ADDIU | S(dst) | T(dst) | IMM(1), DR(dst))); - FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); -#endif /* SLJIT_MIPS_REV >= 1 */ - return SLJIT_SUCCESS; - - case SLJIT_ADD: - is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_overflow) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else - FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - - if (is_overflow || is_carry) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - else { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - } - } - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); - } - else { - if (is_overflow) - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); - } - - /* a + b >= a | b (otherwise, the carry should be set to 1). */ - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (!is_overflow) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); - return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); - - case SLJIT_ADDC: - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_carry) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - else { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - } - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); - } else { - if (is_carry) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. 
*/ - FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); - } - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); - if (!is_carry) - return SLJIT_SUCCESS; - - /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - /* Set carry flag. */ - return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); - - case SLJIT_SUB: - if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - is_handled = 0; - - if (flags & SRC2_IMM) { - if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - is_handled = 1; - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - is_handled = 1; - } - } - - if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { - is_handled = 1; - - if (flags & SRC2_IMM) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL) - { - FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL) - { - FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); - } - } - - if (is_handled) { - if (flags & SRC2_IMM) { - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - return push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)); - } - else { - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - return push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)); - } - return SLJIT_SUCCESS; - } - - is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_overflow) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else - FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - /* dst may be the same as src1 or src2. 
*/ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); - } - else { - if (is_overflow) - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); - } - - if (!is_overflow) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); - return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); - - case SLJIT_SUBC: - if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_carry) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. */ - FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); - } - else { - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. */ - FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); - } - - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1))); - - FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); - return (is_carry) ? 
push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS; - - case SLJIT_MUL: - SLJIT_ASSERT(!(flags & SRC2_IMM)); - - if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); -#else /* SLJIT_MIPS_REV < 1 */ - FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); - return push_inst(compiler, MFLO | D(dst), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 1 */ - } - -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) - FAIL_IF(push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst))); - FAIL_IF(push_inst(compiler, MUH | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); -#else /* SLJIT_MIPS_REV < 6 */ - FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); -#endif /* SLJIT_MIPS_REV >= 6 */ - FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG)); - return push_inst(compiler, SUBU | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); - - case SLJIT_AND: - EMIT_LOGICAL(ANDI, AND); - return SLJIT_SUCCESS; - - case SLJIT_OR: - EMIT_LOGICAL(ORI, OR); - return SLJIT_SUCCESS; - - case SLJIT_XOR: - EMIT_LOGICAL(XORI, XOR); - return SLJIT_SUCCESS; - - case SLJIT_SHL: - EMIT_SHIFT(SLL, SLLV); - return SLJIT_SUCCESS; - - case SLJIT_LSHR: - EMIT_SHIFT(SRL, SRLV); - return SLJIT_SUCCESS; - - case SLJIT_ASHR: - EMIT_SHIFT(SRA, SRAV); - return SLJIT_SUCCESS; - } - - SLJIT_UNREACHABLE(); - return SLJIT_SUCCESS; -} - static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value) { FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst))); @@ -573,8 +196,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile sljit_s32 arg_types) { struct sljit_jump *jump; - sljit_u32 extra_space = (sljit_u32)type; - sljit_ins ins; + sljit_u32 extra_space = 0; + sljit_ins ins = NOP; CHECK_ERROR_PTR(); CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); @@ -583,14 +206,23 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile PTR_FAIL_IF(!jump); set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); - PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space)); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) { + extra_space = (sljit_u32)type; + PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space)); + } else if (type & SLJIT_CALL_RETURN) + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins)); SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); - PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0)); + if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; if (!(type & SLJIT_CALL_RETURN) || extra_space > 0) { - jump->flags |= IS_JAL | IS_CALL; + jump->flags |= IS_JAL; + + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + jump->flags |= IS_CALL; + PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); } else PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS)); @@ -598,6 +230,9 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile jump->addr = compiler->size; PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS)); + /* Maximum number of instructions required for generating a constant. 
*/ + compiler->size += 2; + if (extra_space == 0) return jump; @@ -623,16 +258,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi CHECK_ERROR(); CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); + src = PIC_ADDR_REG; + srcw = 0; + } + + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + if (type & SLJIT_CALL_RETURN) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); + src = PIC_ADDR_REG; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 0, &ins)); + + if (ins != NOP) + FAIL_IF(push_inst(compiler, ins, MOVABLE_INS)); + } + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, type, src, srcw); + } + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); if (src & SLJIT_IMM) FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); - else if (FAST_IS_REG(src)) + else if (src != PIC_ADDR_REG) FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); - else if (src & SLJIT_MEM) { - ADJUST_LOCAL_OFFSET(src, srcw); - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); - } FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space)); diff --git a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_64.c b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_64.c index c2b3d839c2..d2a5924f8e 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_64.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_64.c @@ -118,421 +118,6 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar); } -#define SELECT_OP(a, b) \ - (!(op & SLJIT_32) ? a : b) - -#define EMIT_LOGICAL(op_imm, op_norm) \ - if (flags & SRC2_IMM) { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ - } \ - else { \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ - } - -#define EMIT_SHIFT(op_dimm, op_dimm32, op_imm, op_dv, op_v) \ - if (flags & SRC2_IMM) { \ - if (src2 >= 32) { \ - SLJIT_ASSERT(!(op & SLJIT_32)); \ - ins = op_dimm32; \ - src2 -= 32; \ - } \ - else \ - ins = (op & SLJIT_32) ? op_imm : op_dimm; \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ - } \ - else { \ - ins = (op & SLJIT_32) ? 
op_v : op_dv; \ - if (op & SLJIT_SET_Z) \ - FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ - if (!(flags & UNUSED_DEST)) \ - FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst))); \ - } - -static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, - sljit_s32 dst, sljit_s32 src1, sljit_sw src2) -{ - sljit_ins ins; - sljit_s32 is_overflow, is_carry, is_handled; - - switch (GET_OPCODE(op)) { - case SLJIT_MOV: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (dst != src2) - return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst)); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U8: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) - return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_S8: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - if (op & SLJIT_32) - return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 1 */ - FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst))); - return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U16: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) - return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_S16: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - if (op & SLJIT_32) - return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 1 */ - FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst))); - return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U32: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) - if (dst == src2) - return push_inst(compiler, DINSU | T(src2) | SA(0) | (31 << 11) | (0 << 11), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 2 */ - FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst))); - return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_S32: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_NOT: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); - return SLJIT_SUCCESS; - - 
case SLJIT_CLZ: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst))); -#else /* SLJIT_MIPS_REV < 1 */ - if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) { - FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); - return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); - } - /* Nearly all instructions are unmovable in the following sequence. */ - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); - /* Check zero. */ - FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM((op & SLJIT_32) ? 32 : 64), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(dst) | IMM(-1), DR(dst))); - /* Loop for searching the highest bit. */ - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(dst) | IMM(1), DR(dst))); - FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, SELECT_OP(DSLL, SLL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); -#endif /* SLJIT_MIPS_REV >= 1 */ - return SLJIT_SUCCESS; - - case SLJIT_ADD: - is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_overflow) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else - FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - - if (is_overflow || is_carry) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - else { - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - } - } - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); - } - else { - if (is_overflow) - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); - } - - /* a + b >= a | b (otherwise, the carry should be set to 1). 
*/ - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (!is_overflow) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); - return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); - - case SLJIT_ADDC: - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_carry) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - else { - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - } - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); - } else { - if (is_carry) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. */ - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); - } - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); - if (!is_carry) - return SLJIT_SUCCESS; - - /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - /* Set carry flag. 
*/ - return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); - - case SLJIT_SUB: - if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - is_handled = 0; - - if (flags & SRC2_IMM) { - if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - is_handled = 1; - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - is_handled = 1; - } - } - - if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { - is_handled = 1; - - if (flags & SRC2_IMM) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL) - { - FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { - FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - } - else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL) - { - FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); - } - } - - if (is_handled) { - if (flags & SRC2_IMM) { - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)); - } - else { - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - if (!(flags & UNUSED_DEST)) - return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)); - } - return SLJIT_SUCCESS; - } - - is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_overflow) { - if (src2 >= 0) - FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else - FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); - } - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); - /* dst may be the same as src1 or src2. 
*/ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); - } - else { - if (is_overflow) - FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - else if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - - if (is_overflow || is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); - /* dst may be the same as src1 or src2. */ - if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) - FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); - } - - if (!is_overflow) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); - if (op & SLJIT_SET_Z) - FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); - return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG); - - case SLJIT_SUBC: - if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { - FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); - src2 = TMP_REG2; - flags &= ~SRC2_IMM; - } - - is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); - - if (flags & SRC2_IMM) { - if (is_carry) - FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. */ - FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); - } - else { - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); - /* dst may be the same as src1 or src2. */ - FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); - } - - if (is_carry) - FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1))); - - FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); - return (is_carry) ? 
push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS; - - case SLJIT_MUL: - SLJIT_ASSERT(!(flags & SRC2_IMM)); - - if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) { -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) - return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)); -#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) - if (op & SLJIT_32) - return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); - FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS)); - return push_inst(compiler, MFLO | D(dst), DR(dst)); -#else /* SLJIT_MIPS_REV < 1 */ - FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); - return push_inst(compiler, MFLO | D(dst), DR(dst)); -#endif /* SLJIT_MIPS_REV >= 6 */ - } - -#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) - FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst))); - FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); -#else /* SLJIT_MIPS_REV < 6 */ - FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG)); - FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); -#endif /* SLJIT_MIPS_REV >= 6 */ - FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG)); - return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); - - case SLJIT_AND: - EMIT_LOGICAL(ANDI, AND); - return SLJIT_SUCCESS; - - case SLJIT_OR: - EMIT_LOGICAL(ORI, OR); - return SLJIT_SUCCESS; - - case SLJIT_XOR: - EMIT_LOGICAL(XORI, XOR); - return SLJIT_SUCCESS; - - case SLJIT_SHL: - EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV); - return SLJIT_SUCCESS; - - case SLJIT_LSHR: - EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV); - return SLJIT_SUCCESS; - - case SLJIT_ASHR: - EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV); - return SLJIT_SUCCESS; - } - - SLJIT_UNREACHABLE(); - return SLJIT_SUCCESS; -} - static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value) { FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst))); @@ -653,14 +238,20 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile if (type & SLJIT_CALL_RETURN) PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins)); - PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins)); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins)); SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); - PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0)); + if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; if (!(type & SLJIT_CALL_RETURN)) { - jump->flags |= IS_JAL | IS_CALL; + jump->flags |= IS_JAL; + + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + jump->flags |= IS_CALL; + PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); } else PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS)); @@ -668,6 +259,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile jump->addr = compiler->size; PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS)); + /* Maximum number of instructions required for generating a constant. 
*/ + compiler->size += 6; return jump; } @@ -680,16 +273,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi CHECK_ERROR(); CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); + src = PIC_ADDR_REG; + srcw = 0; + } + + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + if (type & SLJIT_CALL_RETURN) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); + src = PIC_ADDR_REG; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 0, &ins)); + + if (ins != NOP) + FAIL_IF(push_inst(compiler, ins, MOVABLE_INS)); + } + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, type, src, srcw); + } + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); if (src & SLJIT_IMM) FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); - else if (FAST_IS_REG(src)) + else if (src != PIC_ADDR_REG) FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); - else if (src & SLJIT_MEM) { - ADJUST_LOCAL_OFFSET(src, srcw); - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); - } if (type & SLJIT_CALL_RETURN) FAIL_IF(emit_stack_frame_release(compiler, 0, &ins)); diff --git a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_common.c b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_common.c index be5cb22a23..9afe901c38 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeMIPS_common.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeMIPS_common.c @@ -42,6 +42,14 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) return "MIPS64-R6" SLJIT_CPUINFO; #endif /* SLJIT_CONFIG_MIPS_32 */ +#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return "MIPS32-R2" SLJIT_CPUINFO; +#else /* !SLJIT_CONFIG_MIPS_32 */ + return "MIPS64-R2" SLJIT_CPUINFO; +#endif /* SLJIT_CONFIG_MIPS_32 */ + #elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) @@ -151,12 +159,18 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #define BREAK (HI(0) | LO(13)) #define CFC1 (HI(17) | (2 << 21)) #if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) +#define C_EQ_S (HI(17) | CMP_FMT_S | LO(2)) +#define C_OLE_S (HI(17) | CMP_FMT_S | LO(6)) +#define C_OLT_S (HI(17) | CMP_FMT_S | LO(4)) #define C_UEQ_S (HI(17) | CMP_FMT_S | LO(3)) #define C_ULE_S (HI(17) | CMP_FMT_S | LO(7)) #define C_ULT_S (HI(17) | CMP_FMT_S | LO(5)) #define C_UN_S (HI(17) | CMP_FMT_S | LO(1)) #define C_FD (FD(TMP_FREG3)) #else /* SLJIT_MIPS_REV < 6 */ +#define C_EQ_S (HI(17) | FMT_S | LO(50)) +#define C_OLE_S (HI(17) | FMT_S | LO(54)) +#define C_OLT_S (HI(17) | FMT_S | LO(52)) #define C_UEQ_S (HI(17) | FMT_S | LO(51)) #define C_ULE_S (HI(17) | FMT_S | LO(55)) #define C_ULT_S (HI(17) | FMT_S | LO(53)) @@ -187,6 +201,9 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #endif /* SLJIT_MIPS_REV >= 6 */ #define DIV_S (HI(17) | FMT_S | LO(3)) #define DINSU (HI(31) | LO(6)) +#define DROTR (HI(0) | (1 << 21) | LO(58)) +#define DROTR32 (HI(0) | (1 << 21) | LO(62)) +#define DROTRV (HI(0) | (1 << 6) | LO(22)) #define DSLL (HI(0) | LO(56)) #define DSLL32 (HI(0) | LO(60)) #define DSLLV (HI(0) | LO(20)) @@ 
-206,9 +223,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #define JR (HI(0) | LO(8)) #endif /* SLJIT_MIPS_REV >= 6 */ #define LD (HI(55)) +#define LDL (HI(26)) +#define LDR (HI(27)) #define LDC1 (HI(53)) #define LUI (HI(15)) #define LW (HI(35)) +#define LWL (HI(34)) +#define LWR (HI(38)) #define LWC1 (HI(49)) #define MFC1 (HI(17)) #if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) @@ -235,7 +256,11 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #define NOR (HI(0) | LO(39)) #define OR (HI(0) | LO(37)) #define ORI (HI(13)) +#define ROTR (HI(0) | (1 << 21) | LO(2)) +#define ROTRV (HI(0) | (1 << 6) | LO(6)) #define SD (HI(63)) +#define SDL (HI(44)) +#define SDR (HI(45)) #define SDC1 (HI(61)) #define SLT (HI(0) | LO(42)) #define SLTI (HI(10)) @@ -250,6 +275,8 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #define SUB_S (HI(17) | FMT_S | LO(1)) #define SUBU (HI(0) | LO(35)) #define SW (HI(43)) +#define SWL (HI(42)) +#define SWR (HI(46)) #define SWC1 (HI(57)) #define TRUNC_W_S (HI(17) | FMT_S | LO(13)) #define XOR (HI(0) | LO(38)) @@ -277,12 +304,18 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 4] = { #define ADDU_W ADDU #define ADDIU_W ADDIU #define SLL_W SLL +#define SRA_W SRA #define SUBU_W SUBU +#define STORE_W SW +#define LOAD_W LW #else #define ADDU_W DADDU #define ADDIU_W DADDIU #define SLL_W DSLL +#define SRA_W DSRA #define SUBU_W DSUBU +#define STORE_W SD +#define LOAD_W LD #endif #define SIMM_MAX (0x7fff) @@ -315,19 +348,21 @@ static SLJIT_INLINE sljit_ins invert_branch(sljit_uw flags) return (1 << 16); } -static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset) +static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code, sljit_sw executable_offset) { sljit_sw diff; sljit_uw target_addr; sljit_ins *inst; sljit_ins saved_inst; + inst = (sljit_ins *)jump->addr; + #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) if (jump->flags & (SLJIT_REWRITABLE_JUMP | IS_CALL)) - return code_ptr; + goto exit; #else if (jump->flags & SLJIT_REWRITABLE_JUMP) - return code_ptr; + goto exit; #endif if (jump->flags & JUMP_ADDR) @@ -337,13 +372,12 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; } - inst = (sljit_ins *)jump->addr; if (jump->flags & IS_COND) inst--; #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) if (jump->flags & IS_CALL) - goto keep_address; + goto preserve_addr; #endif /* B instructions. */ @@ -364,15 +398,14 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i jump->addr -= 2 * sizeof(sljit_ins); return inst; } - } - else { + } else { diff = ((sljit_sw)target_addr - (sljit_sw)(inst + 1) - executable_offset) >> 2; if (diff <= SIMM_MAX && diff >= SIMM_MIN) { jump->flags |= PATCH_B; if (!(jump->flags & IS_COND)) { inst[0] = (jump->flags & IS_JAL) ? BAL : B; - inst[1] = NOP; + /* Keep inst[1] */ return inst + 1; } inst[0] ^= invert_branch(jump->flags); @@ -415,36 +448,46 @@ static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_i if ((target_addr & ~(sljit_uw)0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~(sljit_uw)0xfffffff)) { jump->flags |= PATCH_J; inst[0] = (jump->flags & IS_JAL) ? 
JAL : J; - inst[1] = NOP; + /* Keep inst[1] */ return inst + 1; } } + if (jump->flags & IS_COND) + inst++; + #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) -keep_address: +preserve_addr: if (target_addr <= 0x7fffffff) { jump->flags |= PATCH_ABS32; - if (jump->flags & IS_COND) { - inst[0] -= 4; - inst++; - } - inst[2] = inst[6]; - inst[3] = inst[7]; + if (jump->flags & IS_COND) + inst[-1] -= 4; + + inst[2] = inst[0]; + inst[3] = inst[1]; return inst + 3; } if (target_addr <= 0x7fffffffffffl) { jump->flags |= PATCH_ABS48; - if (jump->flags & IS_COND) { - inst[0] -= 2; - inst++; - } - inst[4] = inst[6]; - inst[5] = inst[7]; + if (jump->flags & IS_COND) + inst[-1] -= 2; + + inst[4] = inst[0]; + inst[5] = inst[1]; return inst + 5; } #endif - return code_ptr; +exit: +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + inst[2] = inst[0]; + inst[3] = inst[1]; + return inst + 3; +#else + inst[6] = inst[0]; + inst[7] = inst[1]; + return inst + 7; +#endif } #ifdef __GNUC__ @@ -459,30 +502,52 @@ static __attribute__ ((noinline)) void sljit_cache_flush(void* code, void* code_ static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label) { if (max_label < 0x80000000l) { - put_label->flags = 0; + put_label->flags = PATCH_ABS32; return 1; } if (max_label < 0x800000000000l) { - put_label->flags = 1; + put_label->flags = PATCH_ABS48; return 3; } - put_label->flags = 2; + put_label->flags = 0; return 5; } -static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) +#endif /* SLJIT_CONFIG_MIPS_64 */ + +static SLJIT_INLINE void load_addr_to_reg(void *dst, sljit_u32 reg) { - sljit_uw addr = put_label->label->addr; - sljit_ins *inst = (sljit_ins *)put_label->addr; - sljit_u32 reg = *inst; + struct sljit_jump *jump; + struct sljit_put_label *put_label; + sljit_uw flags; + sljit_ins *inst; + sljit_uw addr; - if (put_label->flags == 0) { + if (reg != 0) { + jump = (struct sljit_jump*)dst; + flags = jump->flags; + inst = (sljit_ins*)jump->addr; + addr = (flags & JUMP_LABEL) ? 
jump->u.label->addr : jump->u.target; + } else { + put_label = (struct sljit_put_label*)dst; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + flags = put_label->flags; +#endif + inst = (sljit_ins*)put_label->addr; + addr = put_label->label->addr; + reg = *inst; + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + inst[0] = LUI | T(reg) | IMM(addr >> 16); +#else /* !SLJIT_CONFIG_MIPS_32 */ + if (flags & PATCH_ABS32) { SLJIT_ASSERT(addr < 0x80000000l); inst[0] = LUI | T(reg) | IMM(addr >> 16); } - else if (put_label->flags == 1) { + else if (flags & PATCH_ABS48) { SLJIT_ASSERT(addr < 0x800000000000l); inst[0] = LUI | T(reg) | IMM(addr >> 32); inst[1] = ORI | S(reg) | T(reg) | IMM((addr >> 16) & 0xffff); @@ -497,12 +562,11 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) inst[4] = DSLL | T(reg) | D(reg) | SH_IMM(16); inst += 4; } +#endif /* SLJIT_CONFIG_MIPS_32 */ inst[1] = ORI | S(reg) | T(reg) | IMM(addr & 0xffff); } -#endif - SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { struct sljit_memory_fragment *buf; @@ -557,11 +621,12 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } if (jump && jump->addr == word_count) { #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - jump->addr = (sljit_uw)(code_ptr - 3); + word_count += 2; #else - jump->addr = (sljit_uw)(code_ptr - 7); + word_count += 6; #endif - code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset); + jump->addr = (sljit_uw)(code_ptr - 1); + code_ptr = detect_jump_type(jump, code, executable_offset); jump = jump->next; } if (const_ && const_->addr == word_count) { @@ -571,7 +636,10 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil if (put_label && put_label->addr == word_count) { SLJIT_ASSERT(put_label->label); put_label->addr = (sljit_uw)code_ptr; -#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + code_ptr += 1; + word_count += 1; +#else code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); word_count += 5; #endif @@ -579,8 +647,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } next_addr = compute_next_addr(label, jump, const_, put_label); } - code_ptr ++; - word_count ++; + code_ptr++; + word_count++; } while (buf_ptr < buf_end); buf = buf->next; @@ -617,51 +685,14 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil break; } - /* Set the fields of immediate loads. 
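/* Illustrative sketch (not from the sljit sources, names are made up): the bit math
   behind the LUI/ORI chains that load_addr_to_reg above patches in.  The 32-bit path
   asserts addr < 0x80000000 so LUI's sign extension is harmless; the longer chains
   insert a DSLL by 16 between the ORI steps, as visible in the surrounding hunk. */
#include <stdint.h>
#include <stdio.h>

static void show_abs32_fields(uint32_t addr)
{
    unsigned hi = (addr >> 16) & 0xffff;   /* goes into LUI  rt, hi      */
    unsigned lo = addr & 0xffff;           /* goes into ORI  rt, rt, lo  */
    printf("ABS32: LUI 0x%04x, ORI 0x%04x\n", hi, lo);
}

static void show_abs48_fields(uint64_t addr)
{
    /* 48-bit path: LUI takes bits 47..32, one ORI takes bits 31..16,
       a DSLL by 16 makes room, and a final ORI adds bits 15..0. */
    printf("ABS48: LUI 0x%04x, ORI 0x%04x, (DSLL 16), ORI 0x%04x\n",
           (unsigned)((addr >> 32) & 0xffff),
           (unsigned)((addr >> 16) & 0xffff),
           (unsigned)(addr & 0xffff));
}

int main(void)
{
    show_abs32_fields(0x12345678u);
    show_abs48_fields(0x123456789abcull);
    return 0;
}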
*/ -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1]) & 0xffff) == 0); - buf_ptr[0] |= (sljit_ins)(addr >> 16) & 0xffff; - buf_ptr[1] |= (sljit_ins)addr & 0xffff; -#else - if (jump->flags & PATCH_ABS32) { - SLJIT_ASSERT(addr <= 0x7fffffff); - SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1]) & 0xffff) == 0); - buf_ptr[0] |= (sljit_ins)(addr >> 16) & 0xffff; - buf_ptr[1] |= (sljit_ins)addr & 0xffff; - break; - } - - if (jump->flags & PATCH_ABS48) { - SLJIT_ASSERT(addr <= 0x7fffffffffffl); - SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1] | buf_ptr[3]) & 0xffff) == 0); - buf_ptr[0] |= (sljit_ins)(addr >> 32) & 0xffff; - buf_ptr[1] |= (sljit_ins)(addr >> 16) & 0xffff; - buf_ptr[3] |= (sljit_ins)addr & 0xffff; - break; - } - - SLJIT_ASSERT(((buf_ptr[0] | buf_ptr[1] | buf_ptr[3] | buf_ptr[5]) & 0xffff) == 0); - buf_ptr[0] |= (sljit_ins)(addr >> 48) & 0xffff; - buf_ptr[1] |= (sljit_ins)(addr >> 32) & 0xffff; - buf_ptr[3] |= (sljit_ins)(addr >> 16) & 0xffff; - buf_ptr[5] |= (sljit_ins)addr & 0xffff; -#endif + load_addr_to_reg(jump, PIC_ADDR_REG); } while (0); jump = jump->next; } put_label = compiler->put_labels; while (put_label) { -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - addr = put_label->label->addr; - buf_ptr = (sljit_ins *)put_label->addr; - - SLJIT_ASSERT((buf_ptr[0] & 0xffe00000) == LUI && (buf_ptr[1] & 0xfc000000) == ORI); - buf_ptr[0] |= (addr >> 16) & 0xffff; - buf_ptr[1] |= addr & 0xffff; -#else - put_label_set(put_label); -#endif + load_addr_to_reg(put_label, 0); put_label = put_label->next; } @@ -700,19 +731,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) #endif case SLJIT_HAS_ZERO_REGISTER: return 1; - #if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) case SLJIT_HAS_CLZ: case SLJIT_HAS_CMOV: case SLJIT_HAS_PREFETCH: return 1; -#endif /* SLJIT_MIPS_REV >= 1 */ + case SLJIT_HAS_CTZ: + return 2; +#endif /* SLJIT_MIPS_REV >= 1 */ +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) + case SLJIT_HAS_ROT: + return 1; +#endif /* SLJIT_MIPS_REV >= 2 */ default: return 0; } } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + return (type >= SLJIT_ORDERED_EQUAL && type <= SLJIT_ORDERED_LESS_EQUAL); +} + /* --------------------------------------------------------------------- */ /* Entry, exit */ /* --------------------------------------------------------------------- */ @@ -747,14 +788,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) #define SLOW_SRC2 0x20000 #define SLOW_DEST 0x40000 -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) -#define STACK_STORE SW -#define STACK_LOAD LW -#else -#define STACK_STORE SD -#define STACK_LOAD LD -#endif - static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw); static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size, sljit_ins *ins_ptr); @@ -770,13 +803,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi { sljit_ins base; sljit_s32 i, tmp, offset; - sljit_s32 arg_count, word_arg_count, saved_arg_count, float_arg_count; + sljit_s32 arg_count, word_arg_count, float_arg_count; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); CHECK_ERROR(); CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, 
local_size); - local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { if ((local_size & SSIZE_OF(sw)) != 0) @@ -791,27 +825,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi #endif compiler->local_size = local_size; -#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - tmp = arg_types >> SLJIT_ARG_SHIFT; - arg_count = 0; offset = 0; - - while (tmp) { - offset = arg_count; - if ((tmp & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64) { - if ((arg_count & 0x1) != 0) +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + if (!(options & SLJIT_ENTER_REG_ARG)) { + tmp = arg_types >> SLJIT_ARG_SHIFT; + arg_count = 0; + + while (tmp) { + offset = arg_count; + if ((tmp & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64) { + if ((arg_count & 0x1) != 0) + arg_count++; arg_count++; + } + arg_count++; + tmp >>= SLJIT_ARG_SHIFT; } - arg_count++; - tmp >>= SLJIT_ARG_SHIFT; + compiler->args_size = (sljit_uw)arg_count << 2; + offset = (offset >= 4) ? (offset << 2) : 0; } - - compiler->args_size = (sljit_uw)arg_count << 2; - offset = (offset >= 4) ? (offset << 2) : 0; -#else /* !SLJIT_CONFIG_MIPS_32 */ - offset = 0; #endif /* SLJIT_CONFIG_MIPS_32 */ if (local_size + offset <= -SIMM_MIN) { @@ -820,9 +854,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi base = S(SLJIT_SP); offset = local_size - SSIZE_OF(sw); } else { - FAIL_IF(load_immediate(compiler, DR(OTHER_FLAG), local_size)); + FAIL_IF(load_immediate(compiler, OTHER_FLAG, local_size)); FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SP) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); - FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_SP) | T(OTHER_FLAG) | D(SLJIT_SP), DR(SLJIT_SP))); + FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_SP) | TA(OTHER_FLAG) | D(SLJIT_SP), DR(SLJIT_SP))); base = S(TMP_REG2); offset = -SSIZE_OF(sw); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) @@ -830,17 +864,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi #endif } - FAIL_IF(push_inst(compiler, STACK_STORE | base | TA(RETURN_ADDR_REG) | IMM(offset), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, STORE_W | base | TA(RETURN_ADDR_REG) | IMM(offset), UNMOVABLE_INS)); tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) { offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offset), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, STORE_W | base | T(i) | IMM(offset), MOVABLE_INS)); } for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_STORE | base | T(i) | IMM(offset), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, STORE_W | base | T(i) | IMM(offset), MOVABLE_INS)); } #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) @@ -860,10 +894,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi FAIL_IF(push_inst(compiler, SDC1 | base | FT(i) | IMM(offset), MOVABLE_INS)); } + if (options & SLJIT_ENTER_REG_ARG) + return SLJIT_SUCCESS; + arg_types >>= SLJIT_ARG_SHIFT; arg_count = 0; word_arg_count = 0; - saved_arg_count = 0; float_arg_count = 0; #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) @@ -970,7 +1006,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct 
sljit_compiler *comp CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { if ((local_size & SSIZE_OF(sw)) != 0) @@ -989,14 +1025,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size, sljit_ins *ins_ptr) { sljit_s32 local_size, i, tmp, offset; + sljit_s32 load_return_addr = (frame_size == 0); sljit_s32 scratches = compiler->scratches; sljit_s32 saveds = compiler->saveds; sljit_s32 fsaveds = compiler->fsaveds; sljit_s32 fscratches = compiler->fscratches; + sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + + SLJIT_ASSERT(frame_size == 1 || (frame_size & 0xf) == 0); + frame_size &= ~0xf; local_size = compiler->local_size; - tmp = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + tmp = GET_SAVED_REGISTERS_SIZE(scratches, saveds - kept_saveds_count, 1); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { if ((tmp & SSIZE_OF(sw)) != 0) @@ -1024,18 +1065,18 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit SLJIT_ASSERT(local_size >= frame_size); offset = local_size - SSIZE_OF(sw); - if (frame_size == 0) - FAIL_IF(push_inst(compiler, STACK_LOAD | S(SLJIT_SP) | TA(RETURN_ADDR_REG) | IMM(offset), RETURN_ADDR_REG)); + if (load_return_addr) + FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | TA(RETURN_ADDR_REG) | IMM(offset), RETURN_ADDR_REG)); tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - kept_saveds_count; i > tmp; i--) { offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_LOAD | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS)); } for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_LOAD | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, LOAD_W | S(SLJIT_SP) | T(i) | IMM(offset), MOVABLE_INS)); } #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) @@ -1076,8 +1117,38 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler return push_inst(compiler, ins, UNMOVABLE_INS); } -#undef STACK_STORE -#undef STACK_LOAD +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + sljit_ins ins; + + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw)); + src = PIC_ADDR_REG; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG))); + src = PIC_ADDR_REG; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1, &ins)); + + if (!(src & SLJIT_IMM)) { + 
FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS)); + return push_inst(compiler, ins, UNMOVABLE_INS); + } + + if (ins != NOP) + FAIL_IF(push_inst(compiler, ins, MOVABLE_INS)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} /* --------------------------------------------------------------------- */ /* Operators */ @@ -1134,9 +1205,10 @@ static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flag return 0; } +#define TO_ARGW_HI(argw) (((argw) & ~0xffff) + (((argw) & 0x8000) ? 0x10000 : 0)) + /* See getput_arg below. - Note: can_cache is called only for binary operators. Those - operators always uses word arguments without write back. */ + Note: can_cache is called only for binary operators. */ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) { SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); @@ -1151,7 +1223,8 @@ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, slj } if (arg == next_arg) { - if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)) + if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) + || TO_ARGW_HI(argw) == TO_ARGW_HI(next_argw)) return 1; return 0; } @@ -1163,6 +1236,7 @@ static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, slj static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) { sljit_s32 tmp_ar, base, delay_slot; + sljit_sw offset, argw_hi; SLJIT_ASSERT(arg & SLJIT_MEM); if (!(next_arg & SLJIT_MEM)) { @@ -1170,6 +1244,8 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl next_argw = 0; } + /* Since tmp can be the same as base or offset registers, + * these might be unavailable after modifying tmp. 
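/* Illustrative sketch (standalone test, not part of the patch): why TO_ARGW_HI,
   introduced above, rounds the high half up when bit 15 of the offset is set.
   MIPS load/store immediates are sign-extended 16-bit values, so the cached base
   must over-shoot by 0x10000 whenever the low half will be treated as negative.
   The macro is copied verbatim from the hunk; the test harness is hypothetical. */
#include <assert.h>
#include <stdint.h>

#define TO_ARGW_HI(argw) (((argw) & ~0xffff) + (((argw) & 0x8000) ? 0x10000 : 0))

int main(void)
{
    for (intptr_t argw = -0x30000; argw <= 0x30000; argw += 0x1234) {
        intptr_t base = TO_ARGW_HI(argw);
        int16_t  disp = (int16_t)(argw & 0xffff);  /* what IMM() would encode */
        assert(base + disp == argw);               /* base + sign-extended low 16 bits */
    }
    return 0;
}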
*/ if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) { tmp_ar = reg_ar; delay_slot = reg_ar; @@ -1217,35 +1293,39 @@ static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sl return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } - if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { - if (argw != compiler->cache_argw) { - FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); - compiler->cache_argw = argw; - } - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); - } + if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(argw - compiler->cache_argw), delay_slot); - if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { - if (argw != compiler->cache_argw) - FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); - } - else { + if (compiler->cache_arg == SLJIT_MEM && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) { + offset = argw - compiler->cache_argw; + } else { compiler->cache_arg = SLJIT_MEM; - FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw)); + + argw_hi = TO_ARGW_HI(argw); + + if (next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN && argw_hi != TO_ARGW_HI(next_argw)) { + FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw)); + compiler->cache_argw = argw; + offset = 0; + } else { + FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw_hi)); + compiler->cache_argw = argw_hi; + offset = argw & 0xffff; + argw = argw_hi; + } } - compiler->cache_argw = argw; if (!base) - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(offset), delay_slot); if (arg == next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN) { compiler->cache_arg = arg; FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | D(TMP_REG3), DR(TMP_REG3))); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar) | IMM(offset), delay_slot); } FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | DA(tmp_ar), tmp_ar)); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar) | IMM(offset), delay_slot); } static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg_ar, sljit_s32 arg, sljit_sw argw) @@ -1270,19 +1350,19 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, s if (SLJIT_UNLIKELY(argw)) { FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | DA(tmp_ar) | SH_IMM(argw), tmp_ar)); - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar)); + FAIL_IF(push_inst(compiler, ADDU_W | SA(tmp_ar) | T(base) | DA(tmp_ar), tmp_ar)); } else FAIL_IF(push_inst(compiler, 
ADDU_W | S(base) | T(OFFS_REG(arg)) | DA(tmp_ar), tmp_ar)); return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); } - FAIL_IF(load_immediate(compiler, tmp_ar, argw)); + FAIL_IF(load_immediate(compiler, tmp_ar, TO_ARGW_HI(argw))); if (base != 0) - FAIL_IF(push_inst(compiler, ADDU_W | S(base) | TA(tmp_ar) | DA(tmp_ar), tmp_ar)); + FAIL_IF(push_inst(compiler, ADDU_W | SA(tmp_ar) | T(base) | DA(tmp_ar), tmp_ar)); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar) | IMM(argw), delay_slot); } static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w) @@ -1292,6 +1372,649 @@ static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, slji return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); } +#define EMIT_LOGICAL(op_imm, op_reg) \ + if (flags & SRC2_IMM) { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ + } \ + else { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_reg | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_reg | S(src1) | T(src2) | D(dst), DR(dst))); \ + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + +#define SELECT_OP(a, b) (b) + +#define EMIT_SHIFT(dimm, dimm32, imm, dv, v) \ + op_imm = (imm); \ + op_v = (v); + +#else /* !SLJIT_CONFIG_MIPS_32 */ + +#define SELECT_OP(a, b) \ + (!(op & SLJIT_32) ? a : b) + +#define EMIT_SHIFT(dimm, dimm32, imm, dv, v) \ + op_dimm = (dimm); \ + op_dimm32 = (dimm32); \ + op_imm = (imm); \ + op_dv = (dv); \ + op_v = (v); + +#endif /* SLJIT_CONFIG_MIPS_32 */ + +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV < 1) + +static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src) +{ + sljit_s32 is_clz = (GET_OPCODE(op) == SLJIT_CLZ); +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + sljit_ins max = (op & SLJIT_32) ? 32 : 64; +#else /* !SLJIT_CONFIG_RISCV_64 */ + sljit_ins max = 32; +#endif /* SLJIT_CONFIG_RISCV_64 */ + + /* The TMP_REG2 is the next value. */ + if (src != TMP_REG2) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + + FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG2) | TA(0) | IMM(is_clz ? 13 : 14), UNMOVABLE_INS)); + /* The OTHER_FLAG is the counter. Delay slot. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(max), OTHER_FLAG)); + + if (!is_clz) { + FAIL_IF(push_inst(compiler, ANDI | S(TMP_REG2) | T(TMP_REG1) | IMM(1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, BNE | S(TMP_REG1) | TA(0) | IMM(11), UNMOVABLE_INS)); + } else + FAIL_IF(push_inst(compiler, BLTZ | S(TMP_REG2) | TA(0) | IMM(11), UNMOVABLE_INS)); + + /* Delay slot. */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(0), OTHER_FLAG)); + + /* The TMP_REG1 is the next shift. 
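/* Illustrative sketch (behavioural model only, not the emitted instruction pattern):
   what the emit_clz_ctz shift loop computes on pre-R1 cores that lack CLZ.  Both
   operations fall back to the operand width (here 32) for a zero input, matching
   the "max" value loaded in the delay slot above.  Helper names are made up. */
#include <assert.h>
#include <stdint.h>

static unsigned ref_clz32(uint32_t v)
{
    unsigned n = 32;              /* result for a zero input */
    while (v != 0) {
        v >>= 1;
        n--;
    }
    return n;
}

static unsigned ref_ctz32(uint32_t v)
{
    unsigned n = 0;
    if (v == 0)
        return 32;
    while ((v & 1) == 0) {
        v >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    assert(ref_clz32(0) == 32 && ref_ctz32(0) == 32);
    assert(ref_clz32(1) == 31 && ref_ctz32(0x80000000u) == 31);
    return 0;
}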
*/ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(TMP_REG1) | IMM(max), DR(TMP_REG1))); + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(TMP_REG2) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, SELECT_OP(DSRL, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), DR(TMP_REG1))); + + FAIL_IF(push_inst(compiler, (is_clz ? SELECT_OP(DSRLV, SRLV) : SELECT_OP(DSLLV, SLLV)) | S(TMP_REG1) | TA(EQUAL_FLAG) | D(TMP_REG2), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, BNE | S(TMP_REG2) | TA(0) | IMM(-4), UNMOVABLE_INS)); + /* Delay slot. */ + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(TMP_REG1) | T(TMP_REG2) | IMM(-1), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, (is_clz ? SELECT_OP(DSRLV, SRLV) : SELECT_OP(DSLLV, SLLV)) | S(TMP_REG2) | TA(EQUAL_FLAG) | D(TMP_REG2), DR(TMP_REG2))); + + FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG2) | TA(0) | IMM(-7), UNMOVABLE_INS)); + /* Delay slot. */ + FAIL_IF(push_inst(compiler, OR | SA(OTHER_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG)); + + return push_inst(compiler, SELECT_OP(DADDU, ADDU) | SA(OTHER_FLAG) | TA(0) | D(dst), DR(dst)); +} + +#endif /* SLJIT_MIPS_REV < 1 */ + +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_sw src2) +{ + sljit_s32 is_overflow, is_carry, carry_src_ar, is_handled; + sljit_ins op_imm, op_v; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + sljit_ins ins, op_dimm, op_dimm32, op_dv; +#endif + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) + return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); +#else /* SLJIT_MIPS_REV < 1 */ + FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst))); + return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 1 */ +#else /* !SLJIT_CONFIG_MIPS_32 */ +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) + if (op & SLJIT_32) + return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 1 */ + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst))); + return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst)); +#endif /* SLJIT_CONFIG_MIPS_32 */ + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) + return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); +#else /* SLJIT_MIPS_REV < 1 */ + FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst))); + return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 1 */ +#else /* !SLJIT_CONFIG_MIPS_32 */ +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) + if (op & SLJIT_32) + return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 1 */ + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst))); + return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst)); +#endif /* SLJIT_CONFIG_MIPS_32 */ + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + case SLJIT_MOV_U32: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) + if (dst == src2) + return push_inst(compiler, DINSU | T(src2) | SA(0) | (31 << 11) | (0 << 11), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 2 */ + FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst))); + return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S32: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; +#endif /* SLJIT_CONFIG_MIPS_64 */ + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); + return SLJIT_SUCCESS; + +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + return push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | D(dst), DR(dst)); +#else /* SLJIT_MIPS_REV < 6 */ + return push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 6 */ + case SLJIT_CTZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | D(TMP_REG1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, AND | S(src2) | T(TMP_REG1) | D(dst), DR(dst))); +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(dst) | D(dst), DR(dst))); +#else /* SLJIT_MIPS_REV < 6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(dst) | T(dst) | D(dst), DR(dst))); +#endif /* SLJIT_MIPS_REV >= 6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(TMP_REG1) | IMM(SELECT_OP(-64, -32)), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(SELECT_OP(26, 27)), DR(TMP_REG1))); + return push_inst(compiler, XOR | S(dst) | T(TMP_REG1) | D(dst), DR(dst)); +#else /* SLJIT_MIPS_REV < 1 */ + case SLJIT_CLZ: + case SLJIT_CTZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & 
SRC2_IMM)); + return emit_clz_ctz(compiler, op, dst, src2); +#endif /* SLJIT_MIPS_REV >= 1 */ + + case SLJIT_ADD: + /* Overflow computation (both add and sub): overflow = src1_sign ^ src2_sign ^ result_sign ^ carry_flag */ + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + carry_src_ar = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (is_overflow || carry_src_ar != 0) { + if (src1 != dst) + carry_src_ar = DR(src1); + else if (src2 != dst) + carry_src_ar = DR(src2); + else { + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | DA(OTHER_FLAG), OTHER_FLAG)); + carry_src_ar = OTHER_FLAG; + } + } + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */ + if (is_overflow || carry_src_ar != 0) { + if (flags & SRC2_IMM) + FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + else + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(carry_src_ar) | DA(OTHER_FLAG), OTHER_FLAG)); + } + + if (!is_overflow) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | D(TMP_REG1), DR(TMP_REG1))); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + return push_inst(compiler, XOR | S(TMP_REG1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_ADDC: + carry_src_ar = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); + } else { + if (carry_src_ar != 0) { + if (src1 != dst) + carry_src_ar = DR(src1); + else if (src2 != dst) + carry_src_ar = DR(src2); + else { + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + carry_src_ar = EQUAL_FLAG; + } + } + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. 
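/* Illustrative sketch (plain C, not part of the patch): the flag recipe used by the
   SLJIT_ADD case above.  Unsigned carry falls out of "a + b < a" (the SLTU against
   the original operand), and signed overflow is src1_sign ^ src2_sign ^ result_sign
   ^ carry, exactly as the comment at the top of the case states.  Names assumed. */
#include <assert.h>
#include <stdint.h>

static void add_flags(uint32_t a, uint32_t b, int *carry, int *overflow)
{
    uint32_t r = a + b;
    *carry = (r < a);                                      /* SLTU-style carry check */
    *overflow = (int)(((a ^ b ^ r) >> 31) ^ (uint32_t)*carry);
}

int main(void)
{
    int c, v;
    add_flags(0xffffffffu, 1u, &c, &v);   /* -1 + 1: carry set, no signed overflow */
    assert(c == 1 && v == 0);
    add_flags(0x7fffffffu, 1u, &c, &v);   /* INT_MAX + 1: signed overflow, no carry */
    assert(c == 0 && v == 1);
    return 0;
}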
*/ + if (carry_src_ar != 0) { + if (flags & SRC2_IMM) + FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(carry_src_ar) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + + if (carry_src_ar == 0) + return SLJIT_SUCCESS; + + /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG)); + /* Set carry flag. */ + return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_SUB: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_handled = 0; + + if (flags & SRC2_IMM) { + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + is_handled = 1; + } + } + + if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { + is_handled = 1; + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + switch (GET_FLAG_TYPE(op)) { + case SLJIT_LESS: + case SLJIT_GREATER_EQUAL: + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + break; + case SLJIT_GREATER: + case SLJIT_LESS_EQUAL: + FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + break; + case SLJIT_SIG_LESS: + case SLJIT_SIG_GREATER_EQUAL: + FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + break; + case SLJIT_SIG_GREATER: + case SLJIT_SIG_LESS_EQUAL: + FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + break; + } + } + + if (is_handled) { + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)); + } + else { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)); + } + return SLJIT_SUCCESS; + } + + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else + FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG)); + + /* Only the zero flag is needed. 
*/ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG)); + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (!is_overflow) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | D(TMP_REG1), DR(TMP_REG1))); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); + return push_inst(compiler, XOR | S(TMP_REG1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_SUBC: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + + FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1))); + + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst))); + + if (!is_carry) + return SLJIT_SUCCESS; + + return push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & SRC2_IMM)); + + if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) { +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)); +#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1) +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); +#else /* !SLJIT_CONFIG_MIPS_32 */ + if (op & SLJIT_32) + return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); + FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS)); + return push_inst(compiler, MFLO | D(dst), DR(dst)); +#endif /* SLJIT_CONFIG_MIPS_32 */ +#else /* SLJIT_MIPS_REV < 1 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); + return push_inst(compiler, MFLO | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 6 */ + } + +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst))); + FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); +#else /* SLJIT_MIPS_REV < 6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) 
| T(src2), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); +#endif /* SLJIT_MIPS_REV >= 6 */ + FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG)); + return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG); + + case SLJIT_AND: + EMIT_LOGICAL(ANDI, AND); + return SLJIT_SUCCESS; + + case SLJIT_OR: + EMIT_LOGICAL(ORI, OR); + return SLJIT_SUCCESS; + + case SLJIT_XOR: + EMIT_LOGICAL(XORI, XOR); + return SLJIT_SUCCESS; + + case SLJIT_SHL: + case SLJIT_MSHL: + EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV); + break; + + case SLJIT_LSHR: + case SLJIT_MLSHR: + EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV); + break; + + case SLJIT_ASHR: + case SLJIT_MASHR: + EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV); + break; + +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) + case SLJIT_ROTL: + if ((flags & SRC2_IMM) || src2 == 0) { +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + src2 = -src2 & 0x1f; +#else /* !SLJIT_CONFIG_MIPS_32 */ + src2 = -src2 & ((op & SLJIT_32) ? 0x1f : 0x3f); +#endif /* SLJIT_CONFIG_MIPS_32 */ + } else { + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | D(TMP_REG2), DR(TMP_REG2))); + src2 = TMP_REG2; + } + /* fallthrough */ + + case SLJIT_ROTR: + EMIT_SHIFT(DROTR, DROTR32, ROTR, DROTRV, ROTRV); + break; +#else /* SLJIT_MIPS_REV < 1 */ + case SLJIT_ROTL: + case SLJIT_ROTR: + if (flags & SRC2_IMM) { + SLJIT_ASSERT(src2 != 0); +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (!(op & SLJIT_32)) { + if (GET_OPCODE(op) == SLJIT_ROTL) + op_imm = ((src2 < 32) ? DSLL : DSLL32); + else + op_imm = ((src2 < 32) ? DSRL : DSRL32); + + FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(OTHER_FLAG) | (((sljit_ins)src2 & 0x1f) << 6), OTHER_FLAG)); + + src2 = 64 - src2; + if (GET_OPCODE(op) == SLJIT_ROTL) + op_imm = ((src2 < 32) ? DSRL : DSRL32); + else + op_imm = ((src2 < 32) ? DSLL : DSLL32); + + FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | (((sljit_ins)src2 & 0x1f) << 6), DR(dst))); + return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)); + } +#endif /* SLJIT_CONFIG_MIPS_64 */ + + op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SLL : SRL; + FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(OTHER_FLAG) | ((sljit_ins)src2 << 6), OTHER_FLAG)); + + src2 = 32 - src2; + op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SRL : SLL; + FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | (((sljit_ins)src2 & 0x1f) << 6), DR(dst))); + return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)); + } + + if (src2 == 0) { + if (dst != src1) + return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | TA(0) | D(dst), DR(dst)); + return SLJIT_SUCCESS; + } + + FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(0) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + if (!(op & SLJIT_32)) { + op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? DSLLV : DSRLV; + FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? DSRLV : DSLLV; + FAIL_IF(push_inst(compiler, op_v | SA(EQUAL_FLAG) | T(src1) | D(dst), DR(dst))); + return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)); + } +#endif /* SLJIT_CONFIG_MIPS_64 */ + + op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? 
SLLV : SRLV; + FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG)); + op_v = (GET_OPCODE(op) == SLJIT_ROTL) ? SRLV : SLLV; + FAIL_IF(push_inst(compiler, op_v | SA(EQUAL_FLAG) | T(src1) | D(dst), DR(dst))); + return push_inst(compiler, OR | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)); +#endif /* SLJIT_MIPS_REV >= 2 */ + + default: + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + if ((flags & SRC2_IMM) || src2 == 0) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst)); + } + + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst)); +#else /* !SLJIT_CONFIG_MIPS_32 */ + if ((flags & SRC2_IMM) || src2 == 0) { + if (src2 >= 32) { + SLJIT_ASSERT(!(op & SLJIT_32)); + ins = op_dimm32; + src2 -= 32; + } + else + ins = (op & SLJIT_32) ? op_imm : op_dimm; + + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst)); + } + + ins = (op & SLJIT_32) ? op_v : op_dv; + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst)); +#endif /* SLJIT_CONFIG_MIPS_32 */ +} + +#define CHECK_IMM(flags, srcw) \ + ((!((flags) & LOGICAL_OP) && ((srcw) <= SIMM_MAX && (srcw) >= SIMM_MIN)) \ + || (((flags) & LOGICAL_OP) && !((srcw) & ~UIMM_MAX))) + static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, sljit_s32 dst, sljit_sw dstw, sljit_s32 src1, sljit_sw src1w, @@ -1325,25 +2048,18 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3 flags |= SLOW_DEST; if (flags & IMM_OP) { - if ((src2 & SLJIT_IMM) && src2w) { - if ((!(flags & LOGICAL_OP) && (src2w <= SIMM_MAX && src2w >= SIMM_MIN)) - || ((flags & LOGICAL_OP) && !(src2w & ~UIMM_MAX))) { - flags |= SRC2_IMM; - src2_r = src2w; - } - } - if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) { - if ((!(flags & LOGICAL_OP) && (src1w <= SIMM_MAX && src1w >= SIMM_MIN)) - || ((flags & LOGICAL_OP) && !(src1w & ~UIMM_MAX))) { - flags |= SRC2_IMM; - src2_r = src1w; - - /* And swap arguments. */ - src1 = src2; - src1w = src2w; - src2 = SLJIT_IMM; - /* src2w = src2_r unneeded. */ - } + if ((src2 & SLJIT_IMM) && src2w != 0 && CHECK_IMM(flags, src2w)) { + flags |= SRC2_IMM; + src2_r = src2w; + } else if ((flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w != 0 && CHECK_IMM(flags, src1w)) { + flags |= SRC2_IMM; + src2_r = src1w; + + /* And swap arguments. */ + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + /* src2w = src2_r unneeded. 
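/* Illustrative sketch (not from the sljit sources): the two-shift-plus-OR pattern
   that the pre-R2 SLJIT_ROTL/SLJIT_ROTR fallback above expands to.  The emitted
   variable-count form negates the count (SUBU from zero) for the second shift and
   relies on SLLV/SRLV masking the count to five bits; the C version guards the
   zero case explicitly to avoid an undefined 32-bit shift. */
#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t v, unsigned n)
{
    n &= 31;                               /* shift counts are taken mod 32 */
    if (n == 0)
        return v;
    return (v << n) | (v >> (32 - n));
}

int main(void)
{
    assert(rotl32(0x80000001u, 1) == 0x00000003u);
    assert(rotl32(0x12345678u, 0) == 0x12345678u);
    return 0;
}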
*/ } } @@ -1429,6 +2145,8 @@ static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s3 return SLJIT_SUCCESS; } +#undef CHECK_IMM + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) { #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) @@ -1584,6 +2302,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); case SLJIT_CLZ: + case SLJIT_CTZ: return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); } @@ -1635,8 +2354,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile return emit_op(compiler, op, flags | CUMULATIVE_OP | LOGICAL_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: + case SLJIT_ROTL: + case SLJIT_ROTR: #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) if (src2 & SLJIT_IMM) src2w &= 0x1f; @@ -1662,13 +2386,106 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w); } +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) +#define SELECT_OP3(op, src2w, D, D32, W) (((op & SLJIT_32) ? (W) : ((src2w) < 32) ? (D) : (D32)) | (((sljit_ins)src2w & 0x1f) << 6)) +#define SELECT_OP2(op, D, W) ((op & SLJIT_32) ? (W) : (D)) +#else /* !SLJIT_CONFIG_MIPS_64 */ +#define SELECT_OP3(op, src2w, D, D32, W) ((W) | ((sljit_ins)(src2w) << 6)) +#define SELECT_OP2(op, D, W) (W) +#endif /* SLJIT_CONFIG_MIPS_64 */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_left; + sljit_ins ins1, ins2, ins3; +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) + sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA; + sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64; +#else /* !SLJIT_CONFIG_MIPS_64 */ + sljit_s32 inp_flags = WORD_DATA | LOAD_DATA; + sljit_sw bit_length = 32; +#endif /* SLJIT_CONFIG_MIPS_64 */ + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, (is_left ? 
SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (src2 & SLJIT_IMM) { + src2w &= bit_length - 1; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, DR(TMP_REG2), src2, src2w)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, DR(TMP_REG1), src1, src1w)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { + if (is_left) { + ins1 = SELECT_OP3(op, src2w, DSLL, DSLL32, SLL); + src2w = bit_length - src2w; + ins2 = SELECT_OP3(op, src2w, DSRL, DSRL32, SRL); + } else { + ins1 = SELECT_OP3(op, src2w, DSRL, DSRL32, SRL); + src2w = bit_length - src2w; + ins2 = SELECT_OP3(op, src2w, DSLL, DSLL32, SLL); + } + + FAIL_IF(push_inst(compiler, ins1 | T(src_dst) | D(src_dst), DR(src_dst))); + FAIL_IF(push_inst(compiler, ins2 | T(src1) | D(TMP_REG1), DR(TMP_REG1))); + return push_inst(compiler, OR | S(src_dst) | T(TMP_REG1) | D(src_dst), DR(src_dst)); + } + + if (is_left) { + ins1 = SELECT_OP2(op, DSRL, SRL); + ins2 = SELECT_OP2(op, DSLLV, SLLV); + ins3 = SELECT_OP2(op, DSRLV, SRLV); + } else { + ins1 = SELECT_OP2(op, DSLL, SLL); + ins2 = SELECT_OP2(op, DSRLV, SRLV); + ins3 = SELECT_OP2(op, DSLLV, SLLV); + } + + FAIL_IF(push_inst(compiler, ins2 | S(src2) | T(src_dst) | D(src_dst), DR(src_dst))); + + if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) { + FAIL_IF(push_inst(compiler, ins1 | T(src1) | D(TMP_REG1) | (1 << 6), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, XORI | S(src2) | T(TMP_REG2) | ((sljit_ins)bit_length - 1), DR(TMP_REG2))); + src1 = TMP_REG1; + } else + FAIL_IF(push_inst(compiler, SELECT_OP2(op, DSUBU, SUBU) | SA(0) | T(src2) | D(TMP_REG2), DR(TMP_REG2))); + + FAIL_IF(push_inst(compiler, ins3 | S(TMP_REG2) | T(src1) | D(TMP_REG1), DR(TMP_REG1))); + return push_inst(compiler, OR | S(src_dst) | T(TMP_REG1) | D(src_dst), DR(src_dst)); +} + +#undef SELECT_OP3 +#undef SELECT_OP2 + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -1746,14 +2563,19 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp FAIL_IF(push_inst(compiler, (TRUNC_W_S ^ (flags >> 19)) | FMT(op) | FS(src) | FD(TMP_FREG1), MOVABLE_INS)); - if (FAST_IS_REG(dst)) - return push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS); + if (FAST_IS_REG(dst)) { + FAIL_IF(push_inst(compiler, MFC1 | flags | T(dst) | FS(TMP_FREG1), MOVABLE_INS)); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + return SLJIT_SUCCESS; + } /* Store the integer value from a VFP register. */ return emit_op_mem2(compiler, flags ? DOUBLE_DATA : SINGLE_DATA, FR(TMP_FREG1), dst, dstw, 0, 0); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) -# undef is_long +# undef flags #endif } @@ -1769,19 +2591,25 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp sljit_s32 dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; - if (FAST_IS_REG(src)) + if (FAST_IS_REG(src)) { FAIL_IF(push_inst(compiler, MTC1 | flags | T(src) | FS(TMP_FREG1), MOVABLE_INS)); - else if (src & SLJIT_MEM) { +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + } else if (src & SLJIT_MEM) { /* Load the integer value into a VFP register. */ - FAIL_IF(emit_op_mem2(compiler, ((flags) ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw)); + FAIL_IF(emit_op_mem2(compiler, (flags ? DOUBLE_DATA : SINGLE_DATA) | LOAD_DATA, FR(TMP_FREG1), src, srcw, dst, dstw)); } else { -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) srcw = (sljit_s32)srcw; #endif FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw)); FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS)); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif } FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | ((((sljit_ins)op & SLJIT_32) ^ SLJIT_32) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS)); @@ -1812,20 +2640,38 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile } switch (GET_FLAG_TYPE(op)) { - case SLJIT_EQUAL_F64: - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + inst = C_EQ_S; + break; + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: inst = C_UEQ_S; break; - case SLJIT_LESS_F64: - case SLJIT_GREATER_EQUAL_F64: + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + inst = C_OLT_S; + break; + case SLJIT_F_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_ORDERED_GREATER_EQUAL: inst = C_ULT_S; break; - case SLJIT_GREATER_F64: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_LESS_EQUAL: inst = C_ULE_S; break; + case SLJIT_F_LESS_EQUAL: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED_LESS_EQUAL: + inst = C_OLE_S; + break; default: - SLJIT_ASSERT(GET_FLAG_TYPE(op) == SLJIT_UNORDERED_F64 || GET_FLAG_TYPE(op) == SLJIT_ORDERED_F64); + SLJIT_ASSERT(GET_FLAG_TYPE(op) == SLJIT_UNORDERED || GET_FLAG_TYPE(op) == SLJIT_ORDERED); inst = C_UN_S; break; } @@ -1871,6 +2717,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil FAIL_IF(push_inst(compiler, ABS_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS)); break; case SLJIT_CONV_F64_FROM_F32: + /* The SLJIT_32 bit is inverted because sljit_f32 needs to be loaded from the memory. */ FAIL_IF(push_inst(compiler, CVT_S_S | (sljit_ins)((op & SLJIT_32) ? 
1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS)); op ^= SLJIT_32; break; @@ -1959,6 +2806,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil return SLJIT_SUCCESS; } +#undef FLOAT_DATA +#undef FMT + /* --------------------------------------------------------------------- */ /* Other instructions */ /* --------------------------------------------------------------------- */ @@ -2000,18 +2850,18 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi } #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) -#define JUMP_LENGTH 4 +#define BRANCH_LENGTH 4 #else -#define JUMP_LENGTH 8 +#define BRANCH_LENGTH 8 #endif #define BR_Z(src) \ - inst = BEQ | SA(src) | TA(0) | JUMP_LENGTH; \ + inst = BEQ | SA(src) | TA(0) | BRANCH_LENGTH; \ flags = IS_BIT26_COND; \ delay_check = src; #define BR_NZ(src) \ - inst = BNE | SA(src) | TA(0) | JUMP_LENGTH; \ + inst = BNE | SA(src) | TA(0) | BRANCH_LENGTH; \ flags = IS_BIT26_COND; \ delay_check = src; @@ -2029,11 +2879,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compi #else /* SLJIT_MIPS_REV < 6 */ #define BR_T() \ - inst = BC1T | JUMP_LENGTH; \ + inst = BC1T | BRANCH_LENGTH; \ flags = IS_BIT16_COND; \ delay_check = FCSR_FCC; #define BR_F() \ - inst = BC1F | JUMP_LENGTH; \ + inst = BC1F | BRANCH_LENGTH; \ flags = IS_BIT16_COND; \ delay_check = FCSR_FCC; @@ -2077,16 +2927,28 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile case SLJIT_NOT_CARRY: BR_NZ(OTHER_FLAG); break; - case SLJIT_NOT_EQUAL_F64: - case SLJIT_GREATER_EQUAL_F64: - case SLJIT_GREATER_F64: - case SLJIT_ORDERED_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_F_GREATER: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED: BR_T(); break; - case SLJIT_EQUAL_F64: - case SLJIT_LESS_F64: - case SLJIT_LESS_EQUAL_F64: - case SLJIT_UNORDERED_F64: + case SLJIT_F_EQUAL: + case SLJIT_F_LESS: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_UNORDERED_OR_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: + case SLJIT_UNORDERED: BR_F(); break; default: @@ -2102,8 +2964,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile if (inst) PTR_FAIL_IF(push_inst(compiler, inst, UNMOVABLE_INS)); - PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); - if (type <= SLJIT_JUMP) PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); else { @@ -2113,6 +2973,13 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile jump->addr = compiler->size; PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + + /* Maximum number of instructions required for generating a constant. */ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + compiler->size += 2; +#else + compiler->size += 6; +#endif return jump; } @@ -2151,11 +3018,17 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler compiler->cache_arg = 0; compiler->cache_argw = 0; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + flags = WORD_DATA | LOAD_DATA; +#else /* !SLJIT_CONFIG_MIPS_32 */ flags = ((type & SLJIT_32) ? 
INT_DATA : WORD_DATA) | LOAD_DATA; +#endif /* SLJIT_CONFIG_MIPS_32 */ + if (src1 & SLJIT_MEM) { PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG1), src1, src1w, src2, src2w)); src1 = TMP_REG1; } + if (src2 & SLJIT_MEM) { PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG2), src2, src2w, 0, 0)); src2 = TMP_REG2; @@ -2172,7 +3045,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler jump->flags |= IS_BIT26_COND; if (compiler->delay_slot == MOVABLE_INS || (compiler->delay_slot != UNMOVABLE_INS && compiler->delay_slot != DR(src1) && compiler->delay_slot != DR(src2))) jump->flags |= IS_MOVABLE; - PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(src1) | T(src2) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(src1) | T(src2) | BRANCH_LENGTH, UNMOVABLE_INS)); } else if (type >= SLJIT_SIG_LESS && (((src1 & SLJIT_IMM) && (src1w == 0)) || ((src2 & SLJIT_IMM) && (src2w == 0)))) { inst = NOP; @@ -2219,7 +3092,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler break; } } - PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | BRANCH_LENGTH, UNMOVABLE_INS)); } else { if (type == SLJIT_LESS || type == SLJIT_GREATER_EQUAL || type == SLJIT_SIG_LESS || type == SLJIT_SIG_GREATER_EQUAL) { @@ -2244,20 +3117,26 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler } jump->flags |= IS_BIT26_COND; - PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_EQUAL ? BNE : BEQ) | S(TMP_REG1) | TA(0) | BRANCH_LENGTH, UNMOVABLE_INS)); } - PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); jump->addr = compiler->size; PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + + /* Maximum number of instructions required for generating a constant. */ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + compiler->size += 2; +#else + compiler->size += 6; +#endif return jump; } #undef RESOLVE_IMM1 #undef RESOLVE_IMM2 -#undef JUMP_LENGTH +#undef BRANCH_LENGTH #undef BR_Z #undef BR_NZ #undef BR_T @@ -2272,7 +3151,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi CHECK_ERROR(); CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); if (src & SLJIT_IMM) { jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); @@ -2283,17 +3161,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi if (compiler->delay_slot != UNMOVABLE_INS) jump->flags |= IS_MOVABLE; - FAIL_IF(emit_const(compiler, TMP_REG2, 0)); src = TMP_REG2; - } - else if (src & SLJIT_MEM) { + } else if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG2), src, srcw)); src = TMP_REG2; } - FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS)); - if (jump) + if (type <= SLJIT_JUMP) + FAIL_IF(push_inst(compiler, JR | S(src), UNMOVABLE_INS)); + else + FAIL_IF(push_inst(compiler, JALR | S(src) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + + if (jump != NULL) { jump->addr = compiler->size; + + /* Maximum number of instructions required for generating a constant. 
*/ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + compiler->size += 2; +#else + compiler->size += 6; +#endif + } + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); return SLJIT_SUCCESS; } @@ -2302,7 +3192,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co sljit_s32 dst, sljit_sw dstw, sljit_s32 type) { - sljit_s32 src_ar, dst_ar; + sljit_s32 src_ar, dst_ar, invert; sljit_s32 saved_op = op; #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) sljit_s32 mem_type = WORD_DATA; @@ -2323,32 +3213,45 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co if (op >= SLJIT_ADD && (dst & SLJIT_MEM)) FAIL_IF(emit_op_mem2(compiler, mem_type | LOAD_DATA, DR(TMP_REG1), dst, dstw, dst, dstw)); - switch (type & 0xff) { - case SLJIT_EQUAL: - case SLJIT_NOT_EQUAL: - FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); - src_ar = dst_ar; - break; - case SLJIT_OVERFLOW: - case SLJIT_NOT_OVERFLOW: - if (compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) { - src_ar = OTHER_FLAG; + if (type < SLJIT_F_EQUAL) { + src_ar = OTHER_FLAG; + invert = type & 0x1; + + switch (type) { + case SLJIT_EQUAL: + case SLJIT_NOT_EQUAL: + FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; + break; + case SLJIT_OVERFLOW: + case SLJIT_NOT_OVERFLOW: + if (compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) { + src_ar = OTHER_FLAG; + break; + } + FAIL_IF(push_inst(compiler, SLTIU | SA(OTHER_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); + src_ar = dst_ar; + invert ^= 0x1; break; } - FAIL_IF(push_inst(compiler, SLTIU | SA(OTHER_FLAG) | TA(dst_ar) | IMM(1), dst_ar)); - src_ar = dst_ar; - type ^= 0x1; /* Flip type bit for the XORI below. */ - break; - case SLJIT_GREATER_F64: - case SLJIT_LESS_EQUAL_F64: - type ^= 0x1; /* Flip type bit for the XORI below. 
*/ - /* fallthrough */ - case SLJIT_EQUAL_F64: - case SLJIT_NOT_EQUAL_F64: - case SLJIT_LESS_F64: - case SLJIT_GREATER_EQUAL_F64: - case SLJIT_UNORDERED_F64: - case SLJIT_ORDERED_F64: + } else { + invert = 0; + + switch (type) { + case SLJIT_F_NOT_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_F_GREATER: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED: + invert = 1; + break; + } + #if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) FAIL_IF(push_inst(compiler, MFC1 | TA(dst_ar) | FS(TMP_FREG3), dst_ar)); #else /* SLJIT_MIPS_REV < 6 */ @@ -2357,14 +3260,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co FAIL_IF(push_inst(compiler, SRL | TA(dst_ar) | DA(dst_ar) | SH_IMM(23), dst_ar)); FAIL_IF(push_inst(compiler, ANDI | SA(dst_ar) | TA(dst_ar) | IMM(1), dst_ar)); src_ar = dst_ar; - break; - - default: - src_ar = OTHER_FLAG; - break; } - if (type & 0x1) { + if (invert) { FAIL_IF(push_inst(compiler, XORI | SA(src_ar) | TA(dst_ar) | IMM(1), dst_ar)); src_ar = dst_ar; } @@ -2404,7 +3302,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) - if (dst_reg & SLJIT_32) + if (type & SLJIT_32) srcw = (sljit_s32)srcw; #endif FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw)); @@ -2412,9 +3310,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil srcw = 0; } - dst_reg &= ~SLJIT_32; - - switch (type & 0xff) { + switch (type & ~SLJIT_32) { case SLJIT_EQUAL: ins = MOVZ | TA(EQUAL_FLAG); break; @@ -2435,16 +3331,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil case SLJIT_NOT_OVERFLOW: ins = MOVZ | TA(OTHER_FLAG); break; - case SLJIT_EQUAL_F64: - case SLJIT_LESS_F64: - case SLJIT_LESS_EQUAL_F64: - case SLJIT_UNORDERED_F64: + case SLJIT_F_EQUAL: + case SLJIT_F_LESS: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_UNORDERED_OR_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: + case SLJIT_UNORDERED: ins = MOVT; break; - case SLJIT_NOT_EQUAL_F64: - case SLJIT_GREATER_EQUAL_F64: - case SLJIT_GREATER_F64: - case SLJIT_ORDERED_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_F_GREATER: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED: ins = MOVF; break; default: @@ -2460,6 +3368,308 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil #endif /* SLJIT_MIPS_REV >= 1 */ } +static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s16 max_offset) +{ + sljit_s32 arg = *mem; + sljit_sw argw = *memw; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + + if (SLJIT_UNLIKELY(argw)) { + FAIL_IF(push_inst(compiler, SLL_W | T(OFFS_REG(arg)) | D(TMP_REG1) | SH_IMM(argw), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG1) | T(arg & REG_MASK) | D(TMP_REG1), DR(TMP_REG1))); + } else + FAIL_IF(push_inst(compiler, ADDU_W | S(arg & REG_MASK) | T(OFFS_REG(arg)) | D(TMP_REG1), DR(TMP_REG1))); + + *mem = 
TMP_REG1; + *memw = 0; + + return SLJIT_SUCCESS; + } + + if (argw <= max_offset && argw >= SIMM_MIN) { + *mem = arg & REG_MASK; + return SLJIT_SUCCESS; + } + + *mem = TMP_REG1; + + if ((sljit_s16)argw > max_offset) { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), argw)); + *memw = 0; + } else { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), TO_ARGW_HI(argw))); + *memw = (sljit_s16)argw; + } + + if ((arg & REG_MASK) == 0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADDU_W | S(TMP_REG1) | T(arg & REG_MASK) | D(TMP_REG1), DR(TMP_REG1)); +} + +#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#define MEM16_IMM_FIRST(memw) IMM((memw) + 1) +#define MEM16_IMM_SECOND(memw) IMM(memw) +#define MEMF64_FS_FIRST(freg) FS(freg) +#define MEMF64_FS_SECOND(freg) (FS(freg) | ((sljit_ins)1 << 11)) +#else /* !SLJIT_LITTLE_ENDIAN */ +#define MEM16_IMM_FIRST(memw) IMM(memw) +#define MEM16_IMM_SECOND(memw) IMM((memw) + 1) +#define MEMF64_FS_FIRST(freg) (FS(freg) | ((sljit_ins)1 << 11)) +#define MEMF64_FS_SECOND(freg) FS(freg) +#endif /* SLJIT_LITTLE_ENDIAN */ + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define MEM_CHECK_UNALIGNED(type) ((type) & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16)) +#else /* !SLJIT_CONFIG_MIPS_32 */ +#define MEM_CHECK_UNALIGNED(type) ((type) & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32)) +#endif /* SLJIT_CONFIG_MIPS_32 */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 op = type & 0xff; + sljit_s32 flags = 0; + sljit_ins ins; +#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + sljit_ins ins_right; +#endif /* !(SLJIT_MIPS_REV >= 6) */ + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (reg & REG_PAIR_MASK) { + ADJUST_LOCAL_OFFSET(mem, memw); + +#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + if (MEM_CHECK_UNALIGNED(type)) { + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - (2 * SSIZE_OF(sw) - 1))); + + if (!(type & SLJIT_MEM_STORE) && (mem == REG_PAIR_FIRST(reg) || mem == REG_PAIR_SECOND(reg))) { + FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + mem = TMP_REG1; + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + ins = ((type & SLJIT_MEM_STORE) ? SWL : LWL) | S(mem); + ins_right = ((type & SLJIT_MEM_STORE) ? SWR : LWR) | S(mem); +#else /* !SLJIT_CONFIG_MIPS_32 */ + ins = ((type & SLJIT_MEM_STORE) ? SDL : LDL) | S(mem); + ins_right = ((type & SLJIT_MEM_STORE) ? SDR : LDR) | S(mem); +#endif /* SLJIT_CONFIG_MIPS_32 */ + + FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM(memw), DR(REG_PAIR_FIRST(reg)))); + FAIL_IF(push_inst(compiler, ins_right | T(REG_PAIR_FIRST(reg)) | IMM(memw + (SSIZE_OF(sw) - 1)), DR(REG_PAIR_FIRST(reg)))); + FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg)))); + return push_inst(compiler, ins_right | T(REG_PAIR_SECOND(reg)) | IMM((memw + 2 * SSIZE_OF(sw) - 1)), DR(REG_PAIR_SECOND(reg))); + } +#endif /* !(SLJIT_MIPS_REV >= 6) */ + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - SSIZE_OF(sw))); + + ins = ((type & SLJIT_MEM_STORE) ? 
STORE_W : LOAD_W) | S(mem); + + if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) { + FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg)))); + return push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM(memw), DR(REG_PAIR_FIRST(reg))); + } + + FAIL_IF(push_inst(compiler, ins | T(REG_PAIR_FIRST(reg)) | IMM(memw), DR(REG_PAIR_FIRST(reg)))); + return push_inst(compiler, ins | T(REG_PAIR_SECOND(reg)) | IMM(memw + SSIZE_OF(sw)), DR(REG_PAIR_SECOND(reg))); + } + +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); +#else /* !(SLJIT_MIPS_REV >= 6) */ + ADJUST_LOCAL_OFFSET(mem, memw); + + switch (op) { + case SLJIT_MOV_U8: + case SLJIT_MOV_S8: + flags = BYTE_DATA; + if (!(type & SLJIT_MEM_STORE)) + flags |= LOAD_DATA; + + if (op == SLJIT_MOV_S8) + flags |= SIGNED_DATA; + + return emit_op_mem(compiler, flags, DR(reg), mem, memw); + + case SLJIT_MOV_U16: + case SLJIT_MOV_S16: + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 1)); + SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2); + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, SRA_W | T(reg) | D(TMP_REG2) | SH_IMM(8), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, data_transfer_insts[BYTE_DATA] | S(mem) | T(TMP_REG2) | MEM16_IMM_FIRST(memw), MOVABLE_INS)); + return push_inst(compiler, data_transfer_insts[BYTE_DATA] | S(mem) | T(reg) | MEM16_IMM_SECOND(memw), MOVABLE_INS); + } + + flags = BYTE_DATA | LOAD_DATA; + + if (op == SLJIT_MOV_S16) + flags |= SIGNED_DATA; + + FAIL_IF(push_inst(compiler, data_transfer_insts[flags] | S(mem) | T(TMP_REG2) | MEM16_IMM_FIRST(memw), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, data_transfer_insts[BYTE_DATA | LOAD_DATA] | S(mem) | T(reg) | MEM16_IMM_SECOND(memw), DR(reg))); + FAIL_IF(push_inst(compiler, SLL_W | T(TMP_REG2) | D(TMP_REG2) | SH_IMM(8), DR(TMP_REG2))); + return push_inst(compiler, OR | S(reg) | T(TMP_REG2) | D(reg), DR(reg)); + + case SLJIT_MOV: + case SLJIT_MOV_P: +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + if (type & SLJIT_MEM_UNALIGNED_32) { + flags = WORD_DATA; + if (!(type & SLJIT_MEM_STORE)) + flags |= LOAD_DATA; + + return emit_op_mem(compiler, flags, DR(reg), mem, memw); + } +#else /* !SLJIT_CONFIG_MIPS_32 */ + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 7)); + SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2); + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, SDL | S(mem) | T(reg) | IMM(memw), MOVABLE_INS)); + return push_inst(compiler, SDR | S(mem) | T(reg) | IMM(memw + 7), MOVABLE_INS); + } + + if (mem == reg) { + FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + mem = TMP_REG1; + } + + FAIL_IF(push_inst(compiler, LDL | S(mem) | T(reg) | IMM(memw), DR(reg))); + return push_inst(compiler, LDR | S(mem) | T(reg) | IMM(memw + 7), DR(reg)); +#endif /* SLJIT_CONFIG_MIPS_32 */ + } + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - 3)); + SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2); + + if (type & SLJIT_MEM_STORE) { + FAIL_IF(push_inst(compiler, SWL | S(mem) | T(reg) | IMM(memw), MOVABLE_INS)); + return push_inst(compiler, SWR | S(mem) | T(reg) | IMM(memw + 3), MOVABLE_INS); + } + + if (mem == reg) { + FAIL_IF(push_inst(compiler, ADDU_W | S(mem) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + mem = TMP_REG1; + } + + FAIL_IF(push_inst(compiler, LWL | S(mem) | T(reg) | IMM(memw), DR(reg))); +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) 
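/* Editorial aside, not part of the patch: the LWL/LWR pair emitted just above is how
   pre-R6 MIPS performs an unaligned 32-bit load. LWL reads the bytes of the aligned
   word containing memw and LWR (issued at memw + 3, the last byte of the value) fills
   in the remainder, so the pair behaves like a single unaligned load; SWL/SWR play the
   same role for stores. A minimal portable C model of the result is sketched below; the
   helper name is our own and is not an sljit API. */

#include <stdint.h>
#include <string.h>

/* Hypothetical illustration: read a 32-bit value from an address with no alignment
   guarantee, i.e. the effect the generated LWL/LWR sequence has at run time. */
static uint32_t unaligned_load_u32(const void *addr)
{
    uint32_t value;
    memcpy(&value, addr, sizeof(value)); /* compilers lower this to the best unaligned access the target offers */
    return value;
}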
+ return push_inst(compiler, LWR | S(mem) | T(reg) | IMM(memw + 3), DR(reg)); +#else /* !SLJIT_CONFIG_MIPS_32 */ + FAIL_IF(push_inst(compiler, LWR | S(mem) | T(reg) | IMM(memw + 3), DR(reg))); + + if (op != SLJIT_MOV_U32) + return SLJIT_SUCCESS; + +#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2) + return push_inst(compiler, DINSU | T(reg) | SA(0) | (31 << 11) | (0 << 11), DR(reg)); +#else /* SLJIT_MIPS_REV < 1 */ + FAIL_IF(push_inst(compiler, DSLL32 | T(reg) | D(reg) | SH_IMM(0), DR(reg))); + return push_inst(compiler, DSRL32 | T(reg) | D(reg) | SH_IMM(0), DR(reg)); +#endif /* SLJIT_MIPS_REV >= 2 */ +#endif /* SLJIT_CONFIG_MIPS_32 */ +#endif /* SLJIT_MIPS_REV >= 6 */ +} + +#if !(defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6) + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 freg, + sljit_s32 mem, sljit_sw memw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + + FAIL_IF(update_mem_addr(compiler, &mem, &memw, SIMM_MAX - (type & SLJIT_32) ? 3 : 7)); + SLJIT_ASSERT(FAST_IS_REG(mem) && mem != TMP_REG2); + + if (type & SLJIT_MEM_STORE) { + if (type & SLJIT_32) { + FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | FS(freg), DR(TMP_REG2))); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM(memw), MOVABLE_INS)); + return push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM(memw + 3), MOVABLE_INS); + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | MEMF64_FS_FIRST(freg), DR(TMP_REG2))); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM(memw), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM(memw + 3), MOVABLE_INS)); + + FAIL_IF(push_inst(compiler, MFC1 | T(TMP_REG2) | MEMF64_FS_SECOND(freg), DR(TMP_REG2))); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + FAIL_IF(push_inst(compiler, SWL | S(mem) | T(TMP_REG2) | IMM(memw + 4), MOVABLE_INS)); + return push_inst(compiler, SWR | S(mem) | T(TMP_REG2) | IMM(memw + 7), MOVABLE_INS); +#else /* !SLJIT_CONFIG_MIPS_32 */ + FAIL_IF(push_inst(compiler, MFC1 | (1 << 21) | T(TMP_REG2) | FS(freg), DR(TMP_REG2))); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + FAIL_IF(push_inst(compiler, SDL | S(mem) | T(TMP_REG2) | IMM(memw), MOVABLE_INS)); + return push_inst(compiler, SDR | S(mem) | T(TMP_REG2) | IMM(memw + 7), MOVABLE_INS); +#endif /* SLJIT_CONFIG_MIPS_32 */ + } + + if (type & SLJIT_32) { + FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM(memw), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM(memw + 3), DR(TMP_REG2))); + + FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | FS(freg), MOVABLE_INS)); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + return SLJIT_SUCCESS; + } + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM(memw), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM(memw + 3), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | MEMF64_FS_FIRST(freg), 
MOVABLE_INS)); + + FAIL_IF(push_inst(compiler, LWL | S(mem) | T(TMP_REG2) | IMM(memw + 4), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, LWR | S(mem) | T(TMP_REG2) | IMM(memw + 7), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, MTC1 | T(TMP_REG2) | MEMF64_FS_SECOND(freg), MOVABLE_INS)); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif +#else /* !SLJIT_CONFIG_MIPS_32 */ + FAIL_IF(push_inst(compiler, LDL | S(mem) | T(TMP_REG2) | IMM(memw), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, LDR | S(mem) | T(TMP_REG2) | IMM(memw + 7), DR(TMP_REG2))); + + FAIL_IF(push_inst(compiler, MTC1 | (1 << 21) | T(TMP_REG2) | FS(freg), MOVABLE_INS)); +#if (!defined SLJIT_MIPS_REV || SLJIT_MIPS_REV <= 3) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif +#endif /* SLJIT_CONFIG_MIPS_32 */ + return SLJIT_SUCCESS; +} + +#endif /* !SLJIT_MIPS_REV || SLJIT_MIPS_REV < 6 */ + +#undef MEM16_IMM_FIRST +#undef MEM16_IMM_SECOND +#undef MEMF64_FS_FIRST +#undef MEMF64_FS_SECOND +#undef MEM_CHECK_UNALIGNED + +#undef TO_ARGW_HI + SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) { struct sljit_const *const_; @@ -2477,7 +3687,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compi PTR_FAIL_IF(emit_const(compiler, dst_r, init_value)); if (dst & SLJIT_MEM) - PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, DR(TMP_REG2), dst, dstw)); return const_; } @@ -2496,15 +3706,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj set_put_label(put_label, compiler, 0); dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r, UNMOVABLE_INS)); #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) - PTR_FAIL_IF(emit_const(compiler, dst_r, 0)); + compiler->size += 1; #else - PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r, UNMOVABLE_INS)); compiler->size += 5; #endif if (dst & SLJIT_MEM) - PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, DR(TMP_REG2), dst, dstw)); return put_label; } diff --git a/thirdparty/pcre2/src/sljit/sljitNativePPC_32.c b/thirdparty/pcre2/src/sljit/sljitNativePPC_32.c index 95fe6bbe0e..9449e4b9d7 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativePPC_32.c +++ b/thirdparty/pcre2/src/sljit/sljitNativePPC_32.c @@ -38,12 +38,15 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; } +/* Simplified mnemonics: clrlwi. 
*/ #define INS_CLEAR_LEFT(dst, src, from) \ - (RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1)) + (RLWINM | S(src) | A(dst) | RLWI_MBE(from, 31)) static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) { + sljit_u32 imm; + switch (op) { case SLJIT_MOV: case SLJIT_MOV_U32: @@ -90,6 +93,16 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl SLJIT_ASSERT(src1 == TMP_REG1); return push_inst(compiler, CNTLZW | S(src2) | A(dst)); + case SLJIT_CTZ: + SLJIT_ASSERT(src1 == TMP_REG1); + FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2))); + FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1))); + FAIL_IF(push_inst(compiler, CNTLZW | S(dst) | A(dst))); + FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM(-32))); + /* The highest bits are set, if dst < 32, zero otherwise. */ + FAIL_IF(push_inst(compiler, SRWI(27) | S(TMP_REG1) | A(TMP_REG1))); + return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1)); + case SLJIT_ADD: if (flags & ALT_FORM1) { /* Setting XER SO is not enough, CR SO is also needed. */ @@ -103,12 +116,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if (flags & ALT_FORM3) return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + imm = compiler->imm; + if (flags & ALT_FORM4) { - FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1)))); + FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1)))); src1 = dst; } - return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff)); + return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff)); } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); @@ -208,8 +223,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); - FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); - return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + imm = compiler->imm; + + FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm))); + return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16)); } return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); @@ -224,34 +241,78 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); - FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); - return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + imm = compiler->imm; + + FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm))); + return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16)); } return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_SHL: + case SLJIT_MSHL: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); - compiler->imm &= 0x1f; - return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); + imm = compiler->imm & 0x1f; + return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst)); + } + + if (op == SLJIT_MSHL) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f)); + src2 = TMP_REG2; } + return push_inst(compiler, SLW | RC(flags) | S(src1) | A(dst) | B(src2)); case 
SLJIT_LSHR: + case SLJIT_MLSHR: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); - compiler->imm &= 0x1f; - return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); + imm = compiler->imm & 0x1f; + /* Since imm can be 0, SRWI() cannot be used. */ + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31)); + } + + if (op == SLJIT_MLSHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f)); + src2 = TMP_REG2; } + return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_ASHR: + case SLJIT_MASHR: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); - compiler->imm &= 0x1f; - return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11)); + imm = compiler->imm & 0x1f; + return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11)); + } + + if (op == SLJIT_MASHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f)); + src2 = TMP_REG2; } + return push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_ROTL: + case SLJIT_ROTR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + imm = compiler->imm; + + if (op == SLJIT_ROTR) + imm = (sljit_u32)(-(sljit_s32)imm); + + imm &= 0x1f; + return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31)); + } + + if (op == SLJIT_ROTR) { + FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0)); + src2 = TMP_REG2; + } + + return push_inst(compiler, RLWNM | S(src1) | A(dst) | B(src2) | RLWI_MBE(0, 31)); } SLJIT_UNREACHABLE(); @@ -277,8 +338,3 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 2); } - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) -{ - sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset); -} diff --git a/thirdparty/pcre2/src/sljit/sljitNativePPC_64.c b/thirdparty/pcre2/src/sljit/sljitNativePPC_64.c index d104f6d75f..80549108bf 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativePPC_64.c +++ b/thirdparty/pcre2/src/sljit/sljitNativePPC_64.c @@ -35,8 +35,9 @@ #error "Must implement count leading zeroes" #endif -#define PUSH_RLDICR(reg, shift) \ - push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1)) +/* Computes SLDI(63 - shift). */ +#define PUSH_SLDI_NEG(reg, shift) \ + push_inst(compiler, RLDICR | S(reg) | A(reg) | RLDI_SH(63 - shift) | RLDI_ME(shift)) static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm) { @@ -66,14 +67,14 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, if ((tmp & ~0xffff000000000000ul) == 0) { FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48))); shift += 15; - return PUSH_RLDICR(reg, shift); + return PUSH_SLDI_NEG(reg, shift); } if ((tmp & ~0xffffffff00000000ul) == 0) { FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)(tmp >> 48))); FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp >> 32))); shift += 31; - return PUSH_RLDICR(reg, shift); + return PUSH_SLDI_NEG(reg, shift); } /* Cut out the 16 bit from immediate. 
*/ @@ -82,13 +83,13 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, if (tmp2 <= 0xffff) { FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48))); - FAIL_IF(PUSH_RLDICR(reg, shift)); + FAIL_IF(PUSH_SLDI_NEG(reg, shift)); return push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)tmp2); } if (tmp2 <= 0xffffffff) { FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); - FAIL_IF(PUSH_RLDICR(reg, shift)); + FAIL_IF(PUSH_SLDI_NEG(reg, shift)); FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 16))); return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp2)) : SLJIT_SUCCESS; } @@ -100,22 +101,23 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48))); shift2 += 15; shift += (63 - shift2); - FAIL_IF(PUSH_RLDICR(reg, shift)); + FAIL_IF(PUSH_SLDI_NEG(reg, shift)); FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 48))); - return PUSH_RLDICR(reg, shift2); + return PUSH_SLDI_NEG(reg, shift2); } /* The general version. */ FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)((sljit_uw)imm >> 48))); FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm >> 32))); - FAIL_IF(PUSH_RLDICR(reg, 31)); + FAIL_IF(PUSH_SLDI_NEG(reg, 31)); FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(imm >> 16))); return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)); } -/* Simplified mnemonics: clrldi. */ -#define INS_CLEAR_LEFT(dst, src, from) \ - (RLDICL | S(src) | A(dst) | ((from) << 6) | (1 << 5)) +#undef PUSH_SLDI_NEG + +#define CLRLDI(dst, src, n) \ + (RLDICL | S(src) | A(dst) | RLDI_SH(0) | RLDI_MB(n)) /* Sign extension for integer operations. 
*/ #define UN_EXTS() \ @@ -145,6 +147,8 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) { + sljit_u32 imm; + switch (op) { case SLJIT_MOV: case SLJIT_MOV_P: @@ -159,7 +163,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { if (op == SLJIT_MOV_S32) return push_inst(compiler, EXTSW | S(src2) | A(dst)); - return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0)); + return push_inst(compiler, CLRLDI(dst, src2, 32)); } else { SLJIT_ASSERT(dst == src2); @@ -172,7 +176,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { if (op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); - return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); + return push_inst(compiler, CLRLDI(dst, src2, 56)); } else if ((flags & REG_DEST) && op == SLJIT_MOV_S8) return push_inst(compiler, EXTSB | S(src2) | A(dst)); @@ -187,7 +191,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { if (op == SLJIT_MOV_S16) return push_inst(compiler, EXTSH | S(src2) | A(dst)); - return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); + return push_inst(compiler, CLRLDI(dst, src2, 48)); } else { SLJIT_ASSERT(dst == src2); @@ -201,22 +205,30 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl case SLJIT_CLZ: SLJIT_ASSERT(src1 == TMP_REG1); - if (flags & ALT_FORM1) - return push_inst(compiler, CNTLZW | S(src2) | A(dst)); - return push_inst(compiler, CNTLZD | S(src2) | A(dst)); + return push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(src2) | A(dst)); + + case SLJIT_CTZ: + SLJIT_ASSERT(src1 == TMP_REG1); + FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2))); + FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1))); + FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(dst) | A(dst))); + FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM((flags & ALT_FORM1) ? -32 : -64))); + /* The highest bits are set, if dst < bit width, zero otherwise. */ + FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? SRWI(27) : SRDI(58)) | S(TMP_REG1) | A(TMP_REG1))); + return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1)); case SLJIT_ADD: if (flags & ALT_FORM1) { if (flags & ALT_SIGN_EXT) { - FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1))); + FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1))); src1 = TMP_REG1; - FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1))); + FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2))); src2 = TMP_REG2; } /* Setting XER SO is not enough, CR SO is also needed. 
*/ FAIL_IF(push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2))); if (flags & ALT_SIGN_EXT) - return push_inst(compiler, RLDI(dst, dst, 32, 32, 0)); + return push_inst(compiler, SRDI(32) | S(dst) | A(dst)); return SLJIT_SUCCESS; } @@ -227,12 +239,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if (flags & ALT_FORM3) return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + imm = compiler->imm; + if (flags & ALT_FORM4) { - FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1)))); + FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1)))); src1 = dst; } - return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff)); + return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff)); } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); @@ -287,11 +301,11 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl if (flags & ALT_FORM3) { if (flags & ALT_SIGN_EXT) { if (src1 != TMP_ZERO) { - FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1))); + FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1))); src1 = TMP_REG1; } if (src2 != TMP_ZERO) { - FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1))); + FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2))); src2 = TMP_REG2; } } @@ -303,7 +317,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl FAIL_IF(push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2))); if (flags & ALT_SIGN_EXT) - return push_inst(compiler, RLDI(dst, dst, 32, 32, 0)); + return push_inst(compiler, SRDI(32) | S(dst) | A(dst)); return SLJIT_SUCCESS; } @@ -362,8 +376,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); - FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); - return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + imm = compiler->imm; + + FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm))); + return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16)); } return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); @@ -378,46 +394,105 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl } if (flags & ALT_FORM3) { SLJIT_ASSERT(src2 == TMP_REG2); - FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); - return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + imm = compiler->imm; + + FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm))); + return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16)); } return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_SHL: + case SLJIT_MSHL: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); + imm = compiler->imm; + if (flags & ALT_FORM2) { - compiler->imm &= 0x1f; - return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); + imm &= 0x1f; + return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst)); } - compiler->imm &= 0x3f; - return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags)); + + imm &= 0x3f; + return push_inst(compiler, SLDI(imm) | RC(flags) | S(src1) | 
A(dst)); } + + if (op == SLJIT_MSHL) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f))); + src2 = TMP_REG2; + } + return push_inst(compiler, ((flags & ALT_FORM2) ? SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_LSHR: + case SLJIT_MLSHR: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); + imm = compiler->imm; + if (flags & ALT_FORM2) { - compiler->imm &= 0x1f; - return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); + imm &= 0x1f; + /* Since imm can be 0, SRWI() cannot be used. */ + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31)); } - compiler->imm &= 0x3f; - return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags)); + + imm &= 0x3f; + /* Since imm can be 0, SRDI() cannot be used. */ + return push_inst(compiler, RLDICL | RC(flags) | S(src1) | A(dst) | RLDI_SH((64 - imm) & 0x3f) | RLDI_MB(imm)); } + + if (op == SLJIT_MLSHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f))); + src2 = TMP_REG2; + } + return push_inst(compiler, ((flags & ALT_FORM2) ? SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2)); case SLJIT_ASHR: + case SLJIT_MASHR: if (flags & ALT_FORM1) { SLJIT_ASSERT(src2 == TMP_REG2); + imm = compiler->imm; + if (flags & ALT_FORM2) { - compiler->imm &= 0x1f; - return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11)); + imm &= 0x1f; + return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11)); } - compiler->imm &= 0x3f; - return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4)); + + imm &= 0x3f; + return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | RLDI_SH(imm)); } + + if (op == SLJIT_MASHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f))); + src2 = TMP_REG2; + } + return push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_ROTL: + case SLJIT_ROTR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + imm = compiler->imm; + + if (op == SLJIT_ROTR) + imm = (sljit_u32)(-(sljit_s32)imm); + + if (flags & ALT_FORM2) { + imm &= 0x1f; + return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31)); + } + + imm &= 0x3f; + return push_inst(compiler, RLDICL | S(src1) | A(dst) | RLDI_SH(imm)); + } + + if (op == SLJIT_ROTR) { + FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0)); + src2 = TMP_REG2; + } + + return push_inst(compiler, ((flags & ALT_FORM2) ? 
(RLWNM | RLWI_MBE(0, 31)) : (RLDCL | RLDI_MB(0))) | S(src1) | A(dst) | B(src2)); } SLJIT_UNREACHABLE(); @@ -483,7 +558,7 @@ static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_ { FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48))); FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32))); - FAIL_IF(PUSH_RLDICR(reg, 31)); + FAIL_IF(push_inst(compiler, SLDI(32) | S(reg) | A(reg))); FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(init_value >> 16))); return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); } @@ -502,8 +577,3 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); SLJIT_CACHE_FLUSH(inst, inst + 5); } - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) -{ - sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset); -} diff --git a/thirdparty/pcre2/src/sljit/sljitNativePPC_common.c b/thirdparty/pcre2/src/sljit/sljitNativePPC_common.c index 8bfdc69522..f387114733 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativePPC_common.c +++ b/thirdparty/pcre2/src/sljit/sljitNativePPC_common.c @@ -203,8 +203,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define OR (HI(31) | LO(444)) #define ORI (HI(24)) #define ORIS (HI(25)) -#define RLDICL (HI(30)) +#define RLDCL (HI(30) | LO(8)) +#define RLDICL (HI(30) | LO(0 << 1)) +#define RLDICR (HI(30) | LO(1 << 1)) +#define RLDIMI (HI(30) | LO(3 << 1)) +#define RLWIMI (HI(20)) #define RLWINM (HI(21)) +#define RLWNM (HI(23)) #define SLD (HI(31) | LO(27)) #define SLW (HI(31) | LO(24)) #define SRAD (HI(31) | LO(794)) @@ -233,9 +238,24 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { #define SIMM_MIN (-0x8000) #define UIMM_MAX (0xffff) -#define RLDI(dst, src, sh, mb, type) \ - (HI(30) | S(src) | A(dst) | ((sljit_ins)(type) << 2) | (((sljit_ins)(sh) & 0x1f) << 11) \ - | (((sljit_ins)(sh) & 0x20) >> 4) | (((sljit_ins)(mb) & 0x1f) << 6) | ((sljit_ins)(mb) & 0x20)) +/* Shift helpers. 
*/ +#define RLWI_SH(sh) ((sljit_ins)(sh) << 11) +#define RLWI_MBE(mb, me) (((sljit_ins)(mb) << 6) | ((sljit_ins)(me) << 1)) +#define RLDI_SH(sh) ((((sljit_ins)(sh) & 0x1f) << 11) | (((sljit_ins)(sh) & 0x20) >> 4)) +#define RLDI_MB(mb) ((((sljit_ins)(mb) & 0x1f) << 6) | ((sljit_ins)(mb) & 0x20)) +#define RLDI_ME(me) RLDI_MB(me) + +#define SLWI(shift) (RLWINM | RLWI_SH(shift) | RLWI_MBE(0, 31 - (shift))) +#define SLDI(shift) (RLDICR | RLDI_SH(shift) | RLDI_ME(63 - (shift))) +/* shift > 0 */ +#define SRWI(shift) (RLWINM | RLWI_SH(32 - (shift)) | RLWI_MBE((shift), 31)) +#define SRDI(shift) (RLDICL | RLDI_SH(64 - (shift)) | RLDI_MB(shift)) + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +#define SLWI_W(shift) SLWI(shift) +#else /* !SLJIT_CONFIG_PPC_32 */ +#define SLWI_W(shift) SLDI(shift) +#endif /* SLJIT_CONFIG_PPC_32 */ #if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_uw addr, void* func) @@ -368,10 +388,10 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) else { inst[0] = ORIS | S(TMP_ZERO) | A(reg) | IMM(addr >> 48); inst[1] = ORI | S(reg) | A(reg) | IMM((addr >> 32) & 0xffff); - inst ++; + inst++; } - inst[1] = RLDI(reg, reg, 32, 31, 1); + inst[1] = SLDI(32) | S(reg) | A(reg); inst[2] = ORIS | S(reg) | A(reg) | IMM((addr >> 16) & 0xffff); inst += 2; } @@ -379,7 +399,7 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label) inst[1] = ORI | S(reg) | A(reg) | IMM(addr & 0xffff); } -#endif +#endif /* SLJIT_CONFIG_PPC_64 */ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) { @@ -497,8 +517,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil } next_addr = compute_next_addr(label, jump, const_, put_label); } - code_ptr ++; - word_count ++; + code_ptr++; + word_count++; } while (buf_ptr < buf_end); buf = buf->next; @@ -641,14 +661,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) /* A saved register is set to a zero value. 
*/ case SLJIT_HAS_ZERO_REGISTER: case SLJIT_HAS_CLZ: + case SLJIT_HAS_ROT: case SLJIT_HAS_PREFETCH: return 1; + case SLJIT_HAS_CTZ: + return 2; + default: return 0; } } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + return (type >= SLJIT_UNORDERED && type <= SLJIT_ORDERED_LESS_EQUAL); +} + /* --------------------------------------------------------------------- */ /* Entry, exit */ /* --------------------------------------------------------------------- */ @@ -715,13 +744,16 @@ ALT_FORM5 0x010000 */ #define STACK_MAX_DISTANCE (0x8000 - SSIZE_OF(sw) - LR_SAVE_OFFSET) +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg, + sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg); + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { sljit_s32 i, tmp, base, offset; sljit_s32 word_arg_count = 0; - sljit_s32 saved_arg_count = 0; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) sljit_s32 arg_count = 0; #endif @@ -730,8 +762,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 0) + GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); + + if (!(options & SLJIT_ENTER_REG_ARG)) + local_size += SSIZE_OF(sw); + local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf; compiler->local_size = local_size; @@ -770,11 +806,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi FAIL_IF(push_inst(compiler, STFD | FS(i) | A(base) | IMM(offset))); } - offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(base) | IMM(offset))); + if (!(options & SLJIT_ENTER_REG_ARG)) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(base) | IMM(offset))); + } tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) { offset -= SSIZE_OF(sw); FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(base) | IMM(offset))); } @@ -785,9 +823,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi } FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(base) | IMM(local_size + LR_SAVE_OFFSET))); + + if (options & SLJIT_ENTER_REG_ARG) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0)); arg_types >>= SLJIT_ARG_SHIFT; + saved_arg_count = 0; while (arg_types > 0) { if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { @@ -829,14 +872,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1) + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 0) + 
GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); + + if (!(options & SLJIT_ENTER_REG_ARG)) + local_size += SSIZE_OF(sw); + compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf; return SLJIT_SUCCESS; } - -static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to) { sljit_s32 i, tmp, base, offset; sljit_s32 local_size = compiler->local_size; @@ -854,7 +900,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) } offset = local_size; - FAIL_IF(push_inst(compiler, STACK_LOAD | S(0) | A(base) | IMM(offset + LR_SAVE_OFFSET))); + if (!is_return_to) + FAIL_IF(push_inst(compiler, STACK_LOAD | S(0) | A(base) | IMM(offset + LR_SAVE_OFFSET))); tmp = SLJIT_FS0 - compiler->fsaveds; for (i = SLJIT_FS0; i > tmp; i--) { @@ -867,11 +914,13 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) FAIL_IF(push_inst(compiler, LFD | FS(i) | A(base) | IMM(offset))); } - offset -= SSIZE_OF(sw); - FAIL_IF(push_inst(compiler, STACK_LOAD | S(TMP_ZERO) | A(base) | IMM(offset))); + if (!(compiler->options & SLJIT_ENTER_REG_ARG)) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_inst(compiler, STACK_LOAD | S(TMP_ZERO) | A(base) | IMM(offset))); + } tmp = SLJIT_S0 - compiler->saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) { offset -= SSIZE_OF(sw); FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset))); } @@ -881,7 +930,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset))); } - push_inst(compiler, MTLR | S(0)); + if (!is_return_to) + push_inst(compiler, MTLR | S(0)); if (local_size > 0) return push_inst(compiler, ADDI | D(SLJIT_SP) | A(base) | IMM(local_size)); @@ -890,17 +940,40 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) return push_inst(compiler, OR | S(base) | A(SLJIT_SP) | B(base)); } +#undef STACK_STORE +#undef STACK_LOAD + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) { CHECK_ERROR(); CHECK(check_sljit_emit_return_void(compiler)); - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, 0)); return push_inst(compiler, BLR); } -#undef STACK_STORE -#undef STACK_LOAD +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG)); + src = TMP_CALL_REG; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src))); + src = TMP_CALL_REG; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} /* --------------------------------------------------------------------- */ /* Operators */ @@ -1066,7 +1139,6 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag { sljit_ins inst; sljit_s32 offs_reg; - sljit_sw high_short; /* Should work when (arg & REG_MASK) == 0. 
*/ SLJIT_ASSERT(A(0) == 0); @@ -1077,11 +1149,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag offs_reg = OFFS_REG(arg); if (argw != 0) { -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_reg) | ((sljit_ins)argw << 11) | ((31 - (sljit_ins)argw) << 1))); -#else - FAIL_IF(push_inst(compiler, RLDI(tmp_reg, OFFS_REG(arg), argw, 63 - argw, 1))); -#endif + FAIL_IF(push_inst(compiler, SLWI_W(argw) | S(OFFS_REG(arg)) | A(tmp_reg))); offs_reg = tmp_reg; } @@ -1089,7 +1157,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) SLJIT_ASSERT(!(inst & INT_ALIGNED)); -#endif +#endif /* SLJIT_CONFIG_PPC_64 */ return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(offs_reg)); } @@ -1104,36 +1172,24 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg)); } -#endif +#endif /* SLJIT_CONFIG_PPC_64 */ if (argw <= SIMM_MAX && argw >= SIMM_MIN) return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | IMM(argw)); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) if (argw <= 0x7fff7fffl && argw >= -0x80000000l) { -#endif - - high_short = (sljit_s32)(argw + ((argw & 0x8000) << 1)) & ~0xffff; - -#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - SLJIT_ASSERT(high_short && high_short <= 0x7fffffffl && high_short >= -0x80000000l); -#else - SLJIT_ASSERT(high_short); -#endif - - FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM(high_short >> 16))); +#endif /* SLJIT_CONFIG_PPC_64 */ + FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM((argw + 0x8000) >> 16))); return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_reg) | IMM(argw)); - #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) } - /* The rest is PPC-64 only. */ - FAIL_IF(load_immediate(compiler, tmp_reg, argw)); inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg)); -#endif +#endif /* SLJIT_CONFIG_PPC_64 */ } static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 input_flags, @@ -1273,11 +1329,7 @@ static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, if (srcw == 0) return push_inst(compiler, DCBT | A(src & REG_MASK) | B(OFFS_REG(src))); -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(src)) | A(TMP_REG1) | ((sljit_ins)srcw << 11) | ((31 - (sljit_ins)srcw) << 1))); -#else - FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(src), srcw, 63 - srcw, 1))); -#endif + FAIL_IF(push_inst(compiler, SLWI_W(srcw) | S(OFFS_REG(src)) | A(TMP_REG1))); return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1)); } @@ -1362,10 +1414,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw); case SLJIT_CLZ: + case SLJIT_CTZ: #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_32) ? 0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw); + return emit_op(compiler, op, flags | (!(op_flags & SLJIT_32) ? 
0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw); #else - return emit_op(compiler, SLJIT_CLZ, flags, dst, dstw, TMP_REG1, 0, src, srcw); + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); #endif } @@ -1626,7 +1679,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); } } - if (GET_OPCODE(op) != SLJIT_AND) { + if (!HAS_FLAGS(op) && GET_OPCODE(op) != SLJIT_AND) { /* Unlike or and xor, the and resets unwanted bits as well. */ if (TEST_UI_IMM(src2, src2w)) { compiler->imm = (sljit_ins)src2w; @@ -1640,8 +1693,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w); case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: + case SLJIT_ROTL: + case SLJIT_ROTR: #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) if (op & SLJIT_32) flags |= ALT_FORM2; @@ -1663,10 +1721,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w); } @@ -1674,6 +1729,102 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil #undef TEST_SUB_FORM2 #undef TEST_SUB_FORM3 +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_right; +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA; + sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64; +#else /* !SLJIT_CONFIG_PPC_64 */ + sljit_s32 inp_flags = WORD_DATA | LOAD_DATA; + sljit_sw bit_length = 32; +#endif /* SLJIT_CONFIG_PPC_64 */ + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, (is_right ? 
SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (src2 & SLJIT_IMM) { + src2w &= bit_length - 1; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG2, src2, src2w, TMP_REG2)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG1, src1, src1w, TMP_REG1)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (!(op & SLJIT_32)) { + if (is_right) { + FAIL_IF(push_inst(compiler, SRDI(src2w) | S(src_dst) | A(src_dst))); + return push_inst(compiler, RLDIMI | S(src1) | A(src_dst) | RLDI_SH(64 - src2w) | RLDI_MB(0)); + } + + FAIL_IF(push_inst(compiler, SLDI(src2w) | S(src_dst) | A(src_dst))); + /* Computes SRDI(64 - src2w). */ + FAIL_IF(push_inst(compiler, RLDICL | S(src1) | A(TMP_REG1) | RLDI_SH(src2w) | RLDI_MB(64 - src2w))); + return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1)); + } +#endif /* SLJIT_CONFIG_PPC_64 */ + + if (is_right) { + FAIL_IF(push_inst(compiler, SRWI(src2w) | S(src_dst) | A(src_dst))); + return push_inst(compiler, RLWIMI | S(src1) | A(src_dst) | RLWI_SH(32 - src2w) | RLWI_MBE(0, src2w - 1)); + } + + FAIL_IF(push_inst(compiler, SLWI(src2w) | S(src_dst) | A(src_dst))); + return push_inst(compiler, RLWIMI | S(src1) | A(src_dst) | RLWI_SH(src2w) | RLWI_MBE(32 - src2w, 31)); + } + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (!(op & SLJIT_32)) { + if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x3f)); + src2 = TMP_REG2; + } + + FAIL_IF(push_inst(compiler, (is_right ? SRD : SLD) | S(src_dst) | A(src_dst) | B(src2))); + FAIL_IF(push_inst(compiler, (is_right ? SLDI(1) : SRDI(1)) | S(src1) | A(TMP_REG1))); + FAIL_IF(push_inst(compiler, XORI | S(src2) | A(TMP_REG2) | 0x3f)); + FAIL_IF(push_inst(compiler, (is_right ? SLD : SRD) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2))); + return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1)); + } +#endif /* SLJIT_CONFIG_PPC_64 */ + + if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) { + FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f)); + src2 = TMP_REG2; + } + + FAIL_IF(push_inst(compiler, (is_right ? SRW : SLW) | S(src_dst) | A(src_dst) | B(src2))); + FAIL_IF(push_inst(compiler, (is_right ? SLWI(1) : SRWI(1)) | S(src1) | A(TMP_REG1))); + FAIL_IF(push_inst(compiler, XORI | S(src2) | A(TMP_REG2) | 0x1f)); + FAIL_IF(push_inst(compiler, (is_right ? 
SLW : SRW) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2))); + return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1)); +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { @@ -1686,7 +1837,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp if (FAST_IS_REG(src)) FAIL_IF(push_inst(compiler, MTLR | S(src))); else { - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG2)); FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2))); } @@ -1782,11 +1933,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp if (dst & OFFS_REG_MASK) { dstw &= 0x3; if (dstw) { -#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) - FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(dst)) | A(TMP_REG1) | ((sljit_ins)dstw << 11) | ((31 - (sljit_ins)dstw) << 1))); -#else - FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(dst), dstw, 63 - dstw, 1))); -#endif + FAIL_IF(push_inst(compiler, SLWI_W(dstw) | S(OFFS_REG(dst)) | A(TMP_REG1))); dstw = TMP_REG1; } else @@ -1818,6 +1965,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp if (src & SLJIT_IMM) { if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) srcw = (sljit_s32)srcw; + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); src = TMP_REG1; } @@ -1863,7 +2011,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp The double precision format has exactly 53 bit precision, so the lower 32 bit represents the lower 32 bit of such value. The result of xor 2^31 is the same as adding 0x80000000 to the input, which shifts it into the 0 - 0xffffffff range. To get the converted floating - point value, we need to substract 2^53 + 2^31 from the constructed value. */ + point value, we need to subtract 2^53 + 2^31 from the constructed value. 
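Concretely: the optional XORIS below turns the signed input x into the biased value x + 2^31 in the low word, ADDIS builds the 0x43300000 exponent pattern for the high word, and the constructed double therefore equals the bias constant plus x, so the floating-point subtraction that follows leaves exactly x.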
*/ FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330)); if (invert_sign) FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000)); @@ -1899,7 +2047,21 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile src2 = TMP_FREG2; } - return push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2)); + FAIL_IF(push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2))); + + switch (GET_FLAG_TYPE(op)) { + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + return push_inst(compiler, CROR | ((4 + 2) << 21) | ((4 + 2) << 16) | ((4 + 3) << 11)); + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_ORDERED_GREATER_EQUAL: + return push_inst(compiler, CROR | ((4 + 0) << 21) | ((4 + 0) << 16) | ((4 + 3) << 11)); + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED_LESS_EQUAL: + return push_inst(compiler, CROR | ((4 + 1) << 21) | ((4 + 1) << 16) | ((4 + 3) << 11)); + } + + return SLJIT_SUCCESS; } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, @@ -2076,38 +2238,50 @@ static sljit_ins get_bo_bi_flags(struct sljit_compiler *compiler, sljit_s32 type case SLJIT_SIG_LESS_EQUAL: return (4 << 21) | (1 << 16); - case SLJIT_LESS_F64: + case SLJIT_OVERFLOW: + return (12 << 21) | (3 << 16); + + case SLJIT_NOT_OVERFLOW: + return (4 << 21) | (3 << 16); + + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_LESS: return (12 << 21) | ((4 + 0) << 16); - case SLJIT_GREATER_EQUAL_F64: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: return (4 << 21) | ((4 + 0) << 16); - case SLJIT_GREATER_F64: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_GREATER: return (12 << 21) | ((4 + 1) << 16); - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: return (4 << 21) | ((4 + 1) << 16); - case SLJIT_OVERFLOW: - return (12 << 21) | (3 << 16); - - case SLJIT_NOT_OVERFLOW: - return (4 << 21) | (3 << 16); - - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: return (12 << 21) | ((4 + 2) << 16); - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: return (4 << 21) | ((4 + 2) << 16); - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return (12 << 21) | ((4 + 3) << 16); - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return (4 << 21) | ((4 + 3) << 16); default: - SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL); + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG); return (20 << 21); } } @@ -2154,19 +2328,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); #endif if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(emit_stack_frame_release(compiler)); + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); } @@ -2177,7 +2348,6 @@ 
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi CHECK_ERROR(); CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); if (FAST_IS_REG(src)) { #if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL) @@ -2204,9 +2374,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0)); src_r = TMP_CALL_REG; - } - else { - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw)); + } else { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG)); src_r = TMP_CALL_REG; } @@ -2225,29 +2395,26 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi if (src & SLJIT_MEM) { ADJUST_LOCAL_OFFSET(src, srcw); - FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw)); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG)); src = TMP_CALL_REG; } if (type & SLJIT_CALL_RETURN) { - if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src))); src = TMP_CALL_REG; } - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, 0)); type = SLJIT_JUMP; } #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) - FAIL_IF(call_with_args(compiler, arg_types, &src)); -#endif - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + FAIL_IF(call_with_args(compiler, arg_types, &src)); #endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); } @@ -2279,7 +2446,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co bit = 0; from_xer = 0; - switch (type & 0xff) { + switch (type) { case SLJIT_LESS: case SLJIT_SIG_LESS: break; @@ -2332,38 +2499,50 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co invert = (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD) != 0; break; - case SLJIT_LESS_F64: + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_LESS: bit = 4 + 0; break; - case SLJIT_GREATER_EQUAL_F64: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: bit = 4 + 0; invert = 1; break; - case SLJIT_GREATER_F64: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_GREATER: bit = 4 + 1; break; - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: bit = 4 + 1; invert = 1; break; - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: bit = 4 + 2; break; - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: bit = 4 + 2; invert = 1; break; - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: bit = 4 + 3; break; - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: bit = 4 + 3; invert = 1; break; @@ -2374,7 +2553,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co } FAIL_IF(push_inst(compiler, (from_xer ? 
MFXER : MFCR) | D(reg))); - FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | ((1 + bit) << 11) | (31 << 6) | (31 << 1))); + /* Simplified mnemonics: extrwi. */ + FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | RLWI_SH(1 + bit) | RLWI_MBE(31, 31))); if (invert) FAIL_IF(push_inst(compiler, XORI | S(reg) | A(reg) | 0x1)); @@ -2385,10 +2565,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co return emit_op_mem(compiler, input_flags, reg, dst, dstw, TMP_REG1); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); + if (dst & SLJIT_MEM) return sljit_emit_op2(compiler, saved_op, dst, saved_dstw, TMP_REG1, 0, TMP_REG2, 0); return sljit_emit_op2(compiler, saved_op, dst, 0, dst, 0, TMP_REG2, 0); @@ -2404,16 +2582,95 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);; } +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + +#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \ + ((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw)) + +#else /* !SLJIT_CONFIG_PPC_32 */ + +#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \ + ((((inst) & INT_ALIGNED) && ((memw) & 0x3) != 0) \ + || ((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw)) \ + || ((memw) > 0x7fff7fffl || (memw) < -0x80000000l)) \ + +#endif /* SLJIT_CONFIG_PPC_32 */ + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 reg, sljit_s32 mem, sljit_sw memw) { - sljit_s32 mem_flags; sljit_ins inst; CHECK_ERROR(); CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + ADJUST_LOCAL_OFFSET(mem, memw); + + inst = data_transfer_insts[WORD_DATA | ((type & SLJIT_MEM_STORE) ? 
0 : LOAD_DATA)]; + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + memw &= 0x3; + + if (memw != 0) { + FAIL_IF(push_inst(compiler, SLWI_W(memw) | S(OFFS_REG(mem)) | A(TMP_REG1))); + FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(TMP_REG1) | B(mem & REG_MASK))); + } else + FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(mem & REG_MASK) | B(OFFS_REG(mem)))); + + mem = TMP_REG1; + memw = 0; + } else { + if (EMIT_MEM_LOAD_IMM(inst, mem, memw)) { + if ((mem & REG_MASK) != 0) { + SLJIT_SKIP_CHECKS(compiler); + FAIL_IF(sljit_emit_op2(compiler, SLJIT_ADD, TMP_REG1, 0, mem & REG_MASK, 0, SLJIT_IMM, memw)); + } else + FAIL_IF(load_immediate(compiler, TMP_REG1, memw)); + + memw = 0; + mem = TMP_REG1; + } else if (memw > SIMM_MAX || memw < SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(mem & REG_MASK) | IMM((memw + 0x8000) >> 16))); + + memw &= 0xffff; + mem = TMP_REG1; + } else { + memw &= 0xffff; + mem &= REG_MASK; + } + } + + SLJIT_ASSERT((memw >= 0 && memw <= SIMM_MAX - SSIZE_OF(sw)) || (memw >= 0x8000 && memw <= 0xffff)); + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + inst &= (sljit_ins)~INT_ALIGNED; +#endif /* SLJIT_CONFIG_PPC_64 */ + + if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) { + FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw)))); + return push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw)); + } + + FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw))); + return push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw))); +} + +#undef EMIT_MEM_LOAD_IMM + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 mem_flags; + sljit_ins inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw)); + if (type & SLJIT_MEM_POST) return SLJIT_ERR_UNSUPPORTED; @@ -2500,7 +2757,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile return SLJIT_SUCCESS; } -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type, +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 freg, sljit_s32 mem, sljit_sw memw) { @@ -2508,7 +2765,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil sljit_ins inst; CHECK_ERROR(); - CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw)); + CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw)); if (type & SLJIT_MEM_POST) return SLJIT_ERR_UNSUPPORTED; @@ -2587,3 +2844,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj return put_label; } + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset); +} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeRISCV_32.c b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_32.c new file mode 100644 index 0000000000..b38e6924c8 --- /dev/null +++ b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_32.c @@ -0,0 +1,73 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r) +{ + SLJIT_UNUSED_ARG(tmp_r); + SLJIT_ASSERT(dst_r != tmp_r); + + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm)); + + if (imm & 0x800) + imm += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff))); + + if ((imm & 0xfff) == 0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)); +} + +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins) +{ + if ((init_value & 0x800) != 0) + init_value += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff))); + return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins*)addr; + SLJIT_UNUSED_ARG(executable_offset); + + if ((new_target & 0x800) != 0) + new_target += 0x1000; + + SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0); + + SLJIT_ASSERT((inst[0] & 0x7f) == LUI); + inst[0] = (inst[0] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff); + SLJIT_ASSERT((inst[1] & 0x707f) == ADDI || (inst[1] & 0x707f) == JALR); + inst[1] = (inst[1] & 0xfffff) | IMM_I(new_target); + + SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1); + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 5); +} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeRISCV_64.c b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_64.c new file mode 100644 index 0000000000..32cec7848d --- /dev/null +++ b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_64.c @@ -0,0 +1,183 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r) +{ + sljit_sw high; + + SLJIT_ASSERT(dst_r != tmp_r); + + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm)); + + if (imm <= 0x7fffffffl && imm >= S32_MIN) { + if (imm > S32_MAX) { + SLJIT_ASSERT((imm & 0x800) != 0); + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u)); + return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)); + } + + if ((imm & 0x800) != 0) + imm += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff))); + + if ((imm & 0xfff) == 0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)); + } + + /* Trailing zeroes could be used to produce shifted immediates. 
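In the 44-bit branch below the value is assembled as (high << 12) ^ low12: the upper part (imm >> 12, possibly inverted) is materialized first, SLLI by 12 shifts it into place, and XORI merges the low 12 bits. Because XORI sign-extends its 12-bit immediate, a set bit 11 would flip every bit above it, which is exactly what the high = ~high pre-inversion compensates for.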
*/ + + if (imm <= 0x7ffffffffffl && imm >= -0x80000000000l) { + high = imm >> 12; + + if (imm & 0x800) + high = ~high; + + if (high > S32_MAX) { + SLJIT_ASSERT((high & 0x800) != 0); + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u)); + FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(high))); + } else { + if ((high & 0x800) != 0) + high += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(high & ~0xfff))); + + if ((high & 0xfff) != 0) + FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(high))); + } + + FAIL_IF(push_inst(compiler, SLLI | RD(dst_r) | RS1(dst_r) | IMM_I(12))); + + if ((imm & 0xfff) != 0) + return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)); + + return SLJIT_SUCCESS; + } + + high = imm >> 32; + imm = (sljit_s32)imm; + + if ((imm & 0x80000000l) != 0) + high = ~high; + + if (high <= 0x7ffff && high >= -0x80000) { + FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high << 12))); + high = 0x1000; + } else { + if ((high & 0x800) != 0) + high += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high & ~0xfff))); + high &= 0xfff; + } + + if (imm <= SIMM_MAX && imm >= SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm))); + imm = 0; + } else if (imm > S32_MAX) { + SLJIT_ASSERT((imm & 0x800) != 0); + + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u)); + imm = 0x1000 | (imm & 0xfff); + } else { + if ((imm & 0x800) != 0) + imm += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff))); + imm &= 0xfff; + } + + if ((high & 0xfff) != 0) + FAIL_IF(push_inst(compiler, ADDI | RD(tmp_r) | RS1(tmp_r) | IMM_I(high))); + + if (imm & 0x1000) + FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm))); + else if (imm != 0) + FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm))); + + FAIL_IF(push_inst(compiler, SLLI | RD(tmp_r) | RS1(tmp_r) | IMM_I((high & 0x1000) ? 
20 : 32))); + return push_inst(compiler, XOR | RD(dst_r) | RS1(dst_r) | RS2(tmp_r)); +} + +static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins) +{ + sljit_sw high; + + if ((init_value & 0x800) != 0) + init_value += 0x1000; + + high = init_value >> 32; + + if ((init_value & 0x80000000l) != 0) + high = ~high; + + if ((high & 0x800) != 0) + high += 0x1000; + + FAIL_IF(push_inst(compiler, LUI | RD(TMP_REG3) | (sljit_ins)(high & ~0xfff))); + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(high))); + FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff))); + FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(32))); + FAIL_IF(push_inst(compiler, XOR | RD(dst) | RS1(dst) | RS2(TMP_REG3))); + return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) +{ + sljit_ins *inst = (sljit_ins*)addr; + sljit_sw high; + SLJIT_UNUSED_ARG(executable_offset); + + if ((new_target & 0x800) != 0) + new_target += 0x1000; + + high = (sljit_sw)new_target >> 32; + + if ((new_target & 0x80000000l) != 0) + high = ~high; + + if ((high & 0x800) != 0) + high += 0x1000; + + SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0); + + SLJIT_ASSERT((inst[0] & 0x7f) == LUI); + inst[0] = (inst[0] & 0xfff) | (sljit_ins)(high & ~0xfff); + SLJIT_ASSERT((inst[1] & 0x707f) == ADDI); + inst[1] = (inst[1] & 0xfffff) | IMM_I(high); + SLJIT_ASSERT((inst[2] & 0x7f) == LUI); + inst[2] = (inst[2] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff); + SLJIT_ASSERT((inst[5] & 0x707f) == ADDI || (inst[5] & 0x707f) == JALR); + inst[5] = (inst[5] & 0xfffff) | IMM_I(new_target); + SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1); + + inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); + SLJIT_CACHE_FLUSH(inst, inst + 5); +} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeRISCV_common.c b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_common.c new file mode 100644 index 0000000000..58a48c649c --- /dev/null +++ b/thirdparty/pcre2/src/sljit/sljitNativeRISCV_common.c @@ -0,0 +1,2762 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) +{ +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + return "RISC-V-32" SLJIT_CPUINFO; +#else /* !SLJIT_CONFIG_RISCV_32 */ + return "RISC-V-64" SLJIT_CPUINFO; +#endif /* SLJIT_CONFIG_RISCV_32 */ +} + +/* Length of an instruction word + Both for riscv-32 and riscv-64 */ +typedef sljit_u32 sljit_ins; + +#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2) +#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3) +#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4) +#define TMP_ZERO 0 + +/* Flags are kept in volatile registers. */ +#define EQUAL_FLAG (SLJIT_NUMBER_OF_REGISTERS + 5) +#define RETURN_ADDR_REG TMP_REG2 +#define OTHER_FLAG (SLJIT_NUMBER_OF_REGISTERS + 6) + +#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) +#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) + +static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 7] = { + 0, 10, 11, 12, 13, 14, 15, 16, 17, 29, 30, 31, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 9, 8, 2, 6, 1, 7, 5, 28 +}; + +static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { + 0, 10, 11, 12, 13, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 28, 29, 30, 31, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 9, 8, 0, 1, +}; + +/* --------------------------------------------------------------------- */ +/* Instrucion forms */ +/* --------------------------------------------------------------------- */ + +#define RD(rd) ((sljit_ins)reg_map[rd] << 7) +#define RS1(rs1) ((sljit_ins)reg_map[rs1] << 15) +#define RS2(rs2) ((sljit_ins)reg_map[rs2] << 20) +#define FRD(rd) ((sljit_ins)freg_map[rd] << 7) +#define FRS1(rs1) ((sljit_ins)freg_map[rs1] << 15) +#define FRS2(rs2) ((sljit_ins)freg_map[rs2] << 20) +#define IMM_I(imm) ((sljit_ins)(imm) << 20) +#define IMM_S(imm) ((((sljit_ins)(imm) & 0xfe0) << 20) | (((sljit_ins)(imm) & 0x1f) << 7)) + +/* Represents funct(i) parts of the instructions. */ +#define OPC(o) ((sljit_ins)(o)) +#define F3(f) ((sljit_ins)(f) << 12) +#define F12(f) ((sljit_ins)(f) << 20) +#define F7(f) ((sljit_ins)(f) << 25) + +#define ADD (F7(0x0) | F3(0x0) | OPC(0x33)) +#define ADDI (F3(0x0) | OPC(0x13)) +#define AND (F7(0x0) | F3(0x7) | OPC(0x33)) +#define ANDI (F3(0x7) | OPC(0x13)) +#define AUIPC (OPC(0x17)) +#define BEQ (F3(0x0) | OPC(0x63)) +#define BNE (F3(0x1) | OPC(0x63)) +#define BLT (F3(0x4) | OPC(0x63)) +#define BGE (F3(0x5) | OPC(0x63)) +#define BLTU (F3(0x6) | OPC(0x63)) +#define BGEU (F3(0x7) | OPC(0x63)) +#define DIV (F7(0x1) | F3(0x4) | OPC(0x33)) +#define DIVU (F7(0x1) | F3(0x5) | OPC(0x33)) +#define EBREAK (F12(0x1) | F3(0x0) | OPC(0x73)) +#define FADD_S (F7(0x0) | F3(0x7) | OPC(0x53)) +#define FDIV_S (F7(0xc) | F3(0x7) | OPC(0x53)) +#define FEQ_S (F7(0x50) | F3(0x2) | OPC(0x53)) +#define FLD (F3(0x3) | OPC(0x7)) +#define FLE_S (F7(0x50) | F3(0x0) | OPC(0x53)) +#define FLT_S (F7(0x50) | F3(0x1) | OPC(0x53)) +#define FSD (F3(0x3) | OPC(0x27)) +/* These conversion opcodes are partly defined. 
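Only the fields shared by every variant are fixed here; the remaining format/width selector bits are OR-ed in where the opcodes are emitted.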
*/ +#define FCVT_S_D (F7(0x20) | OPC(0x53)) +#define FCVT_S_W (F7(0x68) | OPC(0x53)) +#define FCVT_W_S (F7(0x60) | F3(0x1) | OPC(0x53)) +#define FMUL_S (F7(0x8) | F3(0x7) | OPC(0x53)) +#define FSGNJ_S (F7(0x10) | F3(0x0) | OPC(0x53)) +#define FSGNJN_S (F7(0x10) | F3(0x1) | OPC(0x53)) +#define FSGNJX_S (F7(0x10) | F3(0x2) | OPC(0x53)) +#define FSUB_S (F7(0x4) | F3(0x7) | OPC(0x53)) +#define JAL (OPC(0x6f)) +#define JALR (F3(0x0) | OPC(0x67)) +#define LD (F3(0x3) | OPC(0x3)) +#define LUI (OPC(0x37)) +#define LW (F3(0x2) | OPC(0x3)) +#define MUL (F7(0x1) | F3(0x0) | OPC(0x33)) +#define MULH (F7(0x1) | F3(0x1) | OPC(0x33)) +#define MULHU (F7(0x1) | F3(0x3) | OPC(0x33)) +#define OR (F7(0x0) | F3(0x6) | OPC(0x33)) +#define ORI (F3(0x6) | OPC(0x13)) +#define REM (F7(0x1) | F3(0x6) | OPC(0x33)) +#define REMU (F7(0x1) | F3(0x7) | OPC(0x33)) +#define SD (F3(0x3) | OPC(0x23)) +#define SLL (F7(0x0) | F3(0x1) | OPC(0x33)) +#define SLLI (IMM_I(0x0) | F3(0x1) | OPC(0x13)) +#define SLT (F7(0x0) | F3(0x2) | OPC(0x33)) +#define SLTI (F3(0x2) | OPC(0x13)) +#define SLTU (F7(0x0) | F3(0x3) | OPC(0x33)) +#define SLTUI (F3(0x3) | OPC(0x13)) +#define SRL (F7(0x0) | F3(0x5) | OPC(0x33)) +#define SRLI (IMM_I(0x0) | F3(0x5) | OPC(0x13)) +#define SRA (F7(0x20) | F3(0x5) | OPC(0x33)) +#define SRAI (IMM_I(0x400) | F3(0x5) | OPC(0x13)) +#define SUB (F7(0x20) | F3(0x0) | OPC(0x33)) +#define SW (F3(0x2) | OPC(0x23)) +#define XOR (F7(0x0) | F3(0x4) | OPC(0x33)) +#define XORI (F3(0x4) | OPC(0x13)) + +#define SIMM_MAX (0x7ff) +#define SIMM_MIN (-0x800) +#define BRANCH_MAX (0xfff) +#define BRANCH_MIN (-0x1000) +#define JUMP_MAX (0xfffff) +#define JUMP_MIN (-0x100000) + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) +#define S32_MAX (0x7ffff7ffl) +#define S32_MIN (-0x80000000l) +#define S44_MAX (0x7fffffff7ffl) +#define S52_MAX (0x7ffffffffffffl) +#endif + +static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins) +{ + sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr = ins; + compiler->size++; + return SLJIT_SUCCESS; +} + +static sljit_s32 push_imm_s_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_sw imm) +{ + return push_inst(compiler, ins | IMM_S(imm)); +} + +static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code, sljit_sw executable_offset) +{ + sljit_sw diff; + sljit_uw target_addr; + sljit_ins *inst; + + inst = (sljit_ins *)jump->addr; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + goto exit; + + if (jump->flags & JUMP_ADDR) + target_addr = jump->u.target; + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; + } + + diff = (sljit_sw)target_addr - (sljit_sw)inst - executable_offset; + + if (jump->flags & IS_COND) { + inst--; + diff += SSIZE_OF(ins); + + if (diff >= BRANCH_MIN && diff <= BRANCH_MAX) { + jump->flags |= PATCH_B; + inst[0] = (inst[0] & 0x1fff07f) ^ 0x1000; + jump->addr = (sljit_uw)inst; + return inst; + } + + inst++; + diff -= SSIZE_OF(ins); + } + + if (diff >= JUMP_MIN && diff <= JUMP_MAX) { + if (jump->flags & IS_COND) { +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + inst[-1] -= (sljit_ins)(1 * sizeof(sljit_ins)) << 7; +#else + inst[-1] -= (sljit_ins)(5 * sizeof(sljit_ins)) << 7; +#endif + } + + jump->flags |= PATCH_J; + return inst; + } + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (diff >= S32_MIN && diff <= S32_MAX) { + if (jump->flags & IS_COND) + inst[-1] -= 
(sljit_ins)(4 * sizeof(sljit_ins)) << 7; + + jump->flags |= PATCH_REL32; + inst[1] = inst[0]; + return inst + 1; + } + + if (target_addr <= (sljit_uw)S32_MAX) { + if (jump->flags & IS_COND) + inst[-1] -= (sljit_ins)(4 * sizeof(sljit_ins)) << 7; + + jump->flags |= PATCH_ABS32; + inst[1] = inst[0]; + return inst + 1; + } + + if (target_addr <= S44_MAX) { + if (jump->flags & IS_COND) + inst[-1] -= (sljit_ins)(2 * sizeof(sljit_ins)) << 7; + + jump->flags |= PATCH_ABS44; + inst[3] = inst[0]; + return inst + 3; + } + + if (target_addr <= S52_MAX) { + if (jump->flags & IS_COND) + inst[-1] -= (sljit_ins)(1 * sizeof(sljit_ins)) << 7; + + jump->flags |= PATCH_ABS52; + inst[4] = inst[0]; + return inst + 4; + } +#endif + +exit: +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + inst[1] = inst[0]; + return inst + 1; +#else + inst[5] = inst[0]; + return inst + 5; +#endif +} + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + +static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label) +{ + if (max_label <= (sljit_uw)S32_MAX) { + put_label->flags = PATCH_ABS32; + return 1; + } + + if (max_label <= S44_MAX) { + put_label->flags = PATCH_ABS44; + return 3; + } + + if (max_label <= S52_MAX) { + put_label->flags = PATCH_ABS52; + return 4; + } + + put_label->flags = 0; + return 5; +} + +#endif /* SLJIT_CONFIG_RISCV_64 */ + +static SLJIT_INLINE void load_addr_to_reg(void *dst, sljit_u32 reg) +{ + struct sljit_jump *jump = NULL; + struct sljit_put_label *put_label; + sljit_uw flags; + sljit_ins *inst; +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_sw high; +#endif + sljit_uw addr; + + if (reg != 0) { + jump = (struct sljit_jump*)dst; + flags = jump->flags; + inst = (sljit_ins*)jump->addr; + addr = (flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + } else { + put_label = (struct sljit_put_label*)dst; +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + flags = put_label->flags; +#endif + inst = (sljit_ins*)put_label->addr; + addr = put_label->label->addr; + reg = *inst; + } + + if ((addr & 0x800) != 0) + addr += 0x1000; + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + inst[0] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff); +#else /* !SLJIT_CONFIG_RISCV_32 */ + + if (flags & PATCH_ABS32) { + SLJIT_ASSERT(addr <= S32_MAX); + inst[0] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff); + } else if (flags & PATCH_ABS44) { + high = (sljit_sw)addr >> 12; + SLJIT_ASSERT((sljit_uw)high <= 0x7fffffff); + + if (high > S32_MAX) { + SLJIT_ASSERT((high & 0x800) != 0); + inst[0] = LUI | RD(reg) | (sljit_ins)0x80000000u; + inst[1] = XORI | RD(reg) | RS1(reg) | IMM_I(high); + } else { + if ((high & 0x800) != 0) + high += 0x1000; + + inst[0] = LUI | RD(reg) | (sljit_ins)(high & ~0xfff); + inst[1] = ADDI | RD(reg) | RS1(reg) | IMM_I(high); + } + + inst[2] = SLLI | RD(reg) | RS1(reg) | IMM_I(12); + inst += 2; + } else { + high = (sljit_sw)addr >> 32; + + if ((addr & 0x80000000l) != 0) + high = ~high; + + if ((high & 0x800) != 0) + high += 0x1000; + + if (flags & PATCH_ABS52) { + SLJIT_ASSERT(addr <= S52_MAX); + inst[0] = LUI | RD(TMP_REG3) | (sljit_ins)(high << 12); + } else { + inst[0] = LUI | RD(TMP_REG3) | (sljit_ins)(high & ~0xfff); + inst[1] = ADDI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(high); + inst++; + } + + inst[1] = LUI | RD(reg) | (sljit_ins)((sljit_sw)addr & ~0xfff); + inst[2] = SLLI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I((flags & PATCH_ABS52) ? 
20 : 32); + inst[3] = XOR | RD(reg) | RS1(reg) | RS2(TMP_REG3); + inst += 3; + } +#endif /* !SLJIT_CONFIG_RISCV_32 */ + + if (jump != NULL) { + SLJIT_ASSERT((inst[1] & 0x707f) == JALR); + inst[1] = (inst[1] & 0xfffff) | IMM_I(addr); + } else + inst[1] = ADDI | RD(reg) | RS1(reg) | IMM_I(addr); +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw next_addr; + sljit_sw executable_offset; + sljit_uw addr; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + struct sljit_put_label *put_label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_generate_code(compiler)); + reverse_buf(compiler); + + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + next_addr = 0; + executable_offset = SLJIT_EXEC_OFFSET(code); + + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + put_label = compiler->put_labels; + + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + if (next_addr == word_count) { + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + SLJIT_ASSERT(!put_label || put_label->addr >= word_count); + + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + label->size = (sljit_uw)(code_ptr - code); + label = label->next; + } + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + word_count += 1; +#else + word_count += 5; +#endif + jump->addr = (sljit_uw)code_ptr; + code_ptr = detect_jump_type(jump, code, executable_offset); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + if (put_label && put_label->addr == word_count) { + SLJIT_ASSERT(put_label->label); + put_label->addr = (sljit_uw)code_ptr; +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + code_ptr += 1; + word_count += 1; +#else + code_ptr += put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size)); + word_count += 5; +#endif + put_label = put_label->next; + } + next_addr = compute_next_addr(label, jump, const_, put_label); + } + code_ptr++; + word_count++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)code_ptr; + label->size = (sljit_uw)(code_ptr - code); + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(!put_label); + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); + + jump = compiler->jumps; + while (jump) { + do { + if (!(jump->flags & (PATCH_B | PATCH_J | PATCH_REL32))) { + load_addr_to_reg(jump, TMP_REG1); + break; + } + + addr = (jump->flags & JUMP_LABEL) ? 
jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins *)jump->addr; + addr -= (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset); + + if (jump->flags & PATCH_B) { + SLJIT_ASSERT((sljit_sw)addr >= BRANCH_MIN && (sljit_sw)addr <= BRANCH_MAX); + addr = ((addr & 0x800) >> 4) | ((addr & 0x1e) << 7) | ((addr & 0x7e0) << 20) | ((addr & 0x1000) << 19); + buf_ptr[0] |= (sljit_ins)addr; + break; + } + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (jump->flags & PATCH_REL32) { + SLJIT_ASSERT((sljit_sw)addr >= S32_MIN && (sljit_sw)addr <= S32_MAX); + + if ((addr & 0x800) != 0) + addr += 0x1000; + + buf_ptr[0] = AUIPC | RD(TMP_REG1) | (sljit_ins)((sljit_sw)addr & ~0xfff); + SLJIT_ASSERT((buf_ptr[1] & 0x707f) == JALR); + buf_ptr[1] |= IMM_I(addr); + break; + } +#endif + + SLJIT_ASSERT((sljit_sw)addr >= JUMP_MIN && (sljit_sw)addr <= JUMP_MAX); + addr = (addr & 0xff000) | ((addr & 0x800) << 9) | ((addr & 0x7fe) << 20) | ((addr & 0x100000) << 11); + buf_ptr[0] = JAL | RD((jump->flags & IS_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | (sljit_ins)addr; + } while (0); + jump = jump->next; + } + + put_label = compiler->put_labels; + while (put_label) { + load_addr_to_reg(put_label, 0); + put_label = put_label->next; + } + + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_offset = executable_offset; + compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins); + + code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); + code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); + + SLJIT_CACHE_FLUSH(code, code_ptr); + SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1); + return code; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) +{ + switch (feature_type) { + case SLJIT_HAS_FPU: + case SLJIT_HAS_ZERO_REGISTER: + return 1; + default: + return 0; + } +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + return (type >= SLJIT_ORDERED_EQUAL && type <= SLJIT_ORDERED_LESS_EQUAL); +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* Creates an index in data_transfer_insts array. 
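The flag bits below are combined to form the index directly: for instance WORD_DATA | LOAD_DATA = 1 selects the plain word load (lw/ld), and INT_DATA | SIGNED_DATA | LOAD_DATA = 0x0f selects the signed 32-bit load (lw) in the table further down.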
*/ +#define LOAD_DATA 0x01 +#define WORD_DATA 0x00 +#define BYTE_DATA 0x02 +#define HALF_DATA 0x04 +#define INT_DATA 0x06 +#define SIGNED_DATA 0x08 +/* Separates integer and floating point registers */ +#define GPR_REG 0x0f +#define DOUBLE_DATA 0x10 +#define SINGLE_DATA 0x12 + +#define MEM_MASK 0x1f + +#define ARG_TEST 0x00020 +#define ALT_KEEP_CACHE 0x00040 +#define CUMULATIVE_OP 0x00080 +#define IMM_OP 0x00100 +#define MOVE_OP 0x00200 +#define SRC2_IMM 0x00400 + +#define UNUSED_DEST 0x00800 +#define REG_DEST 0x01000 +#define REG1_SOURCE 0x02000 +#define REG2_SOURCE 0x04000 +#define SLOW_SRC1 0x08000 +#define SLOW_SRC2 0x10000 +#define SLOW_DEST 0x20000 + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +#define STACK_STORE SW +#define STACK_LOAD LW +#else +#define STACK_STORE SD +#define STACK_LOAD LD +#endif + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +#include "sljitNativeRISCV_32.c" +#else +#include "sljitNativeRISCV_64.c" +#endif + +#define STACK_MAX_DISTANCE (-SIMM_MIN) + +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw); + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + sljit_s32 i, tmp, offset; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); + + CHECK_ERROR(); + CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1); +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { + if ((local_size & SSIZE_OF(sw)) != 0) + local_size += SSIZE_OF(sw); + local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); + } +#else + local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); +#endif + local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf; + compiler->local_size = local_size; + + if (local_size <= STACK_MAX_DISTANCE) { + /* Frequent case. */ + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-local_size))); + offset = local_size - SSIZE_OF(sw); + local_size = 0; + } else { + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(STACK_MAX_DISTANCE))); + local_size -= STACK_MAX_DISTANCE; + + if (local_size > STACK_MAX_DISTANCE) + FAIL_IF(load_immediate(compiler, TMP_REG1, local_size, TMP_REG3)); + offset = STACK_MAX_DISTANCE - SSIZE_OF(sw); + } + + FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(RETURN_ADDR_REG), offset)); + + tmp = SLJIT_S0 - saveds; + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(i), offset)); + } + + for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_imm_s_inst(compiler, STACK_STORE | RS1(SLJIT_SP) | RS2(i), offset)); + } + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + /* This alignment is valid because offset is not used after storing FPU regs. 
*/ + if ((offset & SSIZE_OF(sw)) != 0) + offset -= SSIZE_OF(sw); +#endif + + tmp = SLJIT_FS0 - fsaveds; + for (i = SLJIT_FS0; i > tmp; i--) { + offset -= SSIZE_OF(f64); + FAIL_IF(push_imm_s_inst(compiler, FSD | RS1(SLJIT_SP) | FRS2(i), offset)); + } + + for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) { + offset -= SSIZE_OF(f64); + FAIL_IF(push_imm_s_inst(compiler, FSD | RS1(SLJIT_SP) | FRS2(i), offset)); + } + + if (local_size > STACK_MAX_DISTANCE) + FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RS1(SLJIT_SP) | RS2(TMP_REG1))); + else if (local_size > 0) + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(-local_size))); + + if (options & SLJIT_ENTER_REG_ARG) + return SLJIT_SUCCESS; + + arg_types >>= SLJIT_ARG_SHIFT; + saved_arg_count = 0; + tmp = SLJIT_R0; + + while (arg_types > 0) { + if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { + if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_S0 - saved_arg_count) | RS1(tmp) | IMM_I(0))); + saved_arg_count++; + } + tmp++; + } + + arg_types >>= SLJIT_ARG_SHIFT; + } + + return SLJIT_SUCCESS; +} + +#undef STACK_MAX_DISTANCE + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, + sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, + sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) +{ + CHECK_ERROR(); + CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); + set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + + local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1); +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) { + if ((local_size & SSIZE_OF(sw)) != 0) + local_size += SSIZE_OF(sw); + local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); + } +#else + local_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64)); +#endif + compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf; + + return SLJIT_SUCCESS; +} + +#define STACK_MAX_DISTANCE (-SIMM_MIN - 16) + +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to) +{ + sljit_s32 i, tmp, offset; + sljit_s32 local_size = compiler->local_size; + + if (local_size > STACK_MAX_DISTANCE) { + local_size -= STACK_MAX_DISTANCE; + + if (local_size > STACK_MAX_DISTANCE) { + FAIL_IF(load_immediate(compiler, TMP_REG2, local_size, TMP_REG3)); + FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RS1(SLJIT_SP) | RS2(TMP_REG2))); + } else + FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(local_size))); + + local_size = STACK_MAX_DISTANCE; + } + + SLJIT_ASSERT(local_size > 0); + + offset = local_size - SSIZE_OF(sw); + if (!is_return_to) + FAIL_IF(push_inst(compiler, STACK_LOAD | RD(RETURN_ADDR_REG) | RS1(SLJIT_SP) | IMM_I(offset))); + + tmp = SLJIT_S0 - compiler->saveds; + for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_inst(compiler, STACK_LOAD | RD(i) | RS1(SLJIT_SP) | IMM_I(offset))); + } + + for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { + offset -= SSIZE_OF(sw); + FAIL_IF(push_inst(compiler, STACK_LOAD | RD(i) | RS1(SLJIT_SP) | IMM_I(offset))); + } + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) 
+ /* This alignment is valid because offset is not used after storing FPU regs. */ + if ((offset & SSIZE_OF(sw)) != 0) + offset -= SSIZE_OF(sw); +#endif + + tmp = SLJIT_FS0 - compiler->fsaveds; + for (i = SLJIT_FS0; i > tmp; i--) { + offset -= SSIZE_OF(f64); + FAIL_IF(push_inst(compiler, FLD | FRD(i) | RS1(SLJIT_SP) | IMM_I(offset))); + } + + for (i = compiler->fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) { + offset -= SSIZE_OF(f64); + FAIL_IF(push_inst(compiler, FLD | FRD(i) | RS1(SLJIT_SP) | IMM_I(offset))); + } + + return push_inst(compiler, ADDI | RD(SLJIT_SP) | RS1(SLJIT_SP) | IMM_I(local_size)); +} + +#undef STACK_MAX_DISTANCE + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_void(compiler)); + + FAIL_IF(emit_stack_frame_release(compiler, 0)); + return push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(RETURN_ADDR_REG) | IMM_I(0)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); + src = TMP_REG1; + srcw = 0; + } else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(src) | IMM_I(0))); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +#define ARCH_32_64(a, b) a +#else +#define ARCH_32_64(a, b) b +#endif + +static const sljit_ins data_transfer_insts[16 + 4] = { +/* u w s */ ARCH_32_64(F3(0x2) | OPC(0x23) /* sw */, F3(0x3) | OPC(0x23) /* sd */), +/* u w l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x3) | OPC(0x3) /* ld */), +/* u b s */ F3(0x0) | OPC(0x23) /* sb */, +/* u b l */ F3(0x4) | OPC(0x3) /* lbu */, +/* u h s */ F3(0x1) | OPC(0x23) /* sh */, +/* u h l */ F3(0x5) | OPC(0x3) /* lhu */, +/* u i s */ F3(0x2) | OPC(0x23) /* sw */, +/* u i l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x6) | OPC(0x3) /* lwu */), + +/* s w s */ ARCH_32_64(F3(0x2) | OPC(0x23) /* sw */, F3(0x3) | OPC(0x23) /* sd */), +/* s w l */ ARCH_32_64(F3(0x2) | OPC(0x3) /* lw */, F3(0x3) | OPC(0x3) /* ld */), +/* s b s */ F3(0x0) | OPC(0x23) /* sb */, +/* s b l */ F3(0x0) | OPC(0x3) /* lb */, +/* s h s */ F3(0x1) | OPC(0x23) /* sh */, +/* s h l */ F3(0x1) | OPC(0x3) /* lh */, +/* s i s */ F3(0x2) | OPC(0x23) /* sw */, +/* s i l */ F3(0x2) | OPC(0x3) /* lw */, + +/* d s */ F3(0x3) | OPC(0x27) /* fsd */, +/* d l */ F3(0x3) | OPC(0x7) /* fld */, +/* s s */ F3(0x2) | OPC(0x27) /* fsw */, +/* s l */ F3(0x2) | OPC(0x7) /* flw */, +}; + +#undef ARCH_32_64 + +static sljit_s32 push_mem_inst(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 base, sljit_sw offset) +{ + sljit_ins ins; + + SLJIT_ASSERT(FAST_IS_REG(base) && offset <= 0xfff && offset >= SIMM_MIN); + + ins = data_transfer_insts[flags & MEM_MASK] | RS1(base); + if (flags & LOAD_DATA) + ins |= ((flags & MEM_MASK) <= GPR_REG ? 
RD(reg) : FRD(reg)) | IMM_I(offset); + else + ins |= ((flags & MEM_MASK) <= GPR_REG ? RS2(reg) : FRS2(reg)) | IMM_S(offset); + + return push_inst(compiler, ins); +} + +/* Can perform an operation using at most 1 instruction. */ +static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) +{ + + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) { + /* Works for both absolute and relative addresses. */ + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + + FAIL_IF(push_mem_inst(compiler, flags, reg, arg & REG_MASK, argw)); + return -1; + } + return 0; +} + +#define TO_ARGW_HI(argw) (((argw) & ~0xfff) + (((argw) & 0x800) ? 0x1000 : 0)) + +/* See getput_arg below. + Note: can_cache is called only for binary operators. */ +static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) +{ + SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); + + /* Simple operation except for updates. */ + if (arg & OFFS_REG_MASK) { + argw &= 0x3; + next_argw &= 0x3; + if (argw && argw == next_argw && (arg == next_arg || (arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK))) + return 1; + return 0; + } + + if (arg == next_arg) { + if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) + || TO_ARGW_HI(argw) == TO_ARGW_HI(next_argw)) + return 1; + return 0; + } + + return 0; +} + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) +{ + sljit_s32 base = arg & REG_MASK; + sljit_s32 tmp_r = TMP_REG1; + sljit_sw offset, argw_hi; + + SLJIT_ASSERT(arg & SLJIT_MEM); + if (!(next_arg & SLJIT_MEM)) { + next_arg = 0; + next_argw = 0; + } + + /* Since tmp can be the same as base or offset registers, + * these might be unavailable after modifying tmp. */ + if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) + tmp_r = reg; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + + /* Using the cache. */ + if (argw == compiler->cache_argw) { + if (arg == compiler->cache_arg) + return push_mem_inst(compiler, flags, reg, TMP_REG3, 0); + + if ((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) { + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(TMP_REG3) | RS2(base))); + return push_mem_inst(compiler, flags, reg, TMP_REG3, 0); + } + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(TMP_REG3))); + return push_mem_inst(compiler, flags, reg, tmp_r, 0); + } + } + + if (SLJIT_UNLIKELY(argw)) { + compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK); + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG3) | RS1(OFFS_REG(arg)) | IMM_I(argw))); + } + + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(base) | RS2(!argw ? OFFS_REG(arg) : TMP_REG3))); + tmp_r = TMP_REG3; + } + else + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(!argw ?
OFFS_REG(arg) : TMP_REG3))); + return push_mem_inst(compiler, flags, reg, tmp_r, 0); + } + + if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) + return push_mem_inst(compiler, flags, reg, TMP_REG3, argw - compiler->cache_argw); + + if (compiler->cache_arg == SLJIT_MEM && (argw - compiler->cache_argw <= SIMM_MAX) && (argw - compiler->cache_argw >= SIMM_MIN)) { + offset = argw - compiler->cache_argw; + } else { + compiler->cache_arg = SLJIT_MEM; + + argw_hi = TO_ARGW_HI(argw); + + if (next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN && argw_hi != TO_ARGW_HI(next_argw)) { + FAIL_IF(load_immediate(compiler, TMP_REG3, argw, tmp_r)); + compiler->cache_argw = argw; + offset = 0; + } else { + FAIL_IF(load_immediate(compiler, TMP_REG3, argw_hi, tmp_r)); + compiler->cache_argw = argw_hi; + offset = argw & 0xfff; + argw = argw_hi; + } + } + + if (!base) + return push_mem_inst(compiler, flags, reg, TMP_REG3, offset); + + if (arg == next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN) { + compiler->cache_arg = arg; + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG3) | RS1(TMP_REG3) | RS2(base))); + return push_mem_inst(compiler, flags, reg, TMP_REG3, offset); + } + + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(TMP_REG3) | RS2(base))); + return push_mem_inst(compiler, flags, reg, tmp_r, offset); +} + +static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) +{ + sljit_s32 base = arg & REG_MASK; + sljit_s32 tmp_r = TMP_REG1; + + if (getput_arg_fast(compiler, flags, reg, arg, argw)) + return compiler->error; + + if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) + tmp_r = reg; + + if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { + argw &= 0x3; + + if (SLJIT_UNLIKELY(argw)) { + FAIL_IF(push_inst(compiler, SLLI | RD(tmp_r) | RS1(OFFS_REG(arg)) | IMM_I(argw))); + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(tmp_r) | RS2(base))); + } + else + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(base) | RS2(OFFS_REG(arg)))); + + argw = 0; + } else { + FAIL_IF(load_immediate(compiler, tmp_r, TO_ARGW_HI(argw), TMP_REG3)); + + if (base != 0) + FAIL_IF(push_inst(compiler, ADD | RD(tmp_r) | RS1(tmp_r) | RS2(base))); + } + + return push_mem_inst(compiler, flags, reg, tmp_r, argw & 0xfff); +} + +static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +#define WORD 0 +#define IMM_EXTEND(v) (IMM_I(v)) +#else /* !SLJIT_CONFIG_RISCV_32 */ +#define WORD word +#define IMM_EXTEND(v) (IMM_I((op & SLJIT_32) ? (v) : (32 + (v)))) +#endif /* SLJIT_CONFIG_RISCV_32 */ + +static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 dst, sljit_sw src) +{ + sljit_s32 is_clz = (GET_OPCODE(op) == SLJIT_CLZ); +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_ins word = (op & SLJIT_32) >> 5; + sljit_ins max = (op & SLJIT_32) ? 32 : 64; +#else /* !SLJIT_CONFIG_RISCV_64 */ + sljit_ins max = 32; +#endif /* SLJIT_CONFIG_RISCV_64 */ + + SLJIT_ASSERT(WORD == 0 || WORD == 0x8); + + /* The OTHER_FLAG is the counter. 
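+	   It is preset to the operand width, which is also the result when the source value is zero (the BEQ below then skips straight to the final move).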
*/ + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(OTHER_FLAG) | RS1(TMP_ZERO) | IMM_I(max))); + + /* The TMP_REG2 is the next value. */ + if (src != TMP_REG2) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG2) | RS1(src) | IMM_I(0))); + + FAIL_IF(push_inst(compiler, BEQ | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)((is_clz ? 4 : 5) * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20))); + + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(OTHER_FLAG) | RS1(TMP_ZERO) | IMM_I(0))); + if (!is_clz) { + FAIL_IF(push_inst(compiler, ANDI | RD(TMP_REG1) | RS1(TMP_REG2) | IMM_I(1))); + FAIL_IF(push_inst(compiler, BNE | RS1(TMP_REG1) | RS2(TMP_ZERO) | ((sljit_ins)(2 * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20))); + } else + FAIL_IF(push_inst(compiler, BLT | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)(2 * SSIZE_OF(ins)) << 7) | ((sljit_ins)(8 * SSIZE_OF(ins)) << 20))); + + /* The TMP_REG1 is the next shift. */ + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG1) | RS1(TMP_ZERO) | IMM_I(max))); + + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(TMP_REG2) | IMM_I(0))); + FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_I(1))); + + FAIL_IF(push_inst(compiler, (is_clz ? SRL : SLL) | WORD | RD(TMP_REG2) | RS1(EQUAL_FLAG) | RS2(TMP_REG1))); + FAIL_IF(push_inst(compiler, BNE | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)0xfe000e80 - ((2 * SSIZE_OF(ins)) << 7)))); + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(TMP_REG2) | RS1(TMP_REG1) | IMM_I(-1))); + FAIL_IF(push_inst(compiler, (is_clz ? SRL : SLL) | WORD | RD(TMP_REG2) | RS1(EQUAL_FLAG) | RS2(TMP_REG2))); + FAIL_IF(push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(TMP_REG1))); + FAIL_IF(push_inst(compiler, BEQ | RS1(TMP_REG2) | RS2(TMP_ZERO) | ((sljit_ins)0xfe000e80 - ((5 * SSIZE_OF(ins)) << 7)))); + + return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(OTHER_FLAG) | IMM_I(0)); +} + +#define EMIT_LOGICAL(op_imm, op_reg) \ + if (flags & SRC2_IMM) { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_imm | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2))); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_imm | RD(dst) | RS1(src1) | IMM_I(src2))); \ + } \ + else { \ + if (op & SLJIT_SET_Z) \ + FAIL_IF(push_inst(compiler, op_reg | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); \ + if (!(flags & UNUSED_DEST)) \ + FAIL_IF(push_inst(compiler, op_reg | RD(dst) | RS1(src1) | RS2(src2))); \ + } + +#define EMIT_SHIFT(imm, reg) \ + op_imm = (imm); \ + op_reg = (reg); + +static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_s32 src1, sljit_sw src2) +{ + sljit_s32 is_overflow, is_carry, carry_src_r, is_handled; + sljit_ins op_imm, op_reg; +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_ins word = (op & SLJIT_32) >> 5; +#endif /* SLJIT_CONFIG_RISCV_64 */ + + SLJIT_ASSERT(WORD == 0 || WORD == 0x8); + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, ADDI | RD(dst) | RS1(src2) | IMM_I(0)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) + return push_inst(compiler, ANDI | RD(dst) | RS1(src2) | IMM_I(0xff)); + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S8: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + 
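+		/* Sign extension: shift the byte to the top of the operating word, then shift back arithmetically so the sign bit is replicated (e.g. 0xf0 becomes -16). */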
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(24))); + return push_inst(compiler, SRAI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(24)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_U16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(16))); + return push_inst(compiler, SRLI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(16)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S16: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLLI | WORD | RD(dst) | RS1(src2) | IMM_EXTEND(16))); + return push_inst(compiler, SRAI | WORD | RD(dst) | RS1(dst) | IMM_EXTEND(16)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + case SLJIT_MOV_U32: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLLI | RD(dst) | RS1(src2) | IMM_I(32))); + return push_inst(compiler, SRLI | RD(dst) | RS1(dst) | IMM_I(32)); + } + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; + + case SLJIT_MOV_S32: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) + return push_inst(compiler, ADDI | 0x8 | RD(dst) | RS1(src2) | IMM_I(0)); + SLJIT_ASSERT(dst == src2); + return SLJIT_SUCCESS; +#endif /* SLJIT_CONFIG_RISCV_64 */ + + case SLJIT_CLZ: + case SLJIT_CTZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + return emit_clz_ctz(compiler, op, dst, src2); + + case SLJIT_ADD: + /* Overflow computation (both add and sub): overflow = src1_sign ^ src2_sign ^ result_sign ^ carry_flag */ + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + carry_src_r = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0))); + else + FAIL_IF(push_inst(compiler, XORI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-1))); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2))); + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(src2))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADD | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + + if (is_overflow || carry_src_r != 0) { + if (src1 != dst) + carry_src_r = (sljit_s32)src1; + else if (src2 != dst) + carry_src_r = (sljit_s32)src2; + else { + FAIL_IF(push_inst(compiler, ADDI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(0))); + carry_src_r = OTHER_FLAG; + } + } + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(src1) | RS2(src2))); + } + + /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. 
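+	   For example, with byte-sized operands 0xf0 + 0x20 wraps to 0x10, which is below both operands, so the SLTU(I) below yields 1.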
*/ + if (is_overflow || carry_src_r != 0) { + if (flags & SRC2_IMM) + FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(src2))); + else + FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(dst) | RS2(carry_src_r))); + } + + if (!is_overflow) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, XOR | RD(TMP_REG1) | RS1(dst) | RS2(EQUAL_FLAG))); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(0))); + FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_EXTEND(31))); + return push_inst(compiler, XOR | RD(OTHER_FLAG) | RS1(TMP_REG1) | RS2(OTHER_FLAG)); + + case SLJIT_ADDC: + carry_src_r = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(src2))); + } else { + if (carry_src_r != 0) { + if (src1 != dst) + carry_src_r = (sljit_s32)src1; + else if (src2 != dst) + carry_src_r = (sljit_s32)src2; + else { + FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0))); + carry_src_r = EQUAL_FLAG; + } + } + + FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(src1) | RS2(src2))); + } + + /* Carry is zero if a + b >= a or a + b >= b, otherwise it is 1. */ + if (carry_src_r != 0) { + if (flags & SRC2_IMM) + FAIL_IF(push_inst(compiler, SLTUI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(src2))); + else + FAIL_IF(push_inst(compiler, SLTU | RD(EQUAL_FLAG) | RS1(dst) | RS2(carry_src_r))); + } + + FAIL_IF(push_inst(compiler, ADD | WORD | RD(dst) | RS1(dst) | RS2(OTHER_FLAG))); + + if (carry_src_r == 0) + return SLJIT_SUCCESS; + + /* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */ + FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(dst) | RS2(OTHER_FLAG))); + /* Set carry flag. 
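+	   The result carry is set when either the original addition (recorded in EQUAL_FLAG) or the carry-in addition (recorded in OTHER_FLAG) wrapped around.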
*/ + return push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(EQUAL_FLAG)); + + case SLJIT_SUB: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_handled = 0; + + if (flags & SRC2_IMM) { + if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2))); + is_handled = 1; + } + else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) { + FAIL_IF(push_inst(compiler, SLTI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2))); + is_handled = 1; + } + } + + if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) { + is_handled = 1; + + if (flags & SRC2_IMM) { + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + switch (GET_FLAG_TYPE(op)) { + case SLJIT_LESS: + case SLJIT_GREATER_EQUAL: + FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src1) | RS2(src2))); + break; + case SLJIT_GREATER: + case SLJIT_LESS_EQUAL: + FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src2) | RS2(src1))); + break; + case SLJIT_SIG_LESS: + case SLJIT_SIG_GREATER_EQUAL: + FAIL_IF(push_inst(compiler, SLT | RD(OTHER_FLAG) | RS1(src1) | RS2(src2))); + break; + case SLJIT_SIG_GREATER: + case SLJIT_SIG_LESS_EQUAL: + FAIL_IF(push_inst(compiler, SLT | RD(OTHER_FLAG) | RS1(src2) | RS2(src1))); + break; + } + } + + if (is_handled) { + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-src2))); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2)); + } + else { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + if (!(flags & UNUSED_DEST)) + return push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2)); + } + return SLJIT_SUCCESS; + } + + is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW; + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_overflow) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(0))); + else + FAIL_IF(push_inst(compiler, XORI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-1))); + } + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(-src2))); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTUI | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2))); + + /* Only the zero flag is needed. */ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2))); + } + else { + if (is_overflow) + FAIL_IF(push_inst(compiler, XOR | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + else if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + + if (is_overflow || is_carry) + FAIL_IF(push_inst(compiler, SLTU | RD(OTHER_FLAG) | RS1(src1) | RS2(src2))); + + /* Only the zero flag is needed. 
*/ + if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK)) + FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2))); + } + + if (!is_overflow) + return SLJIT_SUCCESS; + + FAIL_IF(push_inst(compiler, XOR | RD(TMP_REG1) | RS1(dst) | RS2(EQUAL_FLAG))); + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, ADDI | RD(EQUAL_FLAG) | RS1(dst) | IMM_I(0))); + FAIL_IF(push_inst(compiler, SRLI | WORD | RD(TMP_REG1) | RS1(TMP_REG1) | IMM_EXTEND(31))); + return push_inst(compiler, XOR | RD(OTHER_FLAG) | RS1(TMP_REG1) | RS2(OTHER_FLAG)); + + case SLJIT_SUBC: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG2) | RS1(TMP_ZERO) | IMM_I(src2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY); + + if (flags & SRC2_IMM) { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTUI | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2))); + + FAIL_IF(push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(-src2))); + } + else { + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + + FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(src1) | RS2(src2))); + } + + if (is_carry) + FAIL_IF(push_inst(compiler, SLTU | RD(TMP_REG1) | RS1(dst) | RS2(OTHER_FLAG))); + + FAIL_IF(push_inst(compiler, SUB | WORD | RD(dst) | RS1(dst) | RS2(OTHER_FLAG))); + + if (!is_carry) + return SLJIT_SUCCESS; + + return push_inst(compiler, OR | RD(OTHER_FLAG) | RS1(EQUAL_FLAG) | RS2(TMP_REG1)); + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & SRC2_IMM)); + + if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) + return push_inst(compiler, MUL | WORD | RD(dst) | RS1(src1) | RS2(src2)); + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (word) { + FAIL_IF(push_inst(compiler, MUL | RD(OTHER_FLAG) | RS1(src1) | RS2(src2))); + FAIL_IF(push_inst(compiler, MUL | 0x8 | RD(dst) | RS1(src1) | RS2(src2))); + return push_inst(compiler, SUB | RD(OTHER_FLAG) | RS1(dst) | RS2(OTHER_FLAG)); + } +#endif /* SLJIT_CONFIG_RISCV_64 */ + + FAIL_IF(push_inst(compiler, MULH | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + FAIL_IF(push_inst(compiler, MUL | RD(dst) | RS1(src1) | RS2(src2))); +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + FAIL_IF(push_inst(compiler, SRAI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(31))); +#else /* !SLJIT_CONFIG_RISCV_32 */ + FAIL_IF(push_inst(compiler, SRAI | RD(OTHER_FLAG) | RS1(dst) | IMM_I(63))); +#endif /* SLJIT_CONFIG_RISCV_32 */ + return push_inst(compiler, SUB | RD(OTHER_FLAG) | RS1(EQUAL_FLAG) | RS2(OTHER_FLAG)); + + case SLJIT_AND: + EMIT_LOGICAL(ANDI, AND); + return SLJIT_SUCCESS; + + case SLJIT_OR: + EMIT_LOGICAL(ORI, OR); + return SLJIT_SUCCESS; + + case SLJIT_XOR: + EMIT_LOGICAL(XORI, XOR); + return SLJIT_SUCCESS; + + case SLJIT_SHL: + case SLJIT_MSHL: + EMIT_SHIFT(SLLI, SLL); + break; + + case SLJIT_LSHR: + case SLJIT_MLSHR: + EMIT_SHIFT(SRLI, SRL); + break; + + case SLJIT_ASHR: + case SLJIT_MASHR: + EMIT_SHIFT(SRAI, SRA); + break; + + case SLJIT_ROTL: + case SLJIT_ROTR: + if (flags & SRC2_IMM) { + SLJIT_ASSERT(src2 != 0); + + op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? SLLI : SRLI; + FAIL_IF(push_inst(compiler, op_imm | WORD | RD(OTHER_FLAG) | RS1(src1) | IMM_I(src2))); + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + src2 = ((op & SLJIT_32) ? 32 : 64) - src2; +#else /* !SLJIT_CONFIG_RISCV_64 */ + src2 = 32 - src2; +#endif /* SLJIT_CONFIG_RISCV_64 */ + op_imm = (GET_OPCODE(op) == SLJIT_ROTL) ? 
SRLI : SLLI; + FAIL_IF(push_inst(compiler, op_imm | WORD | RD(dst) | RS1(src1) | IMM_I(src2))); + return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)); + } + + if (src2 == TMP_ZERO) { + if (dst != src1) + return push_inst(compiler, ADDI | WORD | RD(dst) | RS1(src1) | IMM_I(0)); + return SLJIT_SUCCESS; + } + + FAIL_IF(push_inst(compiler, SUB | WORD | RD(EQUAL_FLAG) | RS1(TMP_ZERO) | RS2(src2))); + op_reg = (GET_OPCODE(op) == SLJIT_ROTL) ? SLL : SRL; + FAIL_IF(push_inst(compiler, op_reg | WORD | RD(OTHER_FLAG) | RS1(src1) | RS2(src2))); + op_reg = (GET_OPCODE(op) == SLJIT_ROTL) ? SRL : SLL; + FAIL_IF(push_inst(compiler, op_reg | WORD | RD(dst) | RS1(src1) | RS2(EQUAL_FLAG))); + return push_inst(compiler, OR | RD(dst) | RS1(dst) | RS2(OTHER_FLAG)); + + default: + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; + } + + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, op_imm | WORD | RD(EQUAL_FLAG) | RS1(src1) | IMM_I(src2))); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, op_imm | WORD | RD(dst) | RS1(src1) | IMM_I(src2)); + } + + if (op & SLJIT_SET_Z) + FAIL_IF(push_inst(compiler, op_reg | WORD | RD(EQUAL_FLAG) | RS1(src1) | RS2(src2))); + + if (flags & UNUSED_DEST) + return SLJIT_SUCCESS; + return push_inst(compiler, op_reg | WORD | RD(dst) | RS1(src1) | RS2(src2)); +} + +#undef IMM_EXTEND + +static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + /* arg1 goes to TMP_REG1 or src reg + arg2 goes to TMP_REG2, imm or src reg + TMP_REG3 can be used for caching + result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ + sljit_s32 dst_r = TMP_REG2; + sljit_s32 src1_r; + sljit_sw src2_r = 0; + sljit_s32 sugg_src2_r = TMP_REG2; + + if (!(flags & ALT_KEEP_CACHE)) { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + } + + if (dst == TMP_REG2) { + SLJIT_ASSERT(HAS_FLAGS(op)); + flags |= UNUSED_DEST; + } + else if (FAST_IS_REG(dst)) { + dst_r = dst; + flags |= REG_DEST; + if (flags & MOVE_OP) + sugg_src2_r = dst_r; + } + else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw)) + flags |= SLOW_DEST; + + if (flags & IMM_OP) { + if ((src2 & SLJIT_IMM) && src2w != 0 && src2w <= SIMM_MAX && src2w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src2w; + } + else if ((flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w != 0 && src1w <= SIMM_MAX && src1w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src1w; + + /* And swap arguments. */ + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + /* src2w = src2_r unneeded. */ + } + } + + /* Source 1. */ + if (FAST_IS_REG(src1)) { + src1_r = src1; + flags |= REG1_SOURCE; + } + else if (src1 & SLJIT_IMM) { + if (src1w) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w, TMP_REG3)); + src1_r = TMP_REG1; + } + else + src1_r = TMP_ZERO; + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC1; + src1_r = TMP_REG1; + } + + /* Source 2. 
*/ + if (FAST_IS_REG(src2)) { + src2_r = src2; + flags |= REG2_SOURCE; + if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP) + dst_r = (sljit_s32)src2_r; + } + else if (src2 & SLJIT_IMM) { + if (!(flags & SRC2_IMM)) { + if (src2w) { + FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w, TMP_REG3)); + src2_r = sugg_src2_r; + } + else { + src2_r = TMP_ZERO; + if (flags & MOVE_OP) { + if (dst & SLJIT_MEM) + dst_r = 0; + else + op = SLJIT_MOV; + } + } + } + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC2; + src2_r = sugg_src2_r; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + SLJIT_ASSERT(src2_r == TMP_REG2); + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (dst & SLJIT_MEM) { + if (!(flags & SLOW_DEST)) { + getput_arg_fast(compiler, flags, dst_r, dst, dstw); + return compiler->error; + } + return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) +{ +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_ins word = (op & SLJIT_32) >> 5; + + SLJIT_ASSERT(word == 0 || word == 0x8); +#endif /* SLJIT_CONFIG_RISCV_64 */ + + CHECK_ERROR(); + CHECK(check_sljit_emit_op0(compiler, op)); + + switch (GET_OPCODE(op)) { + case SLJIT_BREAKPOINT: + return push_inst(compiler, EBREAK); + case SLJIT_NOP: + return push_inst(compiler, ADDI | RD(TMP_ZERO) | RS1(TMP_ZERO) | IMM_I(0)); + case SLJIT_LMUL_UW: + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R1) | IMM_I(0))); + FAIL_IF(push_inst(compiler, MULHU | RD(SLJIT_R1) | RS1(SLJIT_R0) | RS2(SLJIT_R1))); + return push_inst(compiler, MUL | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(TMP_REG1)); + case SLJIT_LMUL_SW: + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R1) | IMM_I(0))); + FAIL_IF(push_inst(compiler, MULH | RD(SLJIT_R1) | RS1(SLJIT_R0) | RS2(SLJIT_R1))); + return push_inst(compiler, MUL | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(TMP_REG1)); + case SLJIT_DIVMOD_UW: + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R0) | IMM_I(0))); + FAIL_IF(push_inst(compiler, DIVU | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1))); + return push_inst(compiler, REMU | WORD | RD(SLJIT_R1) | RS1(TMP_REG1) | RS2(SLJIT_R1)); + case SLJIT_DIVMOD_SW: + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(SLJIT_R0) | IMM_I(0))); + FAIL_IF(push_inst(compiler, DIV | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1))); + return push_inst(compiler, REM | WORD | RD(SLJIT_R1) | RS1(TMP_REG1) | RS2(SLJIT_R1)); + case SLJIT_DIV_UW: + return push_inst(compiler, DIVU | WORD | RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1)); + case SLJIT_DIV_SW: + return push_inst(compiler, DIV | WORD | 
RD(SLJIT_R0) | RS1(SLJIT_R0) | RS2(SLJIT_R1)); + case SLJIT_ENDBR: + case SLJIT_SKIP_FRAMES_BEFORE_RETURN: + return SLJIT_SUCCESS; + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 flags = 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (op & SLJIT_32) + flags = INT_DATA | SIGNED_DATA; +#endif + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + case SLJIT_MOV_U32: + case SLJIT_MOV_S32: + case SLJIT_MOV32: +#endif + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw); + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + case SLJIT_MOV_U32: + return emit_op(compiler, SLJIT_MOV_U32, INT_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u32)srcw : srcw); + + case SLJIT_MOV_S32: + /* Logical operators have no W variant, so sign extended input is necessary for them. */ + case SLJIT_MOV32: + return emit_op(compiler, SLJIT_MOV_S32, INT_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s32)srcw : srcw); +#endif + + case SLJIT_MOV_U8: + return emit_op(compiler, op, BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw); + + case SLJIT_MOV_S8: + return emit_op(compiler, op, BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw); + + case SLJIT_MOV_U16: + return emit_op(compiler, op, HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw); + + case SLJIT_MOV_S16: + return emit_op(compiler, op, HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_s16)srcw : srcw); + + case SLJIT_NOT: + return emit_op(compiler, SLJIT_XOR | (op & (SLJIT_32 | SLJIT_SET_Z)), flags, dst, dstw, src, srcw, SLJIT_IMM, -1); + + case SLJIT_CLZ: + case SLJIT_CTZ: + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 flags = 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (op & SLJIT_32) { + flags |= INT_DATA | SIGNED_DATA; + if (src1 & SLJIT_IMM) + src1w = (sljit_s32)src1w; + if (src2 & SLJIT_IMM) + src2w = (sljit_s32)src2w; + } +#endif + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + case SLJIT_ADDC: + compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUB: + case SLJIT_SUBC: + compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB; + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_MUL: + compiler->status_flags_state = 0; + return emit_op(compiler, op, flags | CUMULATIVE_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_MSHL: + case SLJIT_LSHR: + case SLJIT_MLSHR: + case SLJIT_ASHR: + case SLJIT_MASHR: + case SLJIT_ROTL: + case SLJIT_ROTR: + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + src2w &= 0x1f; +#else /* !SLJIT_CONFIG_RISCV_32 */ + if (op & SLJIT_32) + src2w &= 0x1f; + else + src2w &= 0x3f; +#endif /* SLJIT_CONFIG_RISCV_32 */ + } + + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + } + + SLJIT_UNREACHABLE(); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_left; + sljit_ins ins1, ins2, ins3; +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_ins word = (op & SLJIT_32) >> 5; + sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA; + sljit_sw bit_length = (op & SLJIT_32) ? 
32 : 64; +#else /* !SLJIT_CONFIG_RISCV_64 */ + sljit_s32 inp_flags = WORD_DATA | LOAD_DATA; + sljit_sw bit_length = 32; +#endif /* SLJIT_CONFIG_RISCV_64 */ + + SLJIT_ASSERT(WORD == 0 || WORD == 0x8); + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, (is_left ? SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (src2 & SLJIT_IMM) { + src2w &= bit_length - 1; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG2, src2, src2w)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG1, src1, src1w)); + src1 = TMP_REG1; + } else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w, TMP_REG3)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_IMM) { + if (is_left) { + ins1 = SLLI | WORD | IMM_I(src2w); + src2w = bit_length - src2w; + ins2 = SRLI | WORD | IMM_I(src2w); + } else { + ins1 = SRLI | WORD | IMM_I(src2w); + src2w = bit_length - src2w; + ins2 = SLLI | WORD | IMM_I(src2w); + } + + FAIL_IF(push_inst(compiler, ins1 | RD(src_dst) | RS1(src_dst))); + FAIL_IF(push_inst(compiler, ins2 | RD(TMP_REG1) | RS1(src1))); + return push_inst(compiler, OR | RD(src_dst) | RS1(src_dst) | RS2(TMP_REG1)); + } + + if (is_left) { + ins1 = SLL; + ins2 = SRLI; + ins3 = SRL; + } else { + ins1 = SRL; + ins2 = SLLI; + ins3 = SLL; + } + + FAIL_IF(push_inst(compiler, ins1 | WORD | RD(src_dst) | RS1(src_dst) | RS2(src2))); + + if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) { + FAIL_IF(push_inst(compiler, ins2 | WORD | RD(TMP_REG1) | RS1(src1) | IMM_I(1))); + FAIL_IF(push_inst(compiler, XORI | RD(TMP_REG2) | RS1(src2) | IMM_I((sljit_ins)bit_length - 1))); + src1 = TMP_REG1; + } else + FAIL_IF(push_inst(compiler, SUB | WORD | RD(TMP_REG2) | RS1(TMP_ZERO) | RS2(src2))); + + FAIL_IF(push_inst(compiler, ins3 | WORD | RD(TMP_REG1) | RS1(src1) | RS2(TMP_REG2))); + return push_inst(compiler, OR | RD(src_dst) | RS1(src_dst) | RS2(TMP_REG1)); +} + +#undef WORD + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op_src(compiler, op, src, srcw)); + ADJUST_LOCAL_OFFSET(src, srcw); + + switch (op) { + case SLJIT_FAST_RETURN: + if (FAST_IS_REG(src)) + FAIL_IF(push_inst(compiler, ADDI | RD(RETURN_ADDR_REG) | RS1(src) | IMM_I(0))); + else + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG, src, srcw)); + + return push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(RETURN_ADDR_REG) | IMM_I(0)); + case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN: + return SLJIT_SUCCESS; + case SLJIT_PREFETCH_L1: + case SLJIT_PREFETCH_L2: + case SLJIT_PREFETCH_L3: + case SLJIT_PREFETCH_ONCE: + return SLJIT_SUCCESS; + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_register_index(reg)); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) +{ + CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); + return freg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct 
sljit_compiler *compiler, + void *instruction, sljit_u32 size) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); + + return push_inst(compiler, *(sljit_ins*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_32) >> 7)) +#define FMT(op) ((sljit_ins)((op & SLJIT_32) ^ SLJIT_32) << 17) + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +# define flags (sljit_u32)0 +#else + sljit_u32 flags = ((sljit_u32)(GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)) << 21; +#endif + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw)); + src = TMP_FREG1; + } + + FAIL_IF(push_inst(compiler, FCVT_W_S | FMT(op) | flags | RD(dst_r) | FRS1(src))); + + /* Store the integer value from a VFP register. */ + if (dst & SLJIT_MEM) { +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + return emit_op_mem2(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0); +#else + return emit_op_mem2(compiler, flags ? WORD_DATA : INT_DATA, TMP_REG2, dst, dstw, 0, 0); +#endif + } + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +# undef flags +#endif +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_ins inst; +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + sljit_u32 flags = ((sljit_u32)(GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)) << 21; +#endif + + sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; + + if (src & SLJIT_MEM) { +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); +#else + FAIL_IF(emit_op_mem2(compiler, (flags ? 
WORD_DATA : INT_DATA) | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); +#endif + src = TMP_REG1; + } else if (src & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) + srcw = (sljit_s32)srcw; +#endif + + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw, TMP_REG3)); + src = TMP_REG1; + } + + inst = FCVT_S_W | FMT(op) | FRD(dst_r) | RS1(src); + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + if (op & SLJIT_32) + inst |= F3(0x7); +#else + inst |= flags; + + if (op != SLJIT_CONV_F64_FROM_S32) + inst |= F3(0x7); +#endif + + FAIL_IF(push_inst(compiler, inst)); + + if (dst & SLJIT_MEM) + return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_ins inst; + + if (src1 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + src1 = TMP_FREG1; + } + + if (src2 & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0)); + src2 = TMP_FREG2; + } + + switch (GET_FLAG_TYPE(op)) { + case SLJIT_F_EQUAL: + case SLJIT_F_NOT_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + inst = FEQ_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2); + break; + case SLJIT_F_LESS: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_LESS: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + inst = FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2); + break; + case SLJIT_ORDERED_GREATER: + case SLJIT_UNORDERED_OR_LESS_EQUAL: + inst = FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src2) | FRS2(src1); + break; + case SLJIT_F_GREATER: + case SLJIT_F_LESS_EQUAL: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_ORDERED_LESS_EQUAL: + inst = FLE_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2); + break; + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_ORDERED_GREATER_EQUAL: + inst = FLE_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src2) | FRS2(src1); + break; + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ + case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */ + FAIL_IF(push_inst(compiler, FLT_S | FMT(op) | RD(OTHER_FLAG) | FRS1(src1) | FRS2(src2))); + FAIL_IF(push_inst(compiler, FLT_S | FMT(op) | RD(TMP_REG1) | FRS1(src2) | FRS2(src1))); + inst = OR | RD(OTHER_FLAG) | RS1(OTHER_FLAG) | RS2(TMP_REG1); + break; + default: /* SLJIT_UNORDERED, SLJIT_ORDERED */ + FAIL_IF(push_inst(compiler, FADD_S | FMT(op) | FRD(TMP_FREG1) | FRS1(src1) | FRS2(src2))); + inst = FEQ_S | FMT(op) | RD(OTHER_FLAG) | FRS1(TMP_FREG1) | FRS2(TMP_FREG1); + break; + } + + return push_inst(compiler, inst); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src, sljit_sw srcw) +{ + sljit_s32 dst_r; + + CHECK_ERROR(); + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); + + if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) + op ^= SLJIT_32; + + dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; + + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw)); + src = dst_r; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOV_F64: + if (src != dst_r) { + if (dst_r != TMP_FREG1) + FAIL_IF(push_inst(compiler, FSGNJ_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src))); + else + dst_r = src; + } + break; + case SLJIT_NEG_F64: + FAIL_IF(push_inst(compiler, FSGNJN_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src))); + break; + case SLJIT_ABS_F64: + FAIL_IF(push_inst(compiler, FSGNJX_S | FMT(op) | FRD(dst_r) | FRS1(src) | FRS2(src))); + break; + case SLJIT_CONV_F64_FROM_F32: + /* The SLJIT_32 bit is inverted because sljit_f32 needs to be loaded from the memory. */ + FAIL_IF(push_inst(compiler, FCVT_S_D | ((op & SLJIT_32) ? (1 << 25) : ((1 << 20) | F3(7))) | FRD(dst_r) | FRS1(src))); + op ^= SLJIT_32; + break; + } + + if (dst & SLJIT_MEM) + return emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 dst_r, flags = 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2; + + if (src1 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1 = TMP_FREG1; + } else + flags |= SLOW_SRC1; + } + + if (src2 & SLJIT_MEM) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + FAIL_IF(compiler->error); + src2 = TMP_FREG2; + } else + flags |= SLOW_SRC2; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + + if (flags & SLOW_SRC1) + src1 = TMP_FREG1; + if (flags & SLOW_SRC2) + src2 = TMP_FREG2; + + switch (GET_OPCODE(op)) { + case SLJIT_ADD_F64: + FAIL_IF(push_inst(compiler, FADD_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2))); + break; + + case SLJIT_SUB_F64: + FAIL_IF(push_inst(compiler, FSUB_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2))); + break; + + case SLJIT_MUL_F64: + FAIL_IF(push_inst(compiler, FMUL_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2))); + break; + + case SLJIT_DIV_F64: + FAIL_IF(push_inst(compiler, FDIV_S | FMT(op) | FRD(dst_r) | FRS1(src1) | FRS2(src2))); + break; + } + + if (dst_r == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + + return SLJIT_SUCCESS; +} + +#undef FLOAT_DATA +#undef 
FMT + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (FAST_IS_REG(dst)) + return push_inst(compiler, ADDI | RD(dst) | RS1(RETURN_ADDR_REG) | IMM_I(0)); + + /* Memory. */ + return emit_op_mem(compiler, WORD_DATA, RETURN_ADDR_REG, dst, dstw); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_label(compiler)); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) +#define BRANCH_LENGTH ((sljit_ins)(3 * sizeof(sljit_ins)) << 7) +#else +#define BRANCH_LENGTH ((sljit_ins)(7 * sizeof(sljit_ins)) << 7) +#endif + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) +{ + struct sljit_jump *jump; + sljit_ins inst; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_jump(compiler, type)); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + switch (type) { + case SLJIT_EQUAL: + inst = BNE | RS1(EQUAL_FLAG) | RS2(TMP_ZERO) | BRANCH_LENGTH; + break; + case SLJIT_NOT_EQUAL: + inst = BEQ | RS1(EQUAL_FLAG) | RS2(TMP_ZERO) | BRANCH_LENGTH; + break; + case SLJIT_LESS: + case SLJIT_GREATER: + case SLJIT_SIG_LESS: + case SLJIT_SIG_GREATER: + case SLJIT_OVERFLOW: + case SLJIT_CARRY: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */ + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: + case SLJIT_ORDERED_GREATER: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_ORDERED: + inst = BEQ | RS1(OTHER_FLAG) | RS2(TMP_ZERO) | BRANCH_LENGTH; + break; + case SLJIT_GREATER_EQUAL: + case SLJIT_LESS_EQUAL: + case SLJIT_SIG_GREATER_EQUAL: + case SLJIT_SIG_LESS_EQUAL: + case SLJIT_NOT_OVERFLOW: + case SLJIT_NOT_CARRY: + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ + case SLJIT_F_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: + case SLJIT_F_GREATER: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_UNORDERED: + inst = BNE | RS1(OTHER_FLAG) | RS2(TMP_ZERO) | BRANCH_LENGTH; + break; + default: + /* Not conditional branch. 
*/ + inst = 0; + break; + } + + if (inst != 0) { + PTR_FAIL_IF(push_inst(compiler, inst)); + jump->flags |= IS_COND; + } + + jump->addr = compiler->size; + inst = JALR | RS1(TMP_REG1) | IMM_I(0); + + if (type >= SLJIT_FAST_CALL) { + jump->flags |= IS_CALL; + inst |= RD(RETURN_ADDR_REG); + } + + PTR_FAIL_IF(push_inst(compiler, inst)); + + /* Maximum number of instructions required for generating a constant. */ +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + compiler->size += 1; +#else + compiler->size += 5; +#endif + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types) +{ + SLJIT_UNUSED_ARG(arg_types); + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); + + if (type & SLJIT_CALL_RETURN) { + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0)); + type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); + } + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_jump(compiler, type); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + struct sljit_jump *jump; + sljit_s32 flags; + sljit_ins inst; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w)); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + flags = WORD_DATA | LOAD_DATA; +#else /* !SLJIT_CONFIG_RISCV_32 */ + flags = ((type & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA; +#endif /* SLJIT_CONFIG_RISCV_32 */ + + if (src1 & SLJIT_MEM) { + PTR_FAIL_IF(emit_op_mem2(compiler, flags, TMP_REG1, src1, src1w, src2, src2w)); + src1 = TMP_REG1; + } + + if (src2 & SLJIT_MEM) { + PTR_FAIL_IF(emit_op_mem2(compiler, flags, TMP_REG2, src2, src2w, 0, 0)); + src2 = TMP_REG2; + } + + if (src1 & SLJIT_IMM) { + if (src1w != 0) { + PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, src1w, TMP_REG3)); + src1 = TMP_REG1; + } + else + src1 = TMP_ZERO; + } + + if (src2 & SLJIT_IMM) { + if (src2w != 0) { + PTR_FAIL_IF(load_immediate(compiler, TMP_REG2, src2w, TMP_REG3)); + src2 = TMP_REG2; + } + else + src2 = TMP_ZERO; + } + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, (sljit_u32)((type & SLJIT_REWRITABLE_JUMP) | IS_COND)); + type &= 0xff; + + switch (type) { + case SLJIT_EQUAL: + inst = BNE | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_NOT_EQUAL: + inst = BEQ | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_LESS: + inst = BGEU | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_GREATER_EQUAL: + inst = BLTU | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_GREATER: + inst = BGEU | RS1(src2) | RS2(src1) | BRANCH_LENGTH; + break; + case SLJIT_LESS_EQUAL: + inst = BLTU | RS1(src2) | RS2(src1) | BRANCH_LENGTH; + break; + case SLJIT_SIG_LESS: + inst = BGE | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_SIG_GREATER_EQUAL: + inst = BLT | RS1(src1) | RS2(src2) | BRANCH_LENGTH; + break; + case SLJIT_SIG_GREATER: + inst = BGE | RS1(src2) | RS2(src1) | BRANCH_LENGTH; + break; + case SLJIT_SIG_LESS_EQUAL: + inst = BLT | RS1(src2) | RS2(src1) | BRANCH_LENGTH; + break; + } + + PTR_FAIL_IF(push_inst(compiler, inst)); + + jump->addr = compiler->size; + 
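+	/* The target of this JALR is resolved later; the size bump below reserves room for the longest sequence that loads it into TMP_REG1. */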
PTR_FAIL_IF(push_inst(compiler, JALR | RD(TMP_ZERO) | RS1(TMP_REG1) | IMM_I(0))); + + /* Maximum number of instructions required for generating a constant. */ +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + compiler->size += 1; +#else + compiler->size += 5; +#endif + return jump; +} + +#undef BRANCH_LENGTH + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) +{ + struct sljit_jump *jump; + + CHECK_ERROR(); + CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); + + if (!(src & SLJIT_IMM)) { + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); + src = TMP_REG1; + } + return push_inst(compiler, JALR | RD((type >= SLJIT_FAST_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | RS1(src) | IMM_I(0)); + } + + /* These jumps are converted to jump/call instructions when possible. */ + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_CALL : 0)); + jump->u.target = (sljit_uw)srcw; + + jump->addr = compiler->size; + FAIL_IF(push_inst(compiler, JALR | RD((type >= SLJIT_FAST_CALL) ? RETURN_ADDR_REG : TMP_ZERO) | RS1(TMP_REG1) | IMM_I(0))); + + /* Maximum number of instructions required for generating a constant. */ +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + compiler->size += 1; +#else + compiler->size += 5; +#endif + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 arg_types, + sljit_s32 src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(arg_types); + CHECK_ERROR(); + CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); + src = TMP_REG1; + } + + if (type & SLJIT_CALL_RETURN) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG1) | RS1(src) | IMM_I(0))); + src = TMP_REG1; + } + + FAIL_IF(emit_stack_frame_release(compiler, 0)); + type = SLJIT_JUMP; + } + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, type, src, srcw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, sljit_sw dstw, + sljit_s32 type) +{ + sljit_s32 src_r, dst_r, invert; + sljit_s32 saved_op = op; +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + sljit_s32 mem_type = WORD_DATA; +#else + sljit_s32 mem_type = ((op & SLJIT_32) || op == SLJIT_MOV32) ? (INT_DATA | SIGNED_DATA) : WORD_DATA; +#endif + + CHECK_ERROR(); + CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + op = GET_OPCODE(op); + dst_r = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? 
dst : TMP_REG2; + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (op >= SLJIT_ADD && (dst & SLJIT_MEM)) + FAIL_IF(emit_op_mem2(compiler, mem_type | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw)); + + if (type < SLJIT_F_EQUAL) { + src_r = OTHER_FLAG; + invert = type & 0x1; + + switch (type) { + case SLJIT_EQUAL: + case SLJIT_NOT_EQUAL: + FAIL_IF(push_inst(compiler, SLTUI | RD(dst_r) | RS1(EQUAL_FLAG) | IMM_I(1))); + src_r = dst_r; + break; + case SLJIT_OVERFLOW: + case SLJIT_NOT_OVERFLOW: + if (compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)) { + src_r = OTHER_FLAG; + break; + } + FAIL_IF(push_inst(compiler, SLTUI | RD(dst_r) | RS1(OTHER_FLAG) | IMM_I(1))); + src_r = dst_r; + invert ^= 0x1; + break; + } + } else { + invert = 0; + src_r = OTHER_FLAG; + + switch (type) { + case SLJIT_F_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */ + case SLJIT_F_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: + case SLJIT_F_GREATER: + case SLJIT_UNORDERED_OR_GREATER: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_UNORDERED: + invert = 1; + break; + } + } + + if (invert) { + FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(src_r) | IMM_I(1))); + src_r = dst_r; + } + + if (op < SLJIT_ADD) { + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, mem_type, src_r, dst, dstw); + + if (src_r != dst_r) + return push_inst(compiler, ADDI | RD(dst_r) | RS1(src_r) | IMM_I(0)); + return SLJIT_SUCCESS; + } + + mem_type |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE; + + if (dst & SLJIT_MEM) + return emit_op(compiler, saved_op, mem_type, dst, dstw, TMP_REG1, 0, src_r, 0); + return emit_op(compiler, saved_op, mem_type, dst, dstw, dst, dstw, src_r, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_s32 flags; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) { + memw &= 0x3; + + if (SLJIT_UNLIKELY(memw != 0)) { + FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG1) | RS1(OFFS_REG(mem)) | IMM_I(memw))); + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(mem & REG_MASK))); + } else + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(mem & REG_MASK) | RS2(OFFS_REG(mem)))); + + mem = TMP_REG1; + memw = 0; + } else if (memw > SIMM_MAX - SSIZE_OF(sw) || memw < SIMM_MIN) { + if (((memw + 0x800) & 0xfff) <= 0xfff - SSIZE_OF(sw)) { + FAIL_IF(load_immediate(compiler, TMP_REG1, TO_ARGW_HI(memw), TMP_REG3)); + memw &= 0xfff; + } else { + FAIL_IF(load_immediate(compiler, TMP_REG1, memw, TMP_REG3)); + memw = 0; + } + + if (mem & REG_MASK) + FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RS1(TMP_REG1) | RS2(mem & REG_MASK))); + + mem = TMP_REG1; + } else { + mem &= REG_MASK; + memw &= 0xfff; + } + + SLJIT_ASSERT((memw >= 0 && memw <= SIMM_MAX - SSIZE_OF(sw)) || (memw > SIMM_MAX && memw <= 0xfff)); + + if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) { + 
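+		/* The base register doubles as the first register of the pair, so the second word is loaded first to avoid clobbering the base before the second access. */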
FAIL_IF(push_mem_inst(compiler, WORD_DATA | LOAD_DATA, REG_PAIR_SECOND(reg), mem, (memw + SSIZE_OF(sw)) & 0xfff)); + return push_mem_inst(compiler, WORD_DATA | LOAD_DATA, REG_PAIR_FIRST(reg), mem, memw); + } + + flags = WORD_DATA | (!(type & SLJIT_MEM_STORE) ? LOAD_DATA : 0); + + FAIL_IF(push_mem_inst(compiler, flags, REG_PAIR_FIRST(reg), mem, memw)); + return push_mem_inst(compiler, flags, REG_PAIR_SECOND(reg), mem, (memw + SSIZE_OF(sw)) & 0xfff); +} + +#undef TO_ARGW_HI + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(emit_const(compiler, dst_r, init_value, ADDI | RD(dst_r))); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) +{ + struct sljit_put_label *put_label; + sljit_s32 dst_r; + + CHECK_ERROR_PTR(); + CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); + ADJUST_LOCAL_OFFSET(dst, dstw); + + put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); + PTR_FAIL_IF(!put_label); + set_put_label(put_label, compiler, 0); + + dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; + PTR_FAIL_IF(push_inst(compiler, (sljit_ins)dst_r)); +#if (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) + compiler->size += 1; +#else + compiler->size += 5; +#endif + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + + return put_label; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) +{ + sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset); +} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeS390X.c b/thirdparty/pcre2/src/sljit/sljitNativeS390X.c index 8eef910c42..8b51bad9bc 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeS390X.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeS390X.c @@ -103,11 +103,8 @@ static const sljit_gpr r15 = 15; /* reg_map[SLJIT_NUMBER_OF_REGISTERS + 1]: stac /* When reg cannot be unused. */ #define IS_GPR_REG(reg) ((reg > 0) && (reg) <= SLJIT_SP) -/* Link registers. The normal link register is r14, but since - we use that for flags we need to use r0 instead to do fast - calls so that flags are preserved. */ +/* Link register. 
*/ static const sljit_gpr link_r = 14; /* r14 */ -static const sljit_gpr fast_link_r = 0; /* r0 */ #define TMP_FREG1 (0) @@ -220,7 +217,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t } /* fallthrough */ - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_ORDERED_EQUAL: return cc0; case SLJIT_NOT_EQUAL: @@ -234,13 +232,14 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t } /* fallthrough */ - case SLJIT_NOT_EQUAL_F64: + case SLJIT_UNORDERED_OR_NOT_EQUAL: return (cc1 | cc2 | cc3); case SLJIT_LESS: return cc1; case SLJIT_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: return (cc0 | cc2 | cc3); case SLJIT_GREATER: @@ -254,7 +253,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t return (cc0 | cc1 | cc2); case SLJIT_SIG_LESS: - case SLJIT_LESS_F64: + case SLJIT_F_LESS: + case SLJIT_ORDERED_LESS: return cc1; case SLJIT_NOT_CARRY: @@ -263,7 +263,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t /* fallthrough */ case SLJIT_SIG_LESS_EQUAL: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: return (cc0 | cc1); case SLJIT_CARRY: @@ -272,6 +273,7 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t /* fallthrough */ case SLJIT_SIG_GREATER: + case SLJIT_UNORDERED_OR_GREATER: /* Overflow is considered greater, see SLJIT_SUB. */ return cc2 | cc3; @@ -283,7 +285,7 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t return (cc2 | cc3); /* fallthrough */ - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return cc3; case SLJIT_NOT_OVERFLOW: @@ -291,14 +293,29 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t return (cc0 | cc1); /* fallthrough */ - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return (cc0 | cc1 | cc2); - case SLJIT_GREATER_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + return (cc1 | cc2); + + case SLJIT_F_GREATER: + case SLJIT_ORDERED_GREATER: return cc2; - case SLJIT_GREATER_EQUAL_F64: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: return (cc0 | cc2); + + case SLJIT_UNORDERED_OR_LESS_EQUAL: + return (cc0 | cc1 | cc3); + + case SLJIT_UNORDERED_OR_EQUAL: + return (cc0 | cc3); + + case SLJIT_UNORDERED_OR_LESS: + return (cc1 | cc3); } SLJIT_UNREACHABLE(); @@ -978,7 +995,7 @@ static sljit_s32 make_addr_bx(struct sljit_compiler *compiler, (cond) ? EVAL(i1, r, addr) : EVAL(i2, r, addr) /* May clobber tmp1. */ -static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst, +static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst_r, sljit_s32 src, sljit_sw srcw, sljit_s32 is_32bit) { @@ -986,21 +1003,36 @@ static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst, sljit_ins ins; SLJIT_ASSERT(src & SLJIT_MEM); - if (have_ldisp() || !is_32bit) - FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1)); - else + + if (is_32bit && ((src & OFFS_REG_MASK) || is_u12(srcw) || !is_s20(srcw))) { FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1)); + return push_inst(compiler, 0x58000000 /* l */ | R20A(dst_r) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset); + } - if (is_32bit) - ins = WHEN(is_u12(addr.offset), dst, l, ly, addr); - else - ins = lg(dst, addr.offset, addr.index, addr.base); + FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1)); - return push_inst(compiler, ins); + ins = is_32bit ? 
0xe30000000058 /* ly */ : 0xe30000000004 /* lg */; + return push_inst(compiler, ins | R36A(dst_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset)); } /* May clobber tmp1. */ -static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src, +static sljit_s32 load_unsigned_word(struct sljit_compiler *compiler, sljit_gpr dst_r, + sljit_s32 src, sljit_sw srcw, + sljit_s32 is_32bit) +{ + struct addr addr; + sljit_ins ins; + + SLJIT_ASSERT(src & SLJIT_MEM); + + FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1)); + + ins = is_32bit ? 0xe30000000016 /* llgf */ : 0xe30000000004 /* lg */; + return push_inst(compiler, ins | R36A(dst_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset)); +} + +/* May clobber tmp1. */ +static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src_r, sljit_s32 dst, sljit_sw dstw, sljit_s32 is_32bit) { @@ -1008,17 +1040,16 @@ static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src, sljit_ins ins; SLJIT_ASSERT(dst & SLJIT_MEM); - if (have_ldisp() || !is_32bit) - FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1)); - else + + if (is_32bit && ((dst & OFFS_REG_MASK) || is_u12(dstw) || !is_s20(dstw))) { FAIL_IF(make_addr_bx(compiler, &addr, dst, dstw, tmp1)); + return push_inst(compiler, 0x50000000 /* st */ | R20A(src_r) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset); + } - if (is_32bit) - ins = WHEN(is_u12(addr.offset), src, st, sty, addr); - else - ins = stg(src, addr.offset, addr.index, addr.base); + FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1)); - return push_inst(compiler, ins); + ins = is_32bit ? 0xe30000000050 /* sty */ : 0xe30000000024 /* stg */; + return push_inst(compiler, ins | R36A(src_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset)); } #undef WHEN @@ -1618,16 +1649,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) { /* TODO(mundaym): implement all */ switch (feature_type) { + case SLJIT_HAS_FPU: case SLJIT_HAS_CLZ: - return have_eimm() ? 1 : 0; /* FLOGR instruction */ + case SLJIT_HAS_ROT: + case SLJIT_HAS_PREFETCH: + return 1; + case SLJIT_HAS_CTZ: + return 2; case SLJIT_HAS_CMOV: return have_lscond1() ? 
1 : 0; - case SLJIT_HAS_FPU: - return 1; } return 0; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type) +{ + return (type >= SLJIT_UNORDERED && type <= SLJIT_ORDERED_LESS_EQUAL); +} + /* --------------------------------------------------------------------- */ /* Entry, exit */ /* --------------------------------------------------------------------- */ @@ -1636,7 +1675,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_s32 word_arg_count = 0; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); sljit_s32 offset, i, tmp; CHECK_ERROR(); @@ -1648,8 +1687,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi offset = 2 * SSIZE_OF(sw); if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) { - FAIL_IF(push_inst(compiler, stmg(r6, r14, offset, r15))); /* save registers TODO(MGM): optimize */ - offset += 9 * SSIZE_OF(sw); + if (saved_arg_count == 0) { + FAIL_IF(push_inst(compiler, stmg(r6, r14, offset, r15))); + offset += 9 * SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, stmg(r6, r13 - (sljit_gpr)saved_arg_count, offset, r15))); + offset += (8 - saved_arg_count) * SSIZE_OF(sw); + } } else { if (scratches == SLJIT_FIRST_SAVED_REG) { FAIL_IF(push_inst(compiler, stg(r6, offset, 0, r15))); @@ -1659,15 +1703,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw); } - if (saveds == 0) { - FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15))); - offset += SSIZE_OF(sw); - } else { - FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r14, offset, r15))); - offset += (saveds + 1) * SSIZE_OF(sw); + if (saved_arg_count == 0) { + if (saveds == 0) { + FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15))); + offset += SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r14, offset, r15))); + offset += (saveds + 1) * SSIZE_OF(sw); + } + } else if (saveds > saved_arg_count) { + if (saveds == saved_arg_count + 1) { + FAIL_IF(push_inst(compiler, stg(r14 - (sljit_gpr)saveds, offset, 0, r15))); + offset += SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)saved_arg_count, offset, r15))); + offset += (saveds - saved_arg_count) * SSIZE_OF(sw); + } } } + if (saved_arg_count > 0) { + FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15))); + offset += SSIZE_OF(sw); + } + tmp = SLJIT_FS0 - fsaveds; for (i = SLJIT_FS0; i > tmp; i--) { FAIL_IF(push_inst(compiler, 0x60000000 /* std */ | F20(i) | R12A(r15) | (sljit_ins)offset)); @@ -1684,15 +1743,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi FAIL_IF(push_inst(compiler, 0xe30000000071 /* lay */ | R36A(r15) | R28A(r15) | disp_s20(-local_size))); + if (options & SLJIT_ENTER_REG_ARG) + return SLJIT_SUCCESS; + arg_types >>= SLJIT_ARG_SHIFT; + saved_arg_count = 0; tmp = 0; while (arg_types > 0) { if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0 - tmp), gpr(SLJIT_R0 + word_arg_count)))); - tmp++; + FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0 - saved_arg_count), gpr(SLJIT_R0 + tmp)))); + saved_arg_count++; } - word_arg_count++; + tmp++; } arg_types >>= SLJIT_ARG_SHIFT; @@ -1713,12 +1776,13 @@ 
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp return SLJIT_SUCCESS; } -static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_gpr last_reg) { sljit_s32 offset, i, tmp; sljit_s32 local_size = compiler->local_size; sljit_s32 saveds = compiler->saveds; sljit_s32 scratches = compiler->scratches; + sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options); if (is_u12(local_size)) FAIL_IF(push_inst(compiler, 0x41000000 /* ly */ | R20A(r15) | R12A(r15) | (sljit_ins)local_size)); @@ -1727,8 +1791,13 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) offset = 2 * SSIZE_OF(sw); if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) { - FAIL_IF(push_inst(compiler, lmg(r6, r14, offset, r15))); /* save registers TODO(MGM): optimize */ - offset += 9 * SSIZE_OF(sw); + if (kept_saveds_count == 0) { + FAIL_IF(push_inst(compiler, lmg(r6, last_reg, offset, r15))); + offset += 9 * SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, lmg(r6, r13 - (sljit_gpr)kept_saveds_count, offset, r15))); + offset += (8 - kept_saveds_count) * SSIZE_OF(sw); + } } else { if (scratches == SLJIT_FIRST_SAVED_REG) { FAIL_IF(push_inst(compiler, lg(r6, offset, 0, r15))); @@ -1738,15 +1807,35 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw); } - if (saveds == 0) { - FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15))); - offset += SSIZE_OF(sw); - } else { - FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, r14, offset, r15))); - offset += (saveds + 1) * SSIZE_OF(sw); + if (kept_saveds_count == 0) { + if (saveds == 0) { + if (last_reg == r14) + FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15))); + offset += SSIZE_OF(sw); + } else if (saveds == 1 && last_reg == r13) { + FAIL_IF(push_inst(compiler, lg(r13, offset, 0, r15))); + offset += 2 * SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, last_reg, offset, r15))); + offset += (saveds + 1) * SSIZE_OF(sw); + } + } else if (saveds > kept_saveds_count) { + if (saveds == kept_saveds_count + 1) { + FAIL_IF(push_inst(compiler, lg(r14 - (sljit_gpr)saveds, offset, 0, r15))); + offset += SSIZE_OF(sw); + } else { + FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)kept_saveds_count, offset, r15))); + offset += (saveds - kept_saveds_count) * SSIZE_OF(sw); + } } } + if (kept_saveds_count > 0) { + if (last_reg == r14) + FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15))); + offset += SSIZE_OF(sw); + } + tmp = SLJIT_FS0 - compiler->fsaveds; for (i = SLJIT_FS0; i > tmp; i--) { FAIL_IF(push_inst(compiler, 0x68000000 /* ld */ | F20(i) | R12A(r15) | (sljit_ins)offset)); @@ -1766,10 +1855,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler CHECK_ERROR(); CHECK(check_sljit_emit_return_void(compiler)); - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, r14)); return push_inst(compiler, br(r14)); /* return */ } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + if (src & SLJIT_MEM) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */)); + src = TMP_REG2; + srcw = 0; + } else if (src >= 
SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src)))); + src = TMP_REG2; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, r13)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + /* --------------------------------------------------------------------- */ /* Operators */ /* --------------------------------------------------------------------- */ @@ -1858,6 +1970,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile return push_inst(compiler, lgr(arg1, tmp0)); } +static sljit_s32 sljit_emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_gpr dst_r, sljit_gpr src_r) +{ + sljit_s32 is_ctz = (GET_OPCODE(op) == SLJIT_CTZ); + + if ((op & SLJIT_32) && src_r != tmp0) { + FAIL_IF(push_inst(compiler, 0xb9160000 /* llgfr */ | R4A(tmp0) | R0A(src_r))); + src_r = tmp0; + } + + if (is_ctz) { + FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */) | R4A(tmp1) | R0A(src_r))); + + if (src_r == tmp0) + FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1400 /* nr */ : 0xb9800000 /* ngr */) | R4A(tmp0) | R0A(tmp1))); + else + FAIL_IF(push_inst(compiler, 0xb9e40000 /* ngrk */ | R12A(tmp1) | R4A(tmp0) | R0A(src_r))); + + src_r = tmp0; + } + + FAIL_IF(push_inst(compiler, 0xb9830000 /* flogr */ | R4A(tmp0) | R0A(src_r))); + + if (is_ctz) + FAIL_IF(push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(tmp1) | R32A(tmp0) | ((sljit_ins)(-64 & 0xffff) << 16))); + + if (op & SLJIT_32) { + if (!is_ctz && dst_r != tmp0) + return push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(dst_r) | R32A(tmp0) | ((sljit_ins)(-32 & 0xffff) << 16)); + + FAIL_IF(push_inst(compiler, 0xc20800000000 /* agfi */ | R36A(tmp0) | (sljit_u32)-32)); + } + + if (is_ctz) + FAIL_IF(push_inst(compiler, 0xec0000000057 /* rxsbg */ | R36A(tmp0) | R32A(tmp1) | ((sljit_ins)((op & SLJIT_32) ? 59 : 58) << 24) | (63 << 16) | ((sljit_ins)((op & SLJIT_32) ? 5 : 6) << 8))); + + if (dst_r == tmp0) + return SLJIT_SUCCESS; + + return push_inst(compiler, ((op & SLJIT_32) ? 0x1800 /* lr */ : 0xb9040000 /* lgr */) | R4A(dst_r) | R0A(tmp0)); +} + /* LEVAL will be defined later with different parameters as needed */ #define WHEN2(cond, i1, i2) (cond) ? LEVAL(i1) : LEVAL(i2) @@ -2091,23 +2244,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile dst_r = FAST_IS_REG(dst) ? gpr(REG_MASK & dst) : tmp0; src_r = FAST_IS_REG(src) ? 
gpr(REG_MASK & src) : tmp0; - if (src & SLJIT_MEM) - FAIL_IF(load_word(compiler, src_r, src, srcw, src & SLJIT_32)); compiler->status_flags_state = op & (VARIABLE_FLAG_MASK | SLJIT_SET_Z); /* TODO(mundaym): optimize loads and stores */ - switch (opcode | (op & SLJIT_32)) { + switch (opcode) { case SLJIT_NOT: - /* emulate ~x with x^-1 */ - FAIL_IF(push_load_imm_inst(compiler, tmp1, -1)); - if (src_r != dst_r) - FAIL_IF(push_inst(compiler, lgr(dst_r, src_r))); + if (src & SLJIT_MEM) + FAIL_IF(load_word(compiler, src_r, src, srcw, op & SLJIT_32)); - FAIL_IF(push_inst(compiler, xgr(dst_r, tmp1))); - break; - case SLJIT_NOT32: /* emulate ~x with x^-1 */ + if (!(op & SLJIT_32)) { + FAIL_IF(push_load_imm_inst(compiler, tmp1, -1)); + if (src_r != dst_r) + FAIL_IF(push_inst(compiler, lgr(dst_r, src_r))); + + FAIL_IF(push_inst(compiler, xgr(dst_r, tmp1))); + break; + } + if (have_eimm()) FAIL_IF(push_inst(compiler, xilf(dst_r, 0xffffffff))); else { @@ -2119,24 +2274,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile } break; case SLJIT_CLZ: - if (have_eimm()) { - FAIL_IF(push_inst(compiler, flogr(tmp0, src_r))); /* clobbers tmp1 */ - if (dst_r != tmp0) - FAIL_IF(push_inst(compiler, lgr(dst_r, tmp0))); - } else { - abort(); /* TODO(mundaym): no eimm (?) */ - } - break; - case SLJIT_CLZ32: - if (have_eimm()) { - FAIL_IF(push_inst(compiler, sllg(tmp1, src_r, 32, 0))); - FAIL_IF(push_inst(compiler, iilf(tmp1, 0xffffffff))); - FAIL_IF(push_inst(compiler, flogr(tmp0, tmp1))); /* clobbers tmp1 */ - if (dst_r != tmp0) - FAIL_IF(push_inst(compiler, lr(dst_r, tmp0))); - } else { - abort(); /* TODO(mundaym): no eimm (?) */ - } + case SLJIT_CTZ: + if (src & SLJIT_MEM) + FAIL_IF(load_unsigned_word(compiler, src_r, src, srcw, op & SLJIT_32)); + + FAIL_IF(sljit_emit_clz_ctz(compiler, op, dst_r, src_r)); break; default: SLJIT_UNREACHABLE(); @@ -2145,9 +2287,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile if ((op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_SET_Z | SLJIT_SET_OVERFLOW)) FAIL_IF(update_zero_overflow(compiler, op, dst_r)); - /* TODO(carenas): doesn't need FAIL_IF */ if (dst & SLJIT_MEM) - FAIL_IF(store_word(compiler, dst_r, dst, dstw, op & SLJIT_32)); + return store_word(compiler, dst_r, dst, dstw, op & SLJIT_32); return SLJIT_SUCCESS; } @@ -2166,11 +2307,6 @@ static SLJIT_INLINE int is_commutative(sljit_s32 op) return 0; } -static SLJIT_INLINE int is_shift(sljit_s32 op) { - sljit_s32 v = GET_OPCODE(op); - return (v == SLJIT_SHL || v == SLJIT_ASHR || v == SLJIT_LSHR) ? 1 : 0; -} - static const struct ins_forms add_forms = { 0x1a00, /* ar */ 0xb9080000, /* agr */ @@ -2604,33 +2740,41 @@ static sljit_s32 sljit_emit_shift(struct sljit_compiler *compiler, sljit_s32 op, sljit_ins ins; if (FAST_IS_REG(src1)) - src_r = gpr(src1 & REG_MASK); + src_r = gpr(src1); else FAIL_IF(emit_move(compiler, tmp0, src1, src1w)); - if (src2 & SLJIT_IMM) + if (!(src2 & SLJIT_IMM)) { + if (FAST_IS_REG(src2)) + base_r = gpr(src2); + else { + FAIL_IF(emit_move(compiler, tmp1, src2, src2w)); + base_r = tmp1; + } + + if ((op & SLJIT_32) && (type == SLJIT_MSHL || type == SLJIT_MLSHR || type == SLJIT_MASHR)) { + if (base_r != tmp1) { + FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(base_r) | (59 << 24) | (1 << 23) | (63 << 16))); + base_r = tmp1; + } else + FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f)); + } + } else imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 
0x1f : 0x3f)); - else if (FAST_IS_REG(src2)) - base_r = gpr(src2 & REG_MASK); - else { - FAIL_IF(emit_move(compiler, tmp1, src2, src2w)); - base_r = tmp1; - } if ((op & SLJIT_32) && dst_r == src_r) { - if (type == SLJIT_SHL) + if (type == SLJIT_SHL || type == SLJIT_MSHL) ins = 0x89000000 /* sll */; - else if (type == SLJIT_LSHR) + else if (type == SLJIT_LSHR || type == SLJIT_MLSHR) ins = 0x88000000 /* srl */; else ins = 0x8a000000 /* sra */; FAIL_IF(push_inst(compiler, ins | R20A(dst_r) | R12A(base_r) | imm)); - } - else { - if (type == SLJIT_SHL) + } else { + if (type == SLJIT_SHL || type == SLJIT_MSHL) ins = (op & SLJIT_32) ? 0xeb00000000df /* sllk */ : 0xeb000000000d /* sllg */; - else if (type == SLJIT_LSHR) + else if (type == SLJIT_LSHR || type == SLJIT_MLSHR) ins = (op & SLJIT_32) ? 0xeb00000000de /* srlk */ : 0xeb000000000c /* srlg */; else ins = (op & SLJIT_32) ? 0xeb00000000dc /* srak */ : 0xeb000000000a /* srag */; @@ -2644,6 +2788,47 @@ static sljit_s32 sljit_emit_shift(struct sljit_compiler *compiler, sljit_s32 op, return SLJIT_SUCCESS; } +static sljit_s32 sljit_emit_rotate(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0; + sljit_gpr src_r = tmp0; + sljit_gpr base_r = tmp0; + sljit_ins imm = 0; + sljit_ins ins; + + if (FAST_IS_REG(src1)) + src_r = gpr(src1); + else + FAIL_IF(emit_move(compiler, tmp0, src1, src1w)); + + if (!(src2 & SLJIT_IMM)) { + if (FAST_IS_REG(src2)) + base_r = gpr(src2); + else { + FAIL_IF(emit_move(compiler, tmp1, src2, src2w)); + base_r = tmp1; + } + } + + if (GET_OPCODE(op) == SLJIT_ROTR) { + if (!(src2 & SLJIT_IMM)) { + ins = (op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */; + FAIL_IF(push_inst(compiler, ins | R4A(tmp1) | R0A(base_r))); + base_r = tmp1; + } else + src2w = -src2w; + } + + if (src2 & SLJIT_IMM) + imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 0x1f : 0x3f)); + + ins = (op & SLJIT_32) ? 0xeb000000001d /* rll */ : 0xeb000000001c /* rllg */; + return push_inst(compiler, ins | R36A(dst_r) | R32A(src_r) | R28A(base_r) | (imm << 16)); +} + static const struct ins_forms addc_forms = { 0xb9980000, /* alcr */ 0xb9880000, /* alcgr */ @@ -2716,10 +2901,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile FAIL_IF(sljit_emit_bitwise(compiler, op, dst, src1, src1w, src2, src2w)); break; case SLJIT_SHL: + case SLJIT_MSHL: case SLJIT_LSHR: + case SLJIT_MLSHR: case SLJIT_ASHR: + case SLJIT_MASHR: FAIL_IF(sljit_emit_shift(compiler, op, dst, src1, src1w, src2, src2w)); break; + case SLJIT_ROTL: + case SLJIT_ROTR: + FAIL_IF(sljit_emit_rotate(compiler, op, dst, src1, src1w, src2, src2w)); + break; } if (dst & SLJIT_MEM) @@ -2734,18 +2926,130 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil CHECK_ERROR(); CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_op2(compiler, op, (sljit_s32)tmp0, 0, src1, src1w, src2, src2w); } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op, + sljit_s32 src_dst, + sljit_s32 src1, sljit_sw src1w, + sljit_s32 src2, sljit_sw src2w) +{ + sljit_s32 is_right; + sljit_sw bit_length = (op & SLJIT_32) ? 
32 : 64; + sljit_gpr src_dst_r = gpr(src_dst); + sljit_gpr src1_r = tmp0; + sljit_gpr src2_r = tmp1; + sljit_ins ins; + + CHECK_ERROR(); + CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w)); + + is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR); + + if (src_dst == src1) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_op2(compiler, (is_right ? SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w); + } + + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if (src1 & SLJIT_MEM) + FAIL_IF(load_word(compiler, tmp0, src1, src1w, op & SLJIT_32)); + else if (src1 & SLJIT_IMM) + FAIL_IF(push_load_imm_inst(compiler, tmp0, src1w)); + else + src1_r = gpr(src1); + + if (src2 & SLJIT_IMM) { + src2w &= bit_length - 1; + + if (src2w == 0) + return SLJIT_SUCCESS; + } else if (!(src2 & SLJIT_MEM)) + src2_r = gpr(src2); + else + FAIL_IF(load_word(compiler, tmp1, src2, src2w, op & SLJIT_32)); + + if (src2 & SLJIT_IMM) { + if (op & SLJIT_32) { + ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */; + FAIL_IF(push_inst(compiler, ins | R20A(src_dst_r) | (sljit_ins)src2w)); + } else { + ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */; + FAIL_IF(push_inst(compiler, ins | R36A(src_dst_r) | R32A(src_dst_r) | ((sljit_ins)src2w << 16))); + } + + ins = 0xec0000000055 /* risbg */; + + if (is_right) { + src2w = bit_length - src2w; + ins |= ((sljit_ins)(64 - bit_length) << 24) | ((sljit_ins)(63 - src2w) << 16) | ((sljit_ins)src2w << 8); + } else + ins |= ((sljit_ins)(64 - src2w) << 24) | ((sljit_ins)63 << 16) | ((sljit_ins)src2w << 8); + + return push_inst(compiler, ins | R36A(src_dst_r) | R32A(src1_r)); + } + + if (op & SLJIT_32) { + if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) { + if (src2_r != tmp1) { + FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(src2_r) | (59 << 24) | (1 << 23) | (63 << 16))); + src2_r = tmp1; + } else + FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f)); + } + + ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */; + FAIL_IF(push_inst(compiler, ins | R20A(src_dst_r) | R12A(src2_r))); + + if (src2_r != tmp1) { + FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x1f)); + FAIL_IF(push_inst(compiler, 0x1700 /* xr */ | R4A(tmp1) | R0A(src2_r))); + } else + FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x1f)); + + if (src1_r == tmp0) { + ins = is_right ? 0x89000000 /* sll */ : 0x88000000 /* srl */; + FAIL_IF(push_inst(compiler, ins | R20A(tmp0) | R12A(tmp1) | 0x1)); + } else { + ins = is_right ? 0xeb00000000df /* sllk */ : 0xeb00000000de /* srlk */; + FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | R28A(tmp1) | (0x1 << 16))); + } + + return push_inst(compiler, 0x1600 /* or */ | R4A(src_dst_r) | R0A(tmp0)); + } + + ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */; + FAIL_IF(push_inst(compiler, ins | R36A(src_dst_r) | R32A(src_dst_r) | R28A(src2_r))); + + ins = is_right ? 
0xeb000000000d /* sllg */ : 0xeb000000000c /* srlg */; + + if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) { + if (src2_r != tmp1) + FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x3f)); + + FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | (0x1 << 16))); + src1_r = tmp0; + + if (src2_r != tmp1) + FAIL_IF(push_inst(compiler, 0xb9820000 /* xgr */ | R4A(tmp1) | R0A(src2_r))); + else + FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x3f)); + } else + FAIL_IF(push_inst(compiler, 0xb9030000 /* lcgr */ | R4A(tmp1) | R0A(src2_r))); + + FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | R28A(tmp1))); + return push_inst(compiler, 0xb9810000 /* ogr */ | R4A(src_dst_r) | R0A(tmp0)); +} + SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src( struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) { sljit_gpr src_r; + struct addr addr; CHECK_ERROR(); CHECK(check_sljit_emit_op_src(compiler, op, src, srcw)); @@ -2759,16 +3063,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src( return push_inst(compiler, br(src_r)); case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN: - /* TODO(carenas): implement? */ return SLJIT_SUCCESS; case SLJIT_PREFETCH_L1: case SLJIT_PREFETCH_L2: case SLJIT_PREFETCH_L3: case SLJIT_PREFETCH_ONCE: - /* TODO(carenas): implement */ - return SLJIT_SUCCESS; + FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1)); + return push_inst(compiler, 0xe31000000036 /* pfd */ | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset)); default: - /* TODO(carenas): probably should not success by default */ return SLJIT_SUCCESS; } @@ -3064,10 +3366,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler * ADJUST_LOCAL_OFFSET(dst, dstw); if (FAST_IS_REG(dst)) - return push_inst(compiler, lgr(gpr(dst), fast_link_r)); + return push_inst(compiler, lgr(gpr(dst), link_r)); /* memory */ - return store_word(compiler, fast_link_r, dst, dstw, 0); + return store_word(compiler, link_r, dst, dstw, 0); } /* --------------------------------------------------------------------- */ @@ -3107,7 +3409,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile /* emit jump instruction */ type &= 0xff; if (type >= SLJIT_FAST_CALL) - PTR_FAIL_IF(push_inst(compiler, brasl(type == SLJIT_FAST_CALL ? fast_link_r : link_r, 0))); + PTR_FAIL_IF(push_inst(compiler, brasl(link_r, 0))); else PTR_FAIL_IF(push_inst(compiler, brcl(mask, 0))); @@ -3117,19 +3419,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types) { + SLJIT_UNUSED_ARG(arg_types); CHECK_ERROR_PTR(); CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(emit_stack_frame_release(compiler)); + PTR_FAIL_IF(emit_stack_frame_release(compiler, r14)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); } @@ -3151,7 +3450,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi /* emit jump instruction */ if (type >= SLJIT_FAST_CALL) - return push_inst(compiler, basr(type == SLJIT_FAST_CALL ? 
fast_link_r : link_r, src_r)); + return push_inst(compiler, basr(link_r, src_r)); return push_inst(compiler, br(src_r)); } @@ -3169,23 +3468,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi ADJUST_LOCAL_OFFSET(src, srcw); FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */)); src = TMP_REG2; + srcw = 0; } if (type & SLJIT_CALL_RETURN) { - if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src)))); src = TMP_REG2; + srcw = 0; } - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, r14)); type = SLJIT_JUMP; } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); } @@ -3193,7 +3490,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co sljit_s32 dst, sljit_sw dstw, sljit_s32 type) { - sljit_u8 mask = get_cc(compiler, type & 0xff); + sljit_u8 mask = get_cc(compiler, type); CHECK_ERROR(); CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); @@ -3263,27 +3560,92 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil sljit_s32 dst_reg, sljit_s32 src, sljit_sw srcw) { - sljit_u8 mask = get_cc(compiler, type & 0xff); - sljit_gpr dst_r = gpr(dst_reg & ~SLJIT_32); - sljit_gpr src_r = FAST_IS_REG(src) ? gpr(src) : tmp0; + sljit_ins mask = get_cc(compiler, type & ~SLJIT_32); + sljit_gpr src_r; + sljit_ins ins; CHECK_ERROR(); CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + if (type & SLJIT_32) + srcw = (sljit_s32)srcw; + + if (have_lscond2() && (src & SLJIT_IMM) && is_s16(srcw)) { + ins = (type & SLJIT_32) ? 0xec0000000042 /* lochi */ : 0xec0000000046 /* locghi */; + return push_inst(compiler, ins | R36A(gpr(dst_reg)) | (mask << 32) | (sljit_ins)(srcw & 0xffff) << 16); + } + if (src & SLJIT_IMM) { - /* TODO(mundaym): fast path with lscond2 */ - FAIL_IF(push_load_imm_inst(compiler, src_r, srcw)); + FAIL_IF(push_load_imm_inst(compiler, tmp0, srcw)); + src_r = tmp0; + } else + src_r = gpr(src); + + if (have_lscond1()) { + ins = (type & SLJIT_32) ? 
0xb9f20000 /* locr */ : 0xb9e20000 /* locgr */; + return push_inst(compiler, ins | (mask << 12) | R4A(gpr(dst_reg)) | R0A(src_r)); } - #define LEVAL(i) i(dst_r, src_r, mask) - if (have_lscond1()) - return push_inst(compiler, - WHEN2(dst_reg & SLJIT_32, locr, locgr)); + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +} - #undef LEVAL +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_ins ins, reg1, reg2, base, offs = 0; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + ADJUST_LOCAL_OFFSET(mem, memw); + + base = gpr(mem & REG_MASK); + reg1 = gpr(REG_PAIR_FIRST(reg)); + reg2 = gpr(REG_PAIR_SECOND(reg)); + + if (mem & OFFS_REG_MASK) { + memw &= 0x3; + offs = gpr(OFFS_REG(mem)); + + if (memw != 0) { + FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp1) | R32A(offs) | ((sljit_ins)memw << 16))); + offs = tmp1; + } else if (!(type & SLJIT_MEM_STORE) && (base == reg1 || base == reg2) && (offs == reg1 || offs == reg2)) { + FAIL_IF(push_inst(compiler, 0xb9f80000 | R12A(tmp1) | R4A(base) | R0A(offs))); + base = tmp1; + offs = 0; + } + + memw = 0; + } else if (memw < -0x80000 || memw > 0x7ffff - ((reg2 == reg1 + 1) ? 0 : SSIZE_OF(sw))) { + FAIL_IF(push_load_imm_inst(compiler, tmp1, memw)); + + if (base == 0) + base = tmp1; + else + offs = tmp1; + + memw = 0; + } + + if (offs == 0 && reg2 == (reg1 + 1)) { + ins = (type & SLJIT_MEM_STORE) ? 0xeb0000000024 /* stmg */ : 0xeb0000000004 /* lmg */; + return push_inst(compiler, ins | R36A(reg1) | R32A(reg2) | R28A(base) | disp_s20((sljit_s32)memw)); + } + + ins = ((type & SLJIT_MEM_STORE) ? 0xe30000000024 /* stg */ : 0xe30000000004 /* lg */) | R32A(offs) | R28A(base); + + if (!(type & SLJIT_MEM_STORE) && base == reg1) { + FAIL_IF(push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw)))); + return push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw)); + } - /* TODO(mundaym): implement */ - return SLJIT_ERR_UNSUPPORTED; + FAIL_IF(push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw))); + return push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw))); } /* --------------------------------------------------------------------- */ diff --git a/thirdparty/pcre2/src/sljit/sljitNativeSPARC_32.c b/thirdparty/pcre2/src/sljit/sljitNativeSPARC_32.c deleted file mode 100644 index 218992b355..0000000000 --- a/thirdparty/pcre2/src/sljit/sljitNativeSPARC_32.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Stack-less Just-In-Time compiler - * - * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. - * - * Redistribution and use in source and binary forms, with or without modification, are - * permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, this list - * of conditions and the following disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw imm) -{ - if (imm <= SIMM_MAX && imm >= SIMM_MIN) - return push_inst(compiler, OR | D(dst) | S1(0) | IMM(imm), DR(dst)); - - FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((imm >> 10) & 0x3fffff), DR(dst))); - return (imm & 0x3ff) ? push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (imm & 0x3ff), DR(dst)) : SLJIT_SUCCESS; -} - -#define ARG2(flags, src2) ((flags & SRC2_IMM) ? IMM(src2) : S2(src2)) - -static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags, - sljit_s32 dst, sljit_s32 src1, sljit_sw src2) -{ - SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same); - - switch (op) { - case SLJIT_MOV: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if (dst != src2) - return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst)); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U8: - case SLJIT_MOV_S8: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - if (op == SLJIT_MOV_U8) - return push_inst(compiler, AND | D(dst) | S1(src2) | IMM(0xff), DR(dst)); - FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst))); - return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_MOV_U16: - case SLJIT_MOV_S16: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { - FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst))); - return push_inst(compiler, (op == SLJIT_MOV_S16 ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst)); - } - SLJIT_ASSERT(dst == src2); - return SLJIT_SUCCESS; - - case SLJIT_NOT: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DRF(dst, flags)); - - case SLJIT_CLZ: - SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); - FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(src2) | S2(0), SET_FLAGS)); - FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2(src2), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, BICC | DA(0x1) | (7 & DISP_MASK), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(32), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(-1), DR(dst))); - - /* Loop. 
*/ - FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS)); - FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, BICC | DA(0xe) | ((sljit_ins)-2 & DISP_MASK), UNMOVABLE_INS)); - return push_inst(compiler, ADD | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS); - - case SLJIT_ADD: - compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; - return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_ADDC: - compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD; - return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_SUB: - compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB; - return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_SUBC: - compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB; - return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_MUL: - compiler->status_flags_state = 0; - FAIL_IF(push_inst(compiler, SMUL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); - if (!(flags & SET_FLAGS)) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(dst) | IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, RDY | D(TMP_LINK), DR(TMP_LINK))); - return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_LINK), MOVABLE_INS | SET_FLAGS); - - case SLJIT_AND: - return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_OR: - return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_XOR: - return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags)); - - case SLJIT_SHL: - FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); - return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); - - case SLJIT_LSHR: - FAIL_IF(push_inst(compiler, SRL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); - return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); - - case SLJIT_ASHR: - FAIL_IF(push_inst(compiler, SRA | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); - return !(flags & SET_FLAGS) ? 
SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); - } - - SLJIT_UNREACHABLE(); - return SLJIT_SUCCESS; -} - -static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src) -{ - sljit_s32 reg_index = 8; - sljit_s32 word_reg_index = 8; - sljit_s32 float_arg_index = 1; - sljit_s32 double_arg_count = 0; - sljit_u32 float_offset = (16 + 6) * sizeof(sljit_sw); - sljit_s32 types = 0; - sljit_s32 reg = 0; - sljit_s32 move_to_tmp2 = 0; - - if (src) - reg = reg_map[*src & REG_MASK]; - - arg_types >>= SLJIT_ARG_SHIFT; - - while (arg_types) { - types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK); - - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - float_arg_index++; - double_arg_count++; - if (reg_index == reg || reg_index + 1 == reg) - move_to_tmp2 = 1; - reg_index += 2; - break; - case SLJIT_ARG_TYPE_F32: - float_arg_index++; - if (reg_index == reg) - move_to_tmp2 = 1; - reg_index++; - break; - default: - if (reg_index != word_reg_index && reg_index == reg) - move_to_tmp2 = 1; - reg_index++; - word_reg_index++; - break; - } - - arg_types >>= SLJIT_ARG_SHIFT; - } - - if (move_to_tmp2) { - if (reg < 14) - FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1))); - *src = TMP_REG1; - } - - arg_types = types; - - while (arg_types) { - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - float_arg_index--; - if (float_arg_index == 4 && double_arg_count == 4) { - /* The address is not doubleword aligned, so two instructions are required to store the double. */ - FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM((16 + 7) * sizeof(sljit_sw)), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | (1 << 25) | S1(SLJIT_SP) | IMM((16 + 8) * sizeof(sljit_sw)), MOVABLE_INS)); - } - else - FAIL_IF(push_inst(compiler, STDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - float_offset -= sizeof(sljit_f64); - break; - case SLJIT_ARG_TYPE_F32: - float_arg_index--; - FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - float_offset -= sizeof(sljit_f64); - break; - default: - break; - } - - arg_types >>= SLJIT_ARG_SHIFT; - } - - float_offset = (16 + 6) * sizeof(sljit_sw); - - while (types) { - switch (types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - reg_index -= 2; - if (reg_index < 14) { - if ((reg_index & 0x1) != 0) { - FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); - if (reg_index < 8 + 6 - 1) - FAIL_IF(push_inst(compiler, LDUW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), reg_index + 1)); - } - else - FAIL_IF(push_inst(compiler, LDD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); - } - float_offset -= sizeof(sljit_f64); - break; - case SLJIT_ARG_TYPE_F32: - reg_index--; - if (reg_index < 8 + 6) - FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index)); - float_offset -= sizeof(sljit_f64); - break; - default: - reg_index--; - word_reg_index--; - - if (reg_index != word_reg_index) { - if (reg_index < 14) - FAIL_IF(push_inst(compiler, OR | DA(reg_index) | S1(0) | S2A(word_reg_index), reg_index)); - else - FAIL_IF(push_inst(compiler, STW | DA(word_reg_index) | S1(SLJIT_SP) | IMM(92), word_reg_index)); - } - break; - } - - types >>= SLJIT_ARG_SHIFT; - } - - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE 
sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value) -{ - FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst))); - return push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (init_value & 0x3ff), DR(dst)); -} - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset) -{ - sljit_ins *inst = (sljit_ins *)addr; - SLJIT_UNUSED_ARG(executable_offset); - - SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 0); - SLJIT_ASSERT(((inst[0] & 0xc1c00000) == 0x01000000) && ((inst[1] & 0xc1f82000) == 0x80102000)); - inst[0] = (inst[0] & 0xffc00000) | ((new_target >> 10) & 0x3fffff); - inst[1] = (inst[1] & 0xfffffc00) | (new_target & 0x3ff); - SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1); - inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset); - SLJIT_CACHE_FLUSH(inst, inst + 2); -} - -SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset) -{ - sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset); -} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeSPARC_common.c b/thirdparty/pcre2/src/sljit/sljitNativeSPARC_common.c deleted file mode 100644 index c8d19e16c6..0000000000 --- a/thirdparty/pcre2/src/sljit/sljitNativeSPARC_common.c +++ /dev/null @@ -1,1673 +0,0 @@ -/* - * Stack-less Just-In-Time compiler - * - * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. - * - * Redistribution and use in source and binary forms, with or without modification, are - * permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, this list - * of conditions and the following disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) -{ - return "SPARC" SLJIT_CPUINFO; -} - -/* Length of an instruction word - Both for sparc-32 and sparc-64 */ -typedef sljit_u32 sljit_ins; - -#if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) - -static void sparc_cache_flush(sljit_ins *from, sljit_ins *to) -{ -#if defined(__SUNPRO_C) && __SUNPRO_C < 0x590 - __asm ( - /* if (from == to) return */ - "cmp %i0, %i1\n" - "be .leave\n" - "nop\n" - - /* loop until from >= to */ - ".mainloop:\n" - "flush %i0\n" - "add %i0, 8, %i0\n" - "cmp %i0, %i1\n" - "bcs .mainloop\n" - "nop\n" - - /* The comparison was done above. 
*/ - "bne .leave\n" - /* nop is not necessary here, since the - sub operation has no side effect. */ - "sub %i0, 4, %i0\n" - "flush %i0\n" - ".leave:" - ); -#else - if (SLJIT_UNLIKELY(from == to)) - return; - - do { - __asm__ volatile ( - "flush %0\n" - : : "r"(from) - ); - /* Operates at least on doubleword. */ - from += 2; - } while (from < to); - - if (from == to) { - /* Flush the last word. */ - from --; - __asm__ volatile ( - "flush %0\n" - : : "r"(from) - ); - } -#endif -} - -#endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */ - -/* TMP_REG2 is not used by getput_arg */ -#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2) -#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3) -#define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4) -/* This register is modified by calls, which affects the instruction - in the delay slot if it is used as a source register. */ -#define TMP_LINK (SLJIT_NUMBER_OF_REGISTERS + 5) - -#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1) -#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) - -static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = { - 0, 8, 9, 10, 11, 23, 22, 21, 20, 19, 18, 17, 16, 29, 28, 27, 26, 25, 24, 14, 1, 12, 13, 15 -}; - -static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = { - 0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 -}; - -/* --------------------------------------------------------------------- */ -/* Instrucion forms */ -/* --------------------------------------------------------------------- */ - -#define D(d) ((sljit_ins)reg_map[d] << 25) -#define FD(d) ((sljit_ins)freg_map[d] << 25) -#define FDN(d) (((sljit_ins)freg_map[d] | 0x1) << 25) -#define DA(d) ((sljit_ins)(d) << 25) -#define S1(s1) ((sljit_ins)reg_map[s1] << 14) -#define FS1(s1) ((sljit_ins)freg_map[s1] << 14) -#define S1A(s1) ((sljit_ins)(s1) << 14) -#define S2(s2) ((sljit_ins)reg_map[s2]) -#define FS2(s2) ((sljit_ins)freg_map[s2]) -#define FS2N(s2) ((sljit_ins)freg_map[s2] | 0x1) -#define S2A(s2) ((sljit_ins)(s2)) -#define IMM_ARG 0x2000 -#define DOP(op) ((sljit_ins)(op) << 5) -#define IMM(imm) (((sljit_ins)(imm) & 0x1fff) | IMM_ARG) - -#define DR(dr) (reg_map[dr]) -#define DRF(dr, flags) ((sljit_s32)(reg_map[dr] | ((flags) & SET_FLAGS))) -#define OPC1(opcode) ((sljit_ins)(opcode) << 30) -#define OPC2(opcode) ((sljit_ins)(opcode) << 22) -#define OPC3(opcode) ((sljit_ins)(opcode) << 19) -#define SET_FLAGS OPC3(0x10) - -#define ADD (OPC1(0x2) | OPC3(0x00)) -#define ADDC (OPC1(0x2) | OPC3(0x08)) -#define AND (OPC1(0x2) | OPC3(0x01)) -#define ANDN (OPC1(0x2) | OPC3(0x05)) -#define CALL (OPC1(0x1)) -#define FABSS (OPC1(0x2) | OPC3(0x34) | DOP(0x09)) -#define FADDD (OPC1(0x2) | OPC3(0x34) | DOP(0x42)) -#define FADDS (OPC1(0x2) | OPC3(0x34) | DOP(0x41)) -#define FCMPD (OPC1(0x2) | OPC3(0x35) | DOP(0x52)) -#define FCMPS (OPC1(0x2) | OPC3(0x35) | DOP(0x51)) -#define FDIVD (OPC1(0x2) | OPC3(0x34) | DOP(0x4e)) -#define FDIVS (OPC1(0x2) | OPC3(0x34) | DOP(0x4d)) -#define FDTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd2)) -#define FDTOS (OPC1(0x2) | OPC3(0x34) | DOP(0xc6)) -#define FITOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc8)) -#define FITOS (OPC1(0x2) | OPC3(0x34) | DOP(0xc4)) -#define FMOVS (OPC1(0x2) | OPC3(0x34) | DOP(0x01)) -#define FMULD (OPC1(0x2) | OPC3(0x34) | DOP(0x4a)) -#define FMULS (OPC1(0x2) | OPC3(0x34) | DOP(0x49)) -#define FNEGS (OPC1(0x2) | OPC3(0x34) | DOP(0x05)) -#define FSTOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc9)) -#define FSTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd1)) -#define FSUBD (OPC1(0x2) | OPC3(0x34) | 
DOP(0x46)) -#define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45)) -#define JMPL (OPC1(0x2) | OPC3(0x38)) -#define LDD (OPC1(0x3) | OPC3(0x03)) -#define LDDF (OPC1(0x3) | OPC3(0x23)) -#define LDF (OPC1(0x3) | OPC3(0x20)) -#define LDUW (OPC1(0x3) | OPC3(0x00)) -#define NOP (OPC1(0x0) | OPC2(0x04)) -#define OR (OPC1(0x2) | OPC3(0x02)) -#define ORN (OPC1(0x2) | OPC3(0x06)) -#define RDY (OPC1(0x2) | OPC3(0x28) | S1A(0)) -#define RESTORE (OPC1(0x2) | OPC3(0x3d)) -#define SAVE (OPC1(0x2) | OPC3(0x3c)) -#define SETHI (OPC1(0x0) | OPC2(0x04)) -#define SLL (OPC1(0x2) | OPC3(0x25)) -#define SLLX (OPC1(0x2) | OPC3(0x25) | (1 << 12)) -#define SRA (OPC1(0x2) | OPC3(0x27)) -#define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12)) -#define SRL (OPC1(0x2) | OPC3(0x26)) -#define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12)) -#define STD (OPC1(0x3) | OPC3(0x07)) -#define STDF (OPC1(0x3) | OPC3(0x27)) -#define STF (OPC1(0x3) | OPC3(0x24)) -#define STW (OPC1(0x3) | OPC3(0x04)) -#define SUB (OPC1(0x2) | OPC3(0x04)) -#define SUBC (OPC1(0x2) | OPC3(0x0c)) -#define TA (OPC1(0x2) | OPC3(0x3a) | (8 << 25)) -#define WRY (OPC1(0x2) | OPC3(0x30) | DA(0)) -#define XOR (OPC1(0x2) | OPC3(0x03)) -#define XNOR (OPC1(0x2) | OPC3(0x07)) - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#define MAX_DISP (0x1fffff) -#define MIN_DISP (-0x200000) -#define DISP_MASK ((sljit_ins)0x3fffff) - -#define BICC (OPC1(0x0) | OPC2(0x2)) -#define FBFCC (OPC1(0x0) | OPC2(0x6)) -#define SLL_W SLL -#define SDIV (OPC1(0x2) | OPC3(0x0f)) -#define SMUL (OPC1(0x2) | OPC3(0x0b)) -#define UDIV (OPC1(0x2) | OPC3(0x0e)) -#define UMUL (OPC1(0x2) | OPC3(0x0a)) -#else -#define SLL_W SLLX -#endif - -#define SIMM_MAX (0x0fff) -#define SIMM_MIN (-0x1000) - -/* dest_reg is the absolute name of the register - Useful for reordering instructions in the delay slot. */ -static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot) -{ - sljit_ins *ptr; - SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS - || (delay_slot & DST_INS_MASK) == MOVABLE_INS - || (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f)); - ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); - FAIL_IF(!ptr); - *ptr = ins; - compiler->size++; - compiler->delay_slot = delay_slot; - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset) -{ - sljit_sw diff; - sljit_uw target_addr; - sljit_ins *inst; - sljit_ins saved_inst; - - if (jump->flags & SLJIT_REWRITABLE_JUMP) - return code_ptr; - - if (jump->flags & JUMP_ADDR) - target_addr = jump->u.target; - else { - SLJIT_ASSERT(jump->flags & JUMP_LABEL); - target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset; - } - inst = (sljit_ins*)jump->addr; - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - if (jump->flags & IS_CALL) { - /* Call is always patchable on sparc 32. */ - jump->flags |= PATCH_CALL; - if (jump->flags & IS_MOVABLE) { - inst[0] = inst[-1]; - inst[-1] = CALL; - jump->addr -= sizeof(sljit_ins); - return inst; - } - inst[0] = CALL; - inst[1] = NOP; - return inst + 1; - } -#else - /* Both calls and BPr instructions shall not pass this point. 
*/ -#error "Implementation required" -#endif - - if (jump->flags & IS_COND) - inst--; - - diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1) - executable_offset) >> 2; - - if (jump->flags & IS_MOVABLE) { - if (diff <= MAX_DISP && diff >= MIN_DISP) { - jump->flags |= PATCH_B; - inst--; - if (jump->flags & IS_COND) { - saved_inst = inst[0]; - inst[0] = inst[1] ^ (1 << 28); - inst[1] = saved_inst; - } else { - inst[1] = inst[0]; - inst[0] = BICC | DA(0x8); - } - jump->addr = (sljit_uw)inst; - return inst + 1; - } - } - - diff += SSIZE_OF(ins); - - if (diff <= MAX_DISP && diff >= MIN_DISP) { - jump->flags |= PATCH_B; - if (jump->flags & IS_COND) - inst[0] ^= (1 << 28); - else - inst[0] = BICC | DA(0x8); - inst[1] = NOP; - jump->addr = (sljit_uw)inst; - return inst + 1; - } - - return code_ptr; -} - -SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) -{ - struct sljit_memory_fragment *buf; - sljit_ins *code; - sljit_ins *code_ptr; - sljit_ins *buf_ptr; - sljit_ins *buf_end; - sljit_uw word_count; - sljit_uw next_addr; - sljit_sw executable_offset; - sljit_sw addr; - - struct sljit_label *label; - struct sljit_jump *jump; - struct sljit_const *const_; - struct sljit_put_label *put_label; - - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_generate_code(compiler)); - reverse_buf(compiler); - - code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data); - PTR_FAIL_WITH_EXEC_IF(code); - buf = compiler->buf; - - code_ptr = code; - word_count = 0; - next_addr = 0; - executable_offset = SLJIT_EXEC_OFFSET(code); - - label = compiler->labels; - jump = compiler->jumps; - const_ = compiler->consts; - put_label = compiler->put_labels; - - do { - buf_ptr = (sljit_ins*)buf->memory; - buf_end = buf_ptr + (buf->used_size >> 2); - do { - *code_ptr = *buf_ptr++; - if (next_addr == word_count) { - SLJIT_ASSERT(!label || label->size >= word_count); - SLJIT_ASSERT(!jump || jump->addr >= word_count); - SLJIT_ASSERT(!const_ || const_->addr >= word_count); - SLJIT_ASSERT(!put_label || put_label->addr >= word_count); - - /* These structures are ordered by their address. */ - if (label && label->size == word_count) { - /* Just recording the address. */ - label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); - label->size = (sljit_uw)(code_ptr - code); - label = label->next; - } - if (jump && jump->addr == word_count) { -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - jump->addr = (sljit_uw)(code_ptr - 3); -#else - jump->addr = (sljit_uw)(code_ptr - 6); -#endif - code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset); - jump = jump->next; - } - if (const_ && const_->addr == word_count) { - /* Just recording the address. 
*/ - const_->addr = (sljit_uw)code_ptr; - const_ = const_->next; - } - if (put_label && put_label->addr == word_count) { - SLJIT_ASSERT(put_label->label); - put_label->addr = (sljit_uw)code_ptr; - put_label = put_label->next; - } - next_addr = compute_next_addr(label, jump, const_, put_label); - } - code_ptr ++; - word_count ++; - } while (buf_ptr < buf_end); - - buf = buf->next; - } while (buf); - - if (label && label->size == word_count) { - label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); - label->size = (sljit_uw)(code_ptr - code); - label = label->next; - } - - SLJIT_ASSERT(!label); - SLJIT_ASSERT(!jump); - SLJIT_ASSERT(!const_); - SLJIT_ASSERT(!put_label); - SLJIT_ASSERT(code_ptr - code <= (sljit_s32)compiler->size); - - jump = compiler->jumps; - while (jump) { - do { - addr = (sljit_sw)((jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target); - buf_ptr = (sljit_ins *)jump->addr; - - if (jump->flags & PATCH_CALL) { - addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; - SLJIT_ASSERT(addr <= 0x1fffffff && addr >= -0x20000000); - buf_ptr[0] = CALL | ((sljit_ins)addr & 0x3fffffff); - break; - } - if (jump->flags & PATCH_B) { - addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2; - SLJIT_ASSERT(addr <= MAX_DISP && addr >= MIN_DISP); - buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | ((sljit_ins)addr & DISP_MASK); - break; - } - - /* Set the fields of immediate loads. */ -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000)); - buf_ptr[0] |= (sljit_ins)(addr >> 10) & 0x3fffff; - buf_ptr[1] |= (sljit_ins)addr & 0x3ff; -#else -#error "Implementation required" -#endif - } while (0); - jump = jump->next; - } - - put_label = compiler->put_labels; - while (put_label) { - addr = (sljit_sw)put_label->label->addr; - buf_ptr = (sljit_ins *)put_label->addr; - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000)); - buf_ptr[0] |= (addr >> 10) & 0x3fffff; - buf_ptr[1] |= addr & 0x3ff; -#else -#error "Implementation required" -#endif - put_label = put_label->next; - } - - compiler->error = SLJIT_ERR_COMPILED; - compiler->executable_offset = executable_offset; - compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins); - - code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset); - code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset); - - SLJIT_CACHE_FLUSH(code, code_ptr); - SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1); - return code; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type) -{ - switch (feature_type) { - case SLJIT_HAS_FPU: -#ifdef SLJIT_IS_FPU_AVAILABLE - return SLJIT_IS_FPU_AVAILABLE; -#else - /* Available by default. */ - return 1; -#endif - - case SLJIT_HAS_ZERO_REGISTER: - return 1; - -#if (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64) - case SLJIT_HAS_CMOV: - return 1; -#endif - - default: - return 0; - } -} - -/* --------------------------------------------------------------------- */ -/* Entry, exit */ -/* --------------------------------------------------------------------- */ - -/* Creates an index in data_transfer_insts array. 
*/ -#define LOAD_DATA 0x01 -#define WORD_DATA 0x00 -#define BYTE_DATA 0x02 -#define HALF_DATA 0x04 -#define INT_DATA 0x06 -#define SIGNED_DATA 0x08 -/* Separates integer and floating point registers */ -#define GPR_REG 0x0f -#define DOUBLE_DATA 0x10 -#define SINGLE_DATA 0x12 - -#define MEM_MASK 0x1f - -#define ARG_TEST 0x00020 -#define ALT_KEEP_CACHE 0x00040 -#define CUMULATIVE_OP 0x00080 -#define IMM_OP 0x00100 -#define MOVE_OP 0x00200 -#define SRC2_IMM 0x00400 - -#define REG_DEST 0x00800 -#define REG2_SOURCE 0x01000 -#define SLOW_SRC1 0x02000 -#define SLOW_SRC2 0x04000 -#define SLOW_DEST 0x08000 - -/* SET_FLAGS (0x10 << 19) also belong here! */ - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#include "sljitNativeSPARC_32.c" -#else -#include "sljitNativeSPARC_64.c" -#endif - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, - sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, - sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) -{ - sljit_s32 reg_index, types, tmp; - sljit_u32 float_offset, args_offset; - sljit_s32 saved_arg_index, scratch_arg_index, float_arg_index; - - CHECK_ERROR(); - CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); - set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - - local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7; - compiler->local_size = local_size; - - if (local_size <= -SIMM_MIN) { - FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS)); - } - else { - FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size)); - FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS)); - } - - arg_types >>= SLJIT_ARG_SHIFT; - - types = arg_types; - float_offset = 16 * sizeof(sljit_sw); - reg_index = 24; - - while (types && reg_index < 24 + 6) { - switch (types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - if (reg_index & 0x1) { - FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - if (reg_index >= 24 + 6 - 1) - break; - FAIL_IF(push_inst(compiler, STW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), MOVABLE_INS)); - } else - FAIL_IF(push_inst(compiler, STD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - - float_offset += sizeof(sljit_f64); - reg_index++; - break; - case SLJIT_ARG_TYPE_F32: - FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - float_offset += sizeof(sljit_f64); - break; - } - - reg_index++; - types >>= SLJIT_ARG_SHIFT; - } - - args_offset = (16 + 1 + 6) * sizeof(sljit_sw); - float_offset = 16 * sizeof(sljit_sw); - reg_index = 24; - saved_arg_index = 24; - scratch_arg_index = 8 - 1; - float_arg_index = 1; - - while (arg_types) { - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - if (reg_index < 24 + 6 - 1) { - FAIL_IF(push_inst(compiler, LDDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - } else if (reg_index < 24 + 6) { - FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset), MOVABLE_INS)); - } else { - FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS)); - FAIL_IF(push_inst(compiler, LDF | 
FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset + sizeof(sljit_sw)), MOVABLE_INS)); - } - - float_arg_index++; - float_offset += sizeof(sljit_f64); - reg_index++; - break; - case SLJIT_ARG_TYPE_F32: - if (reg_index < 24 + 6) - FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS)); - else - FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS)); - float_arg_index++; - float_offset += sizeof(sljit_f64); - break; - default: - scratch_arg_index++; - - if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - tmp = saved_arg_index++; - if (tmp == reg_index) - break; - } else - tmp = scratch_arg_index; - - if (reg_index < 24 + 6) - FAIL_IF(push_inst(compiler, OR | DA(tmp) | S1(0) | S2A(reg_index), tmp)); - else - FAIL_IF(push_inst(compiler, LDUW | DA(tmp) | S1A(30) | IMM(args_offset), tmp)); - break; - } - - reg_index++; - arg_types >>= SLJIT_ARG_SHIFT; - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler, - sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, - sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) -{ - CHECK_ERROR(); - CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); - set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); - - compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7; - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_return_void(compiler)); - - FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS)); - return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(SLJIT_R0) | S2(0), UNMOVABLE_INS); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_return(compiler, op, src, srcw)); - - if (TYPE_CAST_NEEDED(op) || !FAST_IS_REG(src)) { - FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); - src = SLJIT_R0; - } - - FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS)); - return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(src) | S2(0), UNMOVABLE_INS); -} - -/* --------------------------------------------------------------------- */ -/* Operators */ -/* --------------------------------------------------------------------- */ - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) -#define ARCH_32_64(a, b) a -#else -#define ARCH_32_64(a, b) b -#endif - -static const sljit_ins data_transfer_insts[16 + 4] = { -/* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */), -/* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */), -/* u b s */ OPC1(3) | OPC3(0x05) /* stb */, -/* u b l */ OPC1(3) | OPC3(0x01) /* ldub */, -/* u h s */ OPC1(3) | OPC3(0x06) /* sth */, -/* u h l */ OPC1(3) | OPC3(0x02) /* lduh */, -/* u i s */ OPC1(3) | OPC3(0x04) /* stw */, -/* u i l */ OPC1(3) | OPC3(0x00) /* lduw */, - -/* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */), -/* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */), -/* s b s */ OPC1(3) | OPC3(0x05) /* stb */, -/* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */, -/* s h s */ OPC1(3) | OPC3(0x06) /* sth */, 
-/* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */, -/* s i s */ OPC1(3) | OPC3(0x04) /* stw */, -/* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */), - -/* d s */ OPC1(3) | OPC3(0x27), -/* d l */ OPC1(3) | OPC3(0x23), -/* s s */ OPC1(3) | OPC3(0x24), -/* s l */ OPC1(3) | OPC3(0x20), -}; - -#undef ARCH_32_64 - -/* Can perform an operation using at most 1 instruction. */ -static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) -{ - SLJIT_ASSERT(arg & SLJIT_MEM); - - if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN) - || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) { - /* Works for both absolute and relative addresses (immediate case). */ - if (SLJIT_UNLIKELY(flags & ARG_TEST)) - return 1; - FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] - | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)) - | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)), - ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS)); - return -1; - } - return 0; -} - -/* See getput_arg below. - Note: can_cache is called only for binary operators. Those - operators always use word arguments without write back. */ -static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) -{ - SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); - - /* Simple operation except for updates. */ - if (arg & OFFS_REG_MASK) { - argw &= 0x3; - SLJIT_ASSERT(argw); - next_argw &= 0x3; - if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == next_argw) - return 1; - return 0; - } - - if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)) - return 1; - return 0; -} - -/* Emit the necessary instructions. See can_cache above. */ -static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw) -{ - sljit_s32 base, arg2, delay_slot; - sljit_ins dest; - - SLJIT_ASSERT(arg & SLJIT_MEM); - if (!(next_arg & SLJIT_MEM)) { - next_arg = 0; - next_argw = 0; - } - - base = arg & REG_MASK; - if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) { - argw &= 0x3; - - /* Using the cache. */ - if (((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) && (argw == compiler->cache_argw)) - arg2 = TMP_REG3; - else { - if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) { - compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK); - compiler->cache_argw = argw; - arg2 = TMP_REG3; - } - else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && reg != OFFS_REG(arg)) - arg2 = reg; - else /* It must be a mov operation, so tmp1 must be free to use. */ - arg2 = TMP_REG1; - FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | (sljit_ins)argw, DR(arg2))); - } - } - else { - /* Using the cache. 
*/ - if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) { - if (argw != compiler->cache_argw) { - FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); - compiler->cache_argw = argw; - } - arg2 = TMP_REG3; - } else { - if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) { - compiler->cache_arg = SLJIT_MEM; - compiler->cache_argw = argw; - arg2 = TMP_REG3; - } - else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base) - arg2 = reg; - else /* It must be a mov operation, so tmp1 must be free to use. */ - arg2 = TMP_REG1; - FAIL_IF(load_immediate(compiler, arg2, argw)); - } - } - - dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg)); - delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS; - if (!base) - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot); - return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot); -} - -static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw) -{ - if (getput_arg_fast(compiler, flags, reg, arg, argw)) - return compiler->error; - compiler->cache_arg = 0; - compiler->cache_argw = 0; - return getput_arg(compiler, flags, reg, arg, argw, 0, 0); -} - -static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w) -{ - if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) - return compiler->error; - return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); -} - -static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src1, sljit_sw src1w, - sljit_s32 src2, sljit_sw src2w) -{ - /* arg1 goes to TMP_REG1 or src reg - arg2 goes to TMP_REG2, imm or src reg - TMP_REG3 can be used for caching - result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ - sljit_s32 dst_r = TMP_REG2; - sljit_s32 src1_r; - sljit_sw src2_r = 0; - sljit_s32 sugg_src2_r = TMP_REG2; - - if (!(flags & ALT_KEEP_CACHE)) { - compiler->cache_arg = 0; - compiler->cache_argw = 0; - } - - if (dst != TMP_REG2) { - if (FAST_IS_REG(dst)) { - dst_r = dst; - flags |= REG_DEST; - if (flags & MOVE_OP) - sugg_src2_r = dst_r; - } - else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw)) - flags |= SLOW_DEST; - } - - if (flags & IMM_OP) { - if ((src2 & SLJIT_IMM) && src2w) { - if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) { - flags |= SRC2_IMM; - src2_r = src2w; - } - } - if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) { - if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) { - flags |= SRC2_IMM; - src2_r = src1w; - - /* And swap arguments. */ - src1 = src2; - src1w = src2w; - src2 = SLJIT_IMM; - /* src2w = src2_r unneeded. */ - } - } - } - - /* Source 1. */ - if (FAST_IS_REG(src1)) - src1_r = src1; - else if (src1 & SLJIT_IMM) { - if (src1w) { - FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); - src1_r = TMP_REG1; - } - else - src1_r = 0; - } - else { - if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w)) - FAIL_IF(compiler->error); - else - flags |= SLOW_SRC1; - src1_r = TMP_REG1; - } - - /* Source 2. 
*/ - if (FAST_IS_REG(src2)) { - src2_r = src2; - flags |= REG2_SOURCE; - if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP) - dst_r = src2_r; - } - else if (src2 & SLJIT_IMM) { - if (!(flags & SRC2_IMM)) { - if (src2w) { - FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w)); - src2_r = sugg_src2_r; - } - else { - src2_r = 0; - if (flags & MOVE_OP) { - if (dst & SLJIT_MEM) - dst_r = 0; - else - op = SLJIT_MOV; - } - } - } - } - else { - if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w)) - FAIL_IF(compiler->error); - else - flags |= SLOW_SRC2; - src2_r = sugg_src2_r; - } - - if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { - SLJIT_ASSERT(src2_r == TMP_REG2); - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); - } - else { - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); - } - } - else if (flags & SLOW_SRC1) - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); - else if (flags & SLOW_SRC2) - FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); - - FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); - - if (dst & SLJIT_MEM) { - if (!(flags & SLOW_DEST)) { - getput_arg_fast(compiler, flags, dst_r, dst, dstw); - return compiler->error; - } - return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_op0(compiler, op)); - - op = GET_OPCODE(op); - switch (op) { - case SLJIT_BREAKPOINT: - return push_inst(compiler, TA, UNMOVABLE_INS); - case SLJIT_NOP: - return push_inst(compiler, NOP, UNMOVABLE_INS); - case SLJIT_LMUL_UW: - case SLJIT_LMUL_SW: -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0))); - return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1)); -#else -#error "Implementation required" -#endif - case SLJIT_DIVMOD_UW: - case SLJIT_DIVMOD_SW: - case SLJIT_DIV_UW: - case SLJIT_DIV_SW: - SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments); -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - if ((op | 0x2) == SLJIT_DIV_UW) - FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS)); - else { - FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1))); - FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS)); - } - if (op <= SLJIT_DIVMOD_SW) - FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2))); - FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? 
UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0))); - if (op >= SLJIT_DIV_UW) - return SLJIT_SUCCESS; - FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1))); - return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1)); -#else -#error "Implementation required" -#endif - case SLJIT_ENDBR: - case SLJIT_SKIP_FRAMES_BEFORE_RETURN: - return SLJIT_SUCCESS; - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src, sljit_sw srcw) -{ - sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0; - - CHECK_ERROR(); - CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw)); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src, srcw); - - op = GET_OPCODE(op); - switch (op) { - case SLJIT_MOV: -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - case SLJIT_MOV_U32: - case SLJIT_MOV_S32: - case SLJIT_MOV32: -#endif - case SLJIT_MOV_P: - return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw); - - case SLJIT_MOV_U8: - return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw); - - case SLJIT_MOV_S8: - return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw); - - case SLJIT_MOV_U16: - return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw); - - case SLJIT_MOV_S16: - return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw); - - case SLJIT_NOT: - case SLJIT_CLZ: - return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src1, sljit_sw src1w, - sljit_s32 src2, sljit_sw src2w) -{ - sljit_u32 flags = HAS_FLAGS(op) ? 
SET_FLAGS : 0; - - CHECK_ERROR(); - CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w)); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src1, src1w); - ADJUST_LOCAL_OFFSET(src2, src2w); - - op = GET_OPCODE(op); - switch (op) { - case SLJIT_ADD: - case SLJIT_ADDC: - case SLJIT_MUL: - case SLJIT_AND: - case SLJIT_OR: - case SLJIT_XOR: - return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); - - case SLJIT_SUB: - case SLJIT_SUBC: - return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); - - case SLJIT_SHL: - case SLJIT_LSHR: - case SLJIT_ASHR: -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - if (src2 & SLJIT_IMM) - src2w &= 0x1f; -#else - SLJIT_UNREACHABLE(); -#endif - return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 src1, sljit_sw src1w, - sljit_s32 src2, sljit_sw src2w) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 src, sljit_sw srcw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_op_src(compiler, op, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); - - switch (op) { - case SLJIT_FAST_RETURN: - if (FAST_IS_REG(src)) - FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK))); - else - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw)); - - FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS)); - return push_inst(compiler, NOP, UNMOVABLE_INS); - case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN: - case SLJIT_PREFETCH_L1: - case SLJIT_PREFETCH_L2: - case SLJIT_PREFETCH_L3: - case SLJIT_PREFETCH_ONCE: - return SLJIT_SUCCESS; - } - - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg) -{ - CHECK_REG_INDEX(check_sljit_get_register_index(reg)); - return reg_map[reg]; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg) -{ - CHECK_REG_INDEX(check_sljit_get_float_register_index(reg)); - return freg_map[reg]; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler, - void *instruction, sljit_u32 size) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_op_custom(compiler, instruction, size)); - - return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS); -} - -/* --------------------------------------------------------------------- */ -/* Floating point operators */ -/* --------------------------------------------------------------------- */ - -#define FLOAT_DATA(op) ((sljit_ins)DOUBLE_DATA | (((sljit_ins)(op) & SLJIT_32) >> 7)) -#define SELECT_FOP(op, single, double) ((op & SLJIT_32) ? 
single : double) -#define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw)) - -static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src, sljit_sw srcw) -{ - if (src & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw)); - src = TMP_FREG1; - } - - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS)); - - if (FAST_IS_REG(dst)) { - FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET)); - return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET); - } - - /* Store the integer value from a VFP register. */ - return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0); -} - -static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src, sljit_sw srcw) -{ - sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1; - - if (src & SLJIT_IMM) { -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) - srcw = (sljit_s32)srcw; -#endif - FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); - src = TMP_REG1; - srcw = 0; - } - - if (FAST_IS_REG(src)) { - FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET)); - src = SLJIT_MEM1(SLJIT_SP); - srcw = FLOAT_TMP_MEM_OFFSET; - } - - FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw)); - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS)); - - if (dst & SLJIT_MEM) - return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0); - return SLJIT_SUCCESS; -} - -static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 src1, sljit_sw src1w, - sljit_s32 src2, sljit_sw src2w) -{ - if (src1 & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); - src1 = TMP_FREG1; - } - - if (src2 & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0)); - src2 = TMP_FREG2; - } - - return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src, sljit_sw srcw) -{ - sljit_s32 dst_r; - - CHECK_ERROR(); - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); - SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw); - - if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) - op ^= SLJIT_32; - - dst_r = FAST_IS_REG(dst) ? 
dst : TMP_FREG1; - - if (src & SLJIT_MEM) { - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw)); - src = dst_r; - } - - switch (GET_OPCODE(op)) { - case SLJIT_MOV_F64: - if (src != dst_r) { - if (dst_r != TMP_FREG1) { - FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS)); - if (!(op & SLJIT_32)) - FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); - } - else - dst_r = src; - } - break; - case SLJIT_NEG_F64: - FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS)); - if (dst_r != src && !(op & SLJIT_32)) - FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); - break; - case SLJIT_ABS_F64: - FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS)); - if (dst_r != src && !(op & SLJIT_32)) - FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS)); - break; - case SLJIT_CONV_F64_FROM_F32: - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS)); - op ^= SLJIT_32; - break; - } - - if (dst & SLJIT_MEM) - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0)); - return SLJIT_SUCCESS; -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 src1, sljit_sw src1w, - sljit_s32 src2, sljit_sw src2w) -{ - sljit_s32 dst_r, flags = 0; - - CHECK_ERROR(); - CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w)); - ADJUST_LOCAL_OFFSET(dst, dstw); - ADJUST_LOCAL_OFFSET(src1, src1w); - ADJUST_LOCAL_OFFSET(src2, src2w); - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2; - - if (src1 & SLJIT_MEM) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { - FAIL_IF(compiler->error); - src1 = TMP_FREG1; - } else - flags |= SLOW_SRC1; - } - - if (src2 & SLJIT_MEM) { - if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { - FAIL_IF(compiler->error); - src2 = TMP_FREG2; - } else - flags |= SLOW_SRC2; - } - - if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { - if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); - } - else { - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); - } - } - else if (flags & SLOW_SRC1) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); - else if (flags & SLOW_SRC2) - FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); - - if (flags & SLOW_SRC1) - src1 = TMP_FREG1; - if (flags & SLOW_SRC2) - src2 = TMP_FREG2; - - switch (GET_OPCODE(op)) { - case SLJIT_ADD_F64: - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); - break; - - case SLJIT_SUB_F64: - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); - break; - - case SLJIT_MUL_F64: - FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); - break; - - case SLJIT_DIV_F64: - 
FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS)); - break; - } - - if (dst_r == TMP_FREG2) - FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); - - return SLJIT_SUCCESS; -} - -#undef FLOAT_DATA -#undef SELECT_FOP - -/* --------------------------------------------------------------------- */ -/* Other instructions */ -/* --------------------------------------------------------------------- */ - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); - ADJUST_LOCAL_OFFSET(dst, dstw); - - if (FAST_IS_REG(dst)) - return push_inst(compiler, OR | D(dst) | S1(0) | S2(TMP_LINK), UNMOVABLE_INS); - - /* Memory. */ - FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_LINK, dst, dstw)); - compiler->delay_slot = UNMOVABLE_INS; - return SLJIT_SUCCESS; -} - -/* --------------------------------------------------------------------- */ -/* Conditional instructions */ -/* --------------------------------------------------------------------- */ - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) -{ - struct sljit_label *label; - - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_emit_label(compiler)); - - if (compiler->last_label && compiler->last_label->size == compiler->size) - return compiler->last_label; - - label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); - PTR_FAIL_IF(!label); - set_label(label, compiler); - compiler->delay_slot = UNMOVABLE_INS; - return label; -} - -static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type) -{ - switch (type) { - case SLJIT_EQUAL: - case SLJIT_NOT_EQUAL_F64: /* Unordered. */ - return DA(0x1); - - case SLJIT_NOT_EQUAL: - case SLJIT_EQUAL_F64: - return DA(0x9); - - case SLJIT_LESS: - case SLJIT_GREATER_F64: /* Unordered. */ - case SLJIT_CARRY: - return DA(0x5); - - case SLJIT_GREATER_EQUAL: - case SLJIT_LESS_EQUAL_F64: - case SLJIT_NOT_CARRY: - return DA(0xd); - - case SLJIT_GREATER: - case SLJIT_GREATER_EQUAL_F64: /* Unordered. 
*/ - return DA(0xc); - - case SLJIT_LESS_EQUAL: - case SLJIT_LESS_F64: - return DA(0x4); - - case SLJIT_SIG_LESS: - return DA(0x3); - - case SLJIT_SIG_GREATER_EQUAL: - return DA(0xb); - - case SLJIT_SIG_GREATER: - return DA(0xa); - - case SLJIT_SIG_LESS_EQUAL: - return DA(0x2); - - case SLJIT_OVERFLOW: - if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB))) - return DA(0x9); - /* fallthrough */ - - case SLJIT_UNORDERED_F64: - return DA(0x7); - - case SLJIT_NOT_OVERFLOW: - if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB))) - return DA(0x1); - /* fallthrough */ - - case SLJIT_ORDERED_F64: - return DA(0xf); - - default: - SLJIT_UNREACHABLE(); - return DA(0x8); - } -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type) -{ - struct sljit_jump *jump; - - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_emit_jump(compiler, type)); - - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - PTR_FAIL_IF(!jump); - set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); - type &= 0xff; - - if (type < SLJIT_EQUAL_F64) { - jump->flags |= IS_COND; - if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET)) - jump->flags |= IS_MOVABLE; -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS)); -#else -#error "Implementation required" -#endif - } - else if (type < SLJIT_JUMP) { - jump->flags |= IS_COND; - if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET)) - jump->flags |= IS_MOVABLE; -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS)); -#else -#error "Implementation required" -#endif - } - else { - if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) - jump->flags |= IS_MOVABLE; - if (type >= SLJIT_FAST_CALL) - jump->flags |= IS_CALL; - } - - PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0)); - PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? 
TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS)); - jump->addr = compiler->size; - PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); - - return jump; -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, - sljit_s32 arg_types) -{ - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); - - PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - - return sljit_emit_jump(compiler, type); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw) -{ - struct sljit_jump *jump = NULL; - sljit_s32 src_r; - - CHECK_ERROR(); - CHECK(check_sljit_emit_ijump(compiler, type, src, srcw)); - ADJUST_LOCAL_OFFSET(src, srcw); - - if (FAST_IS_REG(src)) - src_r = src; - else if (src & SLJIT_IMM) { - jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); - FAIL_IF(!jump); - set_jump(jump, compiler, JUMP_ADDR); - jump->u.target = (sljit_uw)srcw; - - if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) - jump->flags |= IS_MOVABLE; - if (type >= SLJIT_FAST_CALL) - jump->flags |= IS_CALL; - - FAIL_IF(emit_const(compiler, TMP_REG1, 0)); - src_r = TMP_REG1; - } - else { - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); - src_r = TMP_REG1; - } - - FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS)); - if (jump) - jump->addr = compiler->size; - return push_inst(compiler, NOP, UNMOVABLE_INS); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type, - sljit_s32 arg_types, - sljit_s32 src, sljit_sw srcw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); - - if (src & SLJIT_MEM) { - ADJUST_LOCAL_OFFSET(src, srcw); - FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw)); - src = TMP_REG1; - } - - FAIL_IF(call_with_args(compiler, arg_types, &src)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - - return sljit_emit_ijump(compiler, type, src, srcw); -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op, - sljit_s32 dst, sljit_sw dstw, - sljit_s32 type) -{ - sljit_s32 reg; - sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0; - - CHECK_ERROR(); - CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type)); - ADJUST_LOCAL_OFFSET(dst, dstw); - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - op = GET_OPCODE(op); - reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? 
dst : TMP_REG2; - - compiler->cache_arg = 0; - compiler->cache_argw = 0; - - if (op >= SLJIT_ADD && (dst & SLJIT_MEM)) - FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw)); - - type &= 0xff; - if (type < SLJIT_EQUAL_F64) - FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type) | 3, UNMOVABLE_INS)); - else - FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type) | 3, UNMOVABLE_INS)); - - FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS)); - FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS)); - - if (op >= SLJIT_ADD) { - flags |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE; - if (dst & SLJIT_MEM) - return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0); - return emit_op(compiler, op, flags, dst, 0, dst, 0, TMP_REG2, 0); - } - - if (!(dst & SLJIT_MEM)) - return SLJIT_SUCCESS; - - return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw); -#else -#error "Implementation required" -#endif -} - -SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, - sljit_s32 dst_reg, - sljit_s32 src, sljit_sw srcw) -{ - CHECK_ERROR(); - CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); - -#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) - return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);; -#else -#error "Implementation required" -#endif -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value) -{ - struct sljit_const *const_; - sljit_s32 dst_r; - - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value)); - ADJUST_LOCAL_OFFSET(dst, dstw); - - const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); - PTR_FAIL_IF(!const_); - set_const(const_, compiler); - - dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; - PTR_FAIL_IF(emit_const(compiler, dst_r, init_value)); - - if (dst & SLJIT_MEM) - PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); - return const_; -} - -SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) -{ - struct sljit_put_label *put_label; - sljit_s32 dst_r; - - CHECK_ERROR_PTR(); - CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw)); - ADJUST_LOCAL_OFFSET(dst, dstw); - - put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label)); - PTR_FAIL_IF(!put_label); - set_put_label(put_label, compiler, 0); - - dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2; - PTR_FAIL_IF(emit_const(compiler, dst_r, 0)); - - if (dst & SLJIT_MEM) - PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); - return put_label; -} diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_32.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_32.c index b9a7b39789..08da03026d 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeX86_32.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_32.c @@ -80,21 +80,28 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw if (b & SLJIT_MEM) { if (!(b & REG_MASK)) inst_size += sizeof(sljit_sw); - else if (immb != 0 && !(b & OFFS_REG_MASK)) { - /* Immediate operand. 
*/ - if (immb <= 127 && immb >= -128) - inst_size += sizeof(sljit_s8); - else - inst_size += sizeof(sljit_sw); - } - else if (reg_map[b & REG_MASK] == 5) - inst_size += sizeof(sljit_s8); + else { + if (immb != 0 && !(b & OFFS_REG_MASK)) { + /* Immediate operand. */ + if (immb <= 127 && immb >= -128) + inst_size += sizeof(sljit_s8); + else + inst_size += sizeof(sljit_sw); + } + else if (reg_map[b & REG_MASK] == 5) { + /* Swap registers if possible. */ + if ((b & OFFS_REG_MASK) && (immb & 0x3) == 0 && reg_map[OFFS_REG(b)] != 5) + b = SLJIT_MEM | OFFS_REG(b) | TO_OFFS_REG(b & REG_MASK); + else + inst_size += sizeof(sljit_s8); + } - if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK)) - b |= TO_OFFS_REG(SLJIT_SP); + if (reg_map[b & REG_MASK] == 4 && !(b & OFFS_REG_MASK)) + b |= TO_OFFS_REG(SLJIT_SP); - if (b & OFFS_REG_MASK) - inst_size += 1; /* SIB byte. */ + if (b & OFFS_REG_MASK) + inst_size += 1; /* SIB byte. */ + } } /* Calculate size of a. */ @@ -107,9 +114,9 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw inst_size += 4; } else if (flags & EX86_SHIFT_INS) { - imma &= 0x1f; + SLJIT_ASSERT(imma <= 0x1f); if (imma != 1) { - inst_size ++; + inst_size++; flags |= EX86_BYTE_ARG; } } else if (flags & EX86_BYTE_ARG) @@ -165,7 +172,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw } else if (b & REG_MASK) { reg_map_b = reg_map[b & REG_MASK]; - if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP) || reg_map_b == 5) { + if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { if (immb != 0 || reg_map_b == 5) { if (immb <= 127 && immb >= -128) *buf_ptr |= 0x40; @@ -190,8 +197,14 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw } } else { + if (reg_map_b == 5) + *buf_ptr |= 0x40; + *buf_ptr++ |= 0x04; *buf_ptr++ = U8(reg_map_b | (reg_map[OFFS_REG(b)] << 3) | (immb << 6)); + + if (reg_map_b == 5) + *buf_ptr++ = 0; } } else { @@ -243,19 +256,16 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ return code_ptr; } -#define ENTER_R2_USED 0x00001 -#define ENTER_R2_TO_S 0x00002 -#define ENTER_R2_TO_R0 0x00004 -#define ENTER_R1_TO_S 0x00008 -#define ENTER_TMP_TO_R4 0x00010 -#define ENTER_TMP_TO_S 0x00020 +#define ENTER_TMP_TO_R4 0x00001 +#define ENTER_TMP_TO_S 0x00002 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler, sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { sljit_s32 word_arg_count, saved_arg_count, float_arg_count; - sljit_s32 size, locals_offset, args_size, types, status; + sljit_s32 size, args_size, types, status; + sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(options); sljit_u8 *inst; #ifdef _WIN32 sljit_s32 r2_offset = -1; @@ -271,108 +281,97 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start); arg_types >>= SLJIT_ARG_SHIFT; - types = arg_types; word_arg_count = 0; - saved_arg_count = 0; - float_arg_count = 0; - args_size = SSIZE_OF(sw); status = 0; - while (types) { - switch (types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - float_arg_count++; - FAIL_IF(emit_sse2_load(compiler, 0, float_arg_count, SLJIT_MEM1(SLJIT_SP), args_size)); - args_size += SSIZE_OF(f64); - break; - case SLJIT_ARG_TYPE_F32: - float_arg_count++; - FAIL_IF(emit_sse2_load(compiler, 1, float_arg_count, 
SLJIT_MEM1(SLJIT_SP), args_size)); - args_size += SSIZE_OF(f32); - break; - default: - word_arg_count++; - if (!(types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - saved_arg_count++; - if (saved_arg_count == 4) - status |= ENTER_TMP_TO_S; - } else { - if (word_arg_count == 4) + if (options & SLJIT_ENTER_REG_ARG) { + args_size = 3 * SSIZE_OF(sw); + + while (arg_types) { + if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) { + word_arg_count++; + if (word_arg_count >= 4) status |= ENTER_TMP_TO_R4; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (word_arg_count == 3) - status |= ENTER_R2_USED; -#endif } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (word_arg_count <= 2 && !(options & SLJIT_ENTER_CDECL)) - break; -#endif - - args_size += SSIZE_OF(sw); - break; + arg_types >>= SLJIT_ARG_SHIFT; } - types >>= SLJIT_ARG_SHIFT; - } - args_size -= SSIZE_OF(sw); - compiler->args_size = args_size; + compiler->args_size = 0; + } else { + types = arg_types; + saved_arg_count = 0; + float_arg_count = 0; + args_size = SSIZE_OF(sw); + while (types) { + switch (types & SLJIT_ARG_MASK) { + case SLJIT_ARG_TYPE_F64: + float_arg_count++; + FAIL_IF(emit_sse2_load(compiler, 0, float_arg_count, SLJIT_MEM1(SLJIT_SP), args_size)); + args_size += SSIZE_OF(f64); + break; + case SLJIT_ARG_TYPE_F32: + float_arg_count++; + FAIL_IF(emit_sse2_load(compiler, 1, float_arg_count, SLJIT_MEM1(SLJIT_SP), args_size)); + args_size += SSIZE_OF(f32); + break; + default: + word_arg_count++; - /* [esp+0] for saving temporaries and function calls. */ - locals_offset = 2 * SSIZE_OF(sw); + if (!(types & SLJIT_ARG_TYPE_SCRATCH_REG)) + saved_arg_count++; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if ((options & SLJIT_ENTER_CDECL) && scratches >= 3) - locals_offset = 4 * SSIZE_OF(sw); -#else - if (scratches >= 3) - locals_offset = 4 * SSIZE_OF(sw); -#endif + if (word_arg_count == 4) { + if (types & SLJIT_ARG_TYPE_SCRATCH_REG) { + status |= ENTER_TMP_TO_R4; + arg_types &= ~(SLJIT_ARG_FULL_MASK << 3 * SLJIT_ARG_SHIFT); + } else if (saved_arg_count == 4) { + status |= ENTER_TMP_TO_S; + arg_types &= ~(SLJIT_ARG_FULL_MASK << 3 * SLJIT_ARG_SHIFT); + } + } - compiler->scratches_offset = locals_offset; + args_size += SSIZE_OF(sw); + break; + } + types >>= SLJIT_ARG_SHIFT; + } - if (scratches > 3) - locals_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * SSIZE_OF(sw); + args_size -= SSIZE_OF(sw); + compiler->args_size = args_size; + } - if (saveds > 3) - locals_offset += (saveds - 3) * SSIZE_OF(sw); + size = (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3) - kept_saveds_count; + if (!(options & SLJIT_ENTER_REG_ARG)) + size++; - compiler->locals_offset = locals_offset; + if (size != 0) { + inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(size + 1)); + FAIL_IF(!inst); - size = 1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? 
saveds : 3); - inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(size + 1)); - FAIL_IF(!inst); + INC_SIZE((sljit_uw)size); - INC_SIZE((sljit_uw)size); - PUSH_REG(reg_map[TMP_REG1]); - if (saveds > 2 || scratches > 9) - PUSH_REG(reg_map[SLJIT_S2]); - if (saveds > 1 || scratches > 10) - PUSH_REG(reg_map[SLJIT_S1]); - if (saveds > 0 || scratches > 11) - PUSH_REG(reg_map[SLJIT_S0]); + if (!(options & SLJIT_ENTER_REG_ARG)) + PUSH_REG(reg_map[TMP_REG1]); - size *= SSIZE_OF(sw); + if ((saveds > 2 && kept_saveds_count <= 2) || scratches > 9) + PUSH_REG(reg_map[SLJIT_S2]); + if ((saveds > 1 && kept_saveds_count <= 1) || scratches > 10) + PUSH_REG(reg_map[SLJIT_S1]); + if ((saveds > 0 && kept_saveds_count == 0) || scratches > 11) + PUSH_REG(reg_map[SLJIT_S0]); + + size *= SSIZE_OF(sw); + } if (status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S)) EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), args_size + size); size += SSIZE_OF(sw); -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(options & SLJIT_ENTER_CDECL)) - size += args_size; -#endif - - local_size = ((locals_offset + local_size + size + 0xf) & ~0xf) - size; + local_size = ((SLJIT_LOCALS_OFFSET_BASE + local_size + size + 0xf) & ~0xf) - size; compiler->local_size = local_size; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(options & SLJIT_ENTER_CDECL)) - size -= args_size; -#endif - word_arg_count = 0; saved_arg_count = 0; args_size = size; @@ -386,64 +385,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi break; default: word_arg_count++; + SLJIT_ASSERT(word_arg_count <= 3 || (word_arg_count == 4 && !(status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S)))); -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(options & SLJIT_ENTER_CDECL) && word_arg_count <= 2) { - if (word_arg_count == 1) { - if (status & ENTER_R2_USED) { - EMIT_MOV(compiler, (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) ? 
SLJIT_R0 : SLJIT_S0, 0, SLJIT_R2, 0); - } else if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - status |= ENTER_R2_TO_S; - saved_arg_count++; - } else - status |= ENTER_R2_TO_R0; - } else if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) { - status |= ENTER_R1_TO_S; - saved_arg_count++; - } - break; - } -#endif if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) { - SLJIT_ASSERT(word_arg_count <= 3 || (status & ENTER_TMP_TO_R4)); - - if (word_arg_count <= 3) { #ifdef _WIN32 - if (word_arg_count == 3 && local_size > 4 * 4096) - r2_offset = local_size + args_size; - else + if (word_arg_count == 3 && local_size > 4 * 4096) + r2_offset = local_size + args_size; + else #endif - EMIT_MOV(compiler, word_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size); - } - } else { - SLJIT_ASSERT(saved_arg_count <= 3 || (status & ENTER_TMP_TO_S)); + EMIT_MOV(compiler, word_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size); - if (saved_arg_count <= 3) - EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size); + } else { + EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, SLJIT_MEM1(SLJIT_SP), args_size); saved_arg_count++; } + args_size += SSIZE_OF(sw); break; } arg_types >>= SLJIT_ARG_SHIFT; } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(options & SLJIT_ENTER_CDECL)) { - if (status & ENTER_R2_TO_R0) - EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_R2, 0); - - saved_arg_count = 0; - if (status & ENTER_R2_TO_S) { - EMIT_MOV(compiler, SLJIT_S0, 0, SLJIT_R2, 0); - saved_arg_count++; - } - - if (status & ENTER_R1_TO_S) - EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, SLJIT_R1, 0); - } -#endif - SLJIT_ASSERT(SLJIT_LOCALS_OFFSET > 0); #ifdef _WIN32 @@ -459,6 +421,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3); } else { + if (options & SLJIT_ENTER_REG_ARG) { + SLJIT_ASSERT(r2_offset == -1); + + inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 1)); + FAIL_IF(!inst); + INC_SIZE(1); + PUSH_REG(reg_map[SLJIT_R2]); + + local_size -= SSIZE_OF(sw); + r2_offset = local_size; + } + EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_IMM, local_size >> 12); BINARY_IMM32(OR, 0, SLJIT_MEM1(SLJIT_SP), -4096); @@ -490,8 +464,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi #endif /* _WIN32 */ + size = SLJIT_LOCALS_OFFSET_BASE - SSIZE_OF(sw); + kept_saveds_count = SLJIT_R3 - kept_saveds_count; + + while (saved_arg_count > 3) { + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), size, kept_saveds_count, 0); + kept_saveds_count++; + size -= SSIZE_OF(sw); + saved_arg_count--; + } + if (status & (ENTER_TMP_TO_R4 | ENTER_TMP_TO_S)) { - size = (status & ENTER_TMP_TO_R4) ? 
compiler->scratches_offset : compiler->locals_offset - SSIZE_OF(sw); + if (status & ENTER_TMP_TO_R4) + size = 2 * SSIZE_OF(sw); + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), size, TMP_REG1, 0); } @@ -502,10 +488,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds, sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size) { - sljit_s32 args_size, locals_offset; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - sljit_s32 word_arg_count = 0; -#endif + sljit_s32 args_size; CHECK_ERROR(); CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); @@ -513,87 +496,88 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp arg_types >>= SLJIT_ARG_SHIFT; args_size = 0; - while (arg_types) { - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - args_size += SSIZE_OF(f64); - break; - case SLJIT_ARG_TYPE_F32: - args_size += SSIZE_OF(f32); - break; - default: -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (word_arg_count >= 2) + + if (!(options & SLJIT_ENTER_REG_ARG)) { + while (arg_types) { + switch (arg_types & SLJIT_ARG_MASK) { + case SLJIT_ARG_TYPE_F64: + args_size += SSIZE_OF(f64); + break; + case SLJIT_ARG_TYPE_F32: + args_size += SSIZE_OF(f32); + break; + default: args_size += SSIZE_OF(sw); - word_arg_count++; -#else - args_size += SSIZE_OF(sw); -#endif - break; + break; + } + arg_types >>= SLJIT_ARG_SHIFT; } - arg_types >>= SLJIT_ARG_SHIFT; } compiler->args_size = args_size; - /* [esp+0] for saving temporaries and function calls. */ - locals_offset = 2 * SSIZE_OF(sw); + /* [esp+0] for saving temporaries and for function calls. */ -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if ((options & SLJIT_ENTER_CDECL) && scratches >= 3) - locals_offset = 4 * SSIZE_OF(sw); -#else - if (scratches >= 3) - locals_offset = 4 * SSIZE_OF(sw); -#endif + saveds = (1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3) - SLJIT_KEPT_SAVEDS_COUNT(options)) * SSIZE_OF(sw); - compiler->scratches_offset = locals_offset; + /* Saving ebp. */ + if (!(options & SLJIT_ENTER_REG_ARG)) + saveds += SSIZE_OF(sw); - if (scratches > 3) - locals_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * SSIZE_OF(sw); - - if (saveds > 3) - locals_offset += (saveds - 3) * SSIZE_OF(sw); - - compiler->locals_offset = locals_offset; - - saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3)) * SSIZE_OF(sw); - -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(options & SLJIT_ENTER_CDECL)) - saveds += args_size; -#endif - - compiler->local_size = ((locals_offset + local_size + saveds + 0xf) & ~0xf) - saveds; + compiler->local_size = ((SLJIT_LOCALS_OFFSET_BASE + local_size + saveds + 0xf) & ~0xf) - saveds; return SLJIT_SUCCESS; } -static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to) { + sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + sljit_s32 local_size, saveds; sljit_uw size; sljit_u8 *inst; - size = (sljit_uw)(1 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0) + - (compiler->saveds <= 3 ? compiler->saveds : 3)); + size = (sljit_uw)((compiler->scratches > 9 ? (compiler->scratches - 9) : 0) + + (compiler->saveds <= 3 ? 
compiler->saveds : 3) - kept_saveds_count); + + local_size = compiler->local_size; + + if (!(compiler->options & SLJIT_ENTER_REG_ARG)) + size++; + else if (is_return_to && size == 0) { + local_size += SSIZE_OF(sw); + is_return_to = 0; + } + + if (local_size > 0) + BINARY_IMM32(ADD, local_size, SLJIT_SP, 0); + + if (size == 0) + return SLJIT_SUCCESS; + inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); INC_SIZE(size); - if (compiler->saveds > 0 || compiler->scratches > 11) + saveds = compiler->saveds; + + if ((saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11) POP_REG(reg_map[SLJIT_S0]); - if (compiler->saveds > 1 || compiler->scratches > 10) + if ((saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10) POP_REG(reg_map[SLJIT_S1]); - if (compiler->saveds > 2 || compiler->scratches > 9) + if ((saveds > 2 && kept_saveds_count <= 2) || compiler->scratches > 9) POP_REG(reg_map[SLJIT_S2]); - POP_REG(reg_map[TMP_REG1]); + + if (!(compiler->options & SLJIT_ENTER_REG_ARG)) + POP_REG(reg_map[TMP_REG1]); + + if (is_return_to) + BINARY_IMM32(ADD, sizeof(sljit_sw), SLJIT_SP, 0); return SLJIT_SUCCESS; } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler) { - sljit_uw size; sljit_u8 *inst; CHECK_ERROR(); @@ -602,143 +586,45 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler SLJIT_ASSERT(compiler->args_size >= 0); SLJIT_ASSERT(compiler->local_size > 0); - BINARY_IMM32(ADD, compiler->local_size, SLJIT_SP, 0); - - FAIL_IF(emit_stack_frame_release(compiler)); + FAIL_IF(emit_stack_frame_release(compiler, 0)); - size = 1; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (compiler->args_size > 0 && !(compiler->options & SLJIT_ENTER_CDECL)) - size = 3; -#endif - inst = (sljit_u8*)ensure_buf(compiler, 1 + size); + inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); - - INC_SIZE(size); - -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (compiler->args_size > 0 && !(compiler->options & SLJIT_ENTER_CDECL)) { - RET_I16(U8(compiler->args_size)); - return SLJIT_SUCCESS; - } -#endif - + INC_SIZE(1); RET(); return SLJIT_SUCCESS; } -/* --------------------------------------------------------------------- */ -/* Call / return instructions */ -/* --------------------------------------------------------------------- */ - -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - -static sljit_sw c_fast_call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr) -{ - sljit_sw stack_size = 0; - sljit_s32 word_arg_count = 0; - - arg_types >>= SLJIT_ARG_SHIFT; - - while (arg_types) { - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - stack_size += SSIZE_OF(f64); - break; - case SLJIT_ARG_TYPE_F32: - stack_size += SSIZE_OF(f32); - break; - default: - word_arg_count++; - if (word_arg_count > 2) - stack_size += SSIZE_OF(sw); - break; - } - - arg_types >>= SLJIT_ARG_SHIFT; - } - - if (word_arg_count_ptr) - *word_arg_count_ptr = word_arg_count; - - return stack_size; -} - -static sljit_s32 c_fast_call_with_args(struct sljit_compiler *compiler, - sljit_s32 arg_types, sljit_sw stack_size, sljit_s32 word_arg_count, sljit_s32 swap_args) +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) { - sljit_u8 *inst; - sljit_s32 float_arg_count; + sljit_s32 src_r; - if (stack_size == SSIZE_OF(sw) && word_arg_count == 3) { - inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); - FAIL_IF(!inst); 
- INC_SIZE(1); - PUSH_REG(reg_map[SLJIT_R2]); - } - else if (stack_size > 0) { - if (word_arg_count >= 4) - EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->scratches_offset); + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); - BINARY_IMM32(SUB, stack_size, SLJIT_SP, 0); + if ((src & SLJIT_MEM) || (src > SLJIT_R2 && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) { + ADJUST_LOCAL_OFFSET(src, srcw); + CHECK_EXTRA_REGS(src, srcw, (void)0); - stack_size = 0; - arg_types >>= SLJIT_ARG_SHIFT; - word_arg_count = 0; - float_arg_count = 0; - while (arg_types) { - switch (arg_types & SLJIT_ARG_MASK) { - case SLJIT_ARG_TYPE_F64: - float_arg_count++; - FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); - stack_size += SSIZE_OF(f64); - break; - case SLJIT_ARG_TYPE_F32: - float_arg_count++; - FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); - stack_size += SSIZE_OF(f32); - break; - default: - word_arg_count++; - if (word_arg_count == 3) { - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, SLJIT_R2, 0); - stack_size += SSIZE_OF(sw); - } - else if (word_arg_count == 4) { - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, TMP_REG1, 0); - stack_size += SSIZE_OF(sw); - } - break; - } + src_r = (compiler->options & SLJIT_ENTER_REG_ARG) ? TMP_REG1 : SLJIT_R1; - arg_types >>= SLJIT_ARG_SHIFT; - } + EMIT_MOV(compiler, src_r, 0, src, srcw); + src = src_r; + srcw = 0; } - if (word_arg_count > 0) { - if (swap_args) { - inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); - FAIL_IF(!inst); - INC_SIZE(1); - - *inst++ = U8(XCHG_EAX_r | reg_map[SLJIT_R2]); - } - else { - inst = (sljit_u8*)ensure_buf(compiler, 1 + 2); - FAIL_IF(!inst); - INC_SIZE(2); - - *inst++ = MOV_r_rm; - *inst++ = U8(MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0]); - } - } + FAIL_IF(emit_stack_frame_release(compiler, 1)); - return SLJIT_SUCCESS; + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); } -#endif +/* --------------------------------------------------------------------- */ +/* Call / return instructions */ +/* --------------------------------------------------------------------- */ -static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr) +static sljit_s32 call_get_stack_size(sljit_s32 arg_types, sljit_s32 *word_arg_count_ptr) { sljit_sw stack_size = 0; sljit_s32 word_arg_count = 0; @@ -765,25 +651,31 @@ static sljit_s32 cdecl_call_get_stack_size(struct sljit_compiler *compiler, slji if (word_arg_count_ptr) *word_arg_count_ptr = word_arg_count; - if (stack_size <= compiler->scratches_offset) + if (stack_size <= 4 * SSIZE_OF(sw)) return 0; - return ((stack_size - compiler->scratches_offset + 0xf) & ~0xf); + return ((stack_size - (4 * SSIZE_OF(sw)) + 0xf) & ~0xf); } -static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler, - sljit_s32 arg_types, sljit_sw stack_size, sljit_s32 word_arg_count) +static sljit_s32 call_with_args(struct sljit_compiler *compiler, + sljit_s32 arg_types, sljit_sw stack_size, sljit_s32 word_arg_count, sljit_s32 keep_tmp1) { - sljit_s32 float_arg_count = 0; + sljit_s32 float_arg_count = 0, arg4_reg = 0, arg_offset; sljit_u8 *inst; - if (word_arg_count >= 4) - EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), compiler->scratches_offset); + if (word_arg_count >= 4) { + arg4_reg = SLJIT_R0; + + if (!keep_tmp1) { + EMIT_MOV(compiler, TMP_REG1, 0, 
SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw)); + arg4_reg = TMP_REG1; + } + } if (stack_size > 0) BINARY_IMM32(SUB, stack_size, SLJIT_SP, 0); - stack_size = 0; + arg_offset = 0; word_arg_count = 0; arg_types >>= SLJIT_ARG_SHIFT; @@ -791,18 +683,22 @@ static sljit_s32 cdecl_call_with_args(struct sljit_compiler *compiler, switch (arg_types & SLJIT_ARG_MASK) { case SLJIT_ARG_TYPE_F64: float_arg_count++; - FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); - stack_size += SSIZE_OF(f64); + FAIL_IF(emit_sse2_store(compiler, 0, SLJIT_MEM1(SLJIT_SP), arg_offset, float_arg_count)); + arg_offset += SSIZE_OF(f64); break; case SLJIT_ARG_TYPE_F32: float_arg_count++; - FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), stack_size, float_arg_count)); - stack_size += SSIZE_OF(f32); + FAIL_IF(emit_sse2_store(compiler, 1, SLJIT_MEM1(SLJIT_SP), arg_offset, float_arg_count)); + arg_offset += SSIZE_OF(f32); break; default: word_arg_count++; - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size, (word_arg_count >= 4) ? TMP_REG1 : word_arg_count, 0); - stack_size += SSIZE_OF(sw); + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), arg_offset, (word_arg_count >= 4) ? arg4_reg : word_arg_count, 0); + + if (word_arg_count == 1 && arg4_reg == SLJIT_R0) + EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw) + stack_size); + + arg_offset += SSIZE_OF(sw); break; } @@ -840,21 +736,19 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, sljit_s32 *extra_space, sljit_s32 arg_types, sljit_s32 src, sljit_sw srcw) { - sljit_sw args_size, prev_args_size, saved_regs_size; + sljit_sw args_size, saved_regs_size; sljit_sw types, word_arg_count, float_arg_count; sljit_sw stack_size, prev_stack_size, min_size, offset; sljit_sw word_arg4_offset; sljit_u8 r2_offset = 0; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - sljit_u8 fast_call = (*extra_space & 0xff) == SLJIT_CALL; -#endif + sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options); sljit_u8* inst; ADJUST_LOCAL_OFFSET(src, srcw); CHECK_EXTRA_REGS(src, srcw, (void)0); saved_regs_size = (1 + (compiler->scratches > 9 ? (compiler->scratches - 9) : 0) - + (compiler->saveds <= 3 ? compiler->saveds : 3)) * SSIZE_OF(sw); + + (compiler->saveds <= 3 ? compiler->saveds : 3) - kept_saveds_count) * SSIZE_OF(sw); word_arg_count = 0; float_arg_count = 0; @@ -876,30 +770,15 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, break; default: word_arg_count++; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!fast_call || word_arg_count > 2) - args_size += SSIZE_OF(sw); -#else args_size += SSIZE_OF(sw); -#endif break; } arg_types >>= SLJIT_ARG_SHIFT; } - if (args_size <= compiler->args_size -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - && (!(compiler->options & SLJIT_ENTER_CDECL) || args_size == 0 || !fast_call) -#endif /* SLJIT_X86_32_FASTCALL */ - && 1) { -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - *extra_space = fast_call ? 
0 : args_size; - prev_args_size = compiler->args_size; - stack_size = prev_args_size + SSIZE_OF(sw) + saved_regs_size; -#else /* !SLJIT_X86_32_FASTCALL */ + if (args_size <= compiler->args_size) { *extra_space = 0; stack_size = args_size + SSIZE_OF(sw) + saved_regs_size; -#endif /* SLJIT_X86_32_FASTCALL */ offset = stack_size + compiler->local_size; @@ -911,37 +790,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, EMIT_MOV(compiler, SLJIT_R0, 0, src, srcw); } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(compiler->options & SLJIT_ENTER_CDECL)) { - if (!fast_call) - offset -= SSIZE_OF(sw); - - if (word_arg_count >= 3) { - word_arg4_offset = SSIZE_OF(sw); - - if (word_arg_count + float_arg_count >= 4) { - word_arg4_offset = SSIZE_OF(sw) + SSIZE_OF(sw); - if ((types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64) - word_arg4_offset = SSIZE_OF(sw) + SSIZE_OF(f64); - } - - /* In cdecl mode, at least one more word value must - * be present on the stack before the return address. */ - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset - word_arg4_offset, SLJIT_R2, 0); - } - - if (fast_call) { - if (args_size < prev_args_size) { - EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), offset - prev_args_size - SSIZE_OF(sw)); - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset - args_size - SSIZE_OF(sw), SLJIT_R2, 0); - } - } else if (prev_args_size > 0) { - EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), offset - prev_args_size); - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0); - } - } -#endif /* SLJIT_X86_32_FASTCALL */ - while (types != 0) { switch (types & SLJIT_ARG_MASK) { case SLJIT_ARG_TYPE_F64: @@ -957,12 +805,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, default: switch (word_arg_count) { case 1: -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (fast_call) { - EMIT_MOV(compiler, SLJIT_R2, 0, r2_offset != 0 ? 
SLJIT_MEM1(SLJIT_SP) : SLJIT_R0, 0); - break; - } -#endif offset -= SSIZE_OF(sw); if (r2_offset != 0) { EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 0); @@ -971,10 +813,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R0, 0); break; case 2: -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (fast_call) - break; -#endif offset -= SSIZE_OF(sw); EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R1, 0); break; @@ -983,7 +821,7 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, break; case 4: offset -= SSIZE_OF(sw); - EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), compiler->scratches_offset); + EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw)); EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0); break; } @@ -993,15 +831,7 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, types >>= SLJIT_ARG_SHIFT; } - BINARY_IMM32(ADD, compiler->local_size, SLJIT_SP, 0); - FAIL_IF(emit_stack_frame_release(compiler)); - -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (args_size < prev_args_size) - BINARY_IMM32(ADD, prev_args_size - args_size, SLJIT_SP, 0); -#endif - - return SLJIT_SUCCESS; + return emit_stack_frame_release(compiler, 0); } stack_size = args_size + SSIZE_OF(sw); @@ -1014,16 +844,10 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, if (word_arg_count >= 3) stack_size += SSIZE_OF(sw); - prev_args_size = 0; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (!(compiler->options & SLJIT_ENTER_CDECL)) - prev_args_size = compiler->args_size; -#endif - - prev_stack_size = prev_args_size + SSIZE_OF(sw) + saved_regs_size; + prev_stack_size = SSIZE_OF(sw) + saved_regs_size; min_size = prev_stack_size + compiler->local_size; - word_arg4_offset = compiler->scratches_offset; + word_arg4_offset = 2 * SSIZE_OF(sw); if (stack_size > min_size) { BINARY_IMM32(SUB, stack_size - min_size, SLJIT_SP, 0); @@ -1050,75 +874,30 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, } /* Restore saved registers. */ - offset = stack_size - prev_args_size - 2 * SSIZE_OF(sw); + offset = stack_size - 2 * SSIZE_OF(sw); EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), offset); if (compiler->saveds > 2 || compiler->scratches > 9) { offset -= SSIZE_OF(sw); EMIT_MOV(compiler, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), offset); } - if (compiler->saveds > 1 || compiler->scratches > 10) { + if ((compiler->saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10) { offset -= SSIZE_OF(sw); EMIT_MOV(compiler, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_SP), offset); } - if (compiler->saveds > 0 || compiler->scratches > 11) { + if ((compiler->saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11) { offset -= SSIZE_OF(sw); EMIT_MOV(compiler, SLJIT_S0, 0, SLJIT_MEM1(SLJIT_SP), offset); } /* Copy fourth argument and return address. 
*/ -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (fast_call) { - offset = stack_size; - *extra_space = 0; - - if (word_arg_count >= 4 && prev_args_size == 0) { - offset -= SSIZE_OF(sw); - inst = emit_x86_instruction(compiler, 1, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), offset); - FAIL_IF(!inst); - *inst = XCHG_r_rm; - - SLJIT_ASSERT(args_size != prev_args_size); - } else { - if (word_arg_count >= 4) { - offset -= SSIZE_OF(sw); - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0); - } - - if (args_size != prev_args_size) - EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), stack_size - prev_args_size - SSIZE_OF(sw)); - } - - if (args_size != prev_args_size) - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size - args_size - SSIZE_OF(sw), SLJIT_R2, 0); - } else { -#endif /* SLJIT_X86_32_FASTCALL */ - offset = stack_size - SSIZE_OF(sw); - *extra_space = args_size; + offset = stack_size - SSIZE_OF(sw); + *extra_space = args_size; - if (word_arg_count >= 4 && prev_args_size == SSIZE_OF(sw)) { - offset -= SSIZE_OF(sw); - inst = emit_x86_instruction(compiler, 1, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), offset); - FAIL_IF(!inst); - *inst = XCHG_r_rm; - - SLJIT_ASSERT(prev_args_size > 0); - } else { - if (word_arg_count >= 4) { - offset -= SSIZE_OF(sw); - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0); - } - - if (prev_args_size > 0) - EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), stack_size - prev_args_size - SSIZE_OF(sw)); - } - - /* Copy return address. */ - if (prev_args_size > 0) - EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), stack_size - SSIZE_OF(sw), SLJIT_R2, 0); -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (word_arg_count >= 4) { + offset -= SSIZE_OF(sw); + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R2, 0); } -#endif /* SLJIT_X86_32_FASTCALL */ while (types != 0) { switch (types & SLJIT_ARG_MASK) { @@ -1135,12 +914,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, default: switch (word_arg_count) { case 1: -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (fast_call) { - EMIT_MOV(compiler, SLJIT_R2, 0, r2_offset != 0 ? SLJIT_MEM1(SLJIT_SP) : SLJIT_R0, 0); - break; - } -#endif offset -= SSIZE_OF(sw); if (r2_offset != 0) { EMIT_MOV(compiler, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), 0); @@ -1149,10 +922,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R0, 0); break; case 2: -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if (fast_call) - break; -#endif offset -= SSIZE_OF(sw); EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, SLJIT_R1, 0); break; @@ -1168,12 +937,6 @@ static sljit_s32 tail_call_with_args(struct sljit_compiler *compiler, types >>= SLJIT_ARG_SHIFT; } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - /* Skip return address. 
*/ - if (fast_call) - offset -= SSIZE_OF(sw); -#endif - SLJIT_ASSERT(offset >= 0); if (offset == 0) @@ -1198,6 +961,38 @@ static sljit_s32 emit_tail_call_end(struct sljit_compiler *compiler, sljit_s32 e return SLJIT_SUCCESS; } +static sljit_s32 tail_call_reg_arg_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types) +{ + sljit_s32 word_arg_count = 0; + sljit_s32 kept_saveds_count, offset; + + arg_types >>= SLJIT_ARG_SHIFT; + + while (arg_types) { + if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) + word_arg_count++; + + arg_types >>= SLJIT_ARG_SHIFT; + } + + if (word_arg_count < 4) + return SLJIT_SUCCESS; + + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), 2 * SSIZE_OF(sw)); + + kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + offset = compiler->local_size + 3 * SSIZE_OF(sw); + + if ((compiler->saveds > 0 && kept_saveds_count == 0) || compiler->scratches > 11) + offset += SSIZE_OF(sw); + if ((compiler->saveds > 1 && kept_saveds_count <= 1) || compiler->scratches > 10) + offset += SSIZE_OF(sw); + if ((compiler->saveds > 2 && kept_saveds_count <= 2) || compiler->scratches > 9) + offset += SSIZE_OF(sw); + + return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), offset, TMP_REG1, 0); +} + SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 arg_types) { @@ -1209,18 +1004,21 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types)); if (type & SLJIT_CALL_RETURN) { + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + PTR_FAIL_IF(tail_call_reg_arg_with_args(compiler, arg_types)); + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_jump(compiler, SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP)); + } + stack_size = type; PTR_FAIL_IF(tail_call_with_args(compiler, &stack_size, arg_types, SLJIT_IMM, 0)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); - if (stack_size == 0) { - type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); - return sljit_emit_jump(compiler, type); - } + if (stack_size == 0) + return sljit_emit_jump(compiler, SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP)); jump = sljit_emit_jump(compiler, type); PTR_FAIL_IF(jump == NULL); @@ -1229,32 +1027,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile return jump; } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - if ((type & 0xff) == SLJIT_CALL) { - stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count); - PTR_FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, 0)); - -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - - jump = sljit_emit_jump(compiler, type); - PTR_FAIL_IF(jump == NULL); - - PTR_FAIL_IF(post_call_with_args(compiler, arg_types, 0)); - return jump; + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_jump(compiler, type); } -#endif - - stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count); - PTR_FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + 
stack_size = call_get_stack_size(arg_types, &word_arg_count); + PTR_FAIL_IF(call_with_args(compiler, arg_types, stack_size, word_arg_count, 0)); + SLJIT_SKIP_CHECKS(compiler); jump = sljit_emit_jump(compiler, type); PTR_FAIL_IF(jump == NULL); @@ -1268,14 +1049,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi { sljit_sw stack_size = 0; sljit_s32 word_arg_count; -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - sljit_s32 swap_args; -#endif CHECK_ERROR(); CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw)); if (type & SLJIT_CALL_RETURN) { + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + FAIL_IF(tail_call_reg_arg_with_args(compiler, arg_types)); + + if ((src & SLJIT_MEM) || (src > SLJIT_R2 && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) { + ADJUST_LOCAL_OFFSET(src, srcw); + CHECK_EXTRA_REGS(src, srcw, (void)0); + + EMIT_MOV(compiler, TMP_REG1, 0, src, srcw); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 0)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); + } + stack_size = type; FAIL_IF(tail_call_with_args(compiler, &stack_size, arg_types, src, srcw)); @@ -1284,10 +1080,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi srcw = 0; } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); if (stack_size == 0) return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); @@ -1296,57 +1089,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi return emit_tail_call_end(compiler, stack_size); } -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - SLJIT_ASSERT(reg_map[SLJIT_R0] == 0 && reg_map[SLJIT_R2] == 1 && SLJIT_R0 == 1 && SLJIT_R2 == 3); + if ((type & 0xff) == SLJIT_CALL_REG_ARG) { + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, type, src, srcw); + } - if ((type & 0xff) == SLJIT_CALL) { - stack_size = c_fast_call_get_stack_size(arg_types, &word_arg_count); - swap_args = 0; + ADJUST_LOCAL_OFFSET(src, srcw); + CHECK_EXTRA_REGS(src, srcw, (void)0); - if (word_arg_count > 0) { - if ((src & REG_MASK) == SLJIT_R2 || OFFS_REG(src) == SLJIT_R2) { - swap_args = 1; - if (((src & REG_MASK) | 0x2) == SLJIT_R2) - src ^= 0x2; - if ((OFFS_REG(src) | 0x2) == SLJIT_R2) - src ^= TO_OFFS_REG(0x2); - } - } + if (src & SLJIT_MEM) { + EMIT_MOV(compiler, TMP_REG1, 0, src, srcw); + src = TMP_REG1; + srcw = 0; + } - FAIL_IF(c_fast_call_with_args(compiler, arg_types, stack_size, word_arg_count, swap_args)); + stack_size = call_get_stack_size(arg_types, &word_arg_count); + FAIL_IF(call_with_args(compiler, arg_types, stack_size, word_arg_count, src == TMP_REG1)); - compiler->scratches_offset += stack_size; - compiler->locals_offset += stack_size; + if (stack_size > 0 && src == SLJIT_MEM1(SLJIT_SP)) + srcw += stack_size; -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + SLJIT_SKIP_CHECKS(compiler); + FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + + return post_call_with_args(compiler, arg_types, stack_size); +} - compiler->scratches_offset -= stack_size; - compiler->locals_offset -= stack_size; +static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *compiler, 
sljit_s32 op, sljit_s32 src, sljit_sw srcw) +{ + sljit_u8* inst; - return post_call_with_args(compiler, arg_types, 0); - } -#endif + if (compiler->options & SLJIT_ENTER_REG_ARG) { + if (src == SLJIT_FR0) + return SLJIT_SUCCESS; - stack_size = cdecl_call_get_stack_size(compiler, arg_types, &word_arg_count); - FAIL_IF(cdecl_call_with_args(compiler, arg_types, stack_size, word_arg_count)); + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_fop1(compiler, op, SLJIT_RETURN_FREG, 0, src, srcw); + } - compiler->scratches_offset += stack_size; - compiler->locals_offset += stack_size; + if (FAST_IS_REG(src)) { + FAIL_IF(emit_sse2_store(compiler, op & SLJIT_32, SLJIT_MEM1(SLJIT_SP), 0, src)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif - FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw)); + src = SLJIT_MEM1(SLJIT_SP); + srcw = 0; + } else { + ADJUST_LOCAL_OFFSET(src, srcw); + } - compiler->scratches_offset -= stack_size; - compiler->locals_offset -= stack_size; + inst = emit_x86_instruction(compiler, 1 | EX86_SSE2_OP1, 0, 0, src, srcw); + *inst = (op & SLJIT_32) ? FLDS : FLDL; - return post_call_with_args(compiler, arg_types, stack_size); + return SLJIT_SUCCESS; } SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw) @@ -1404,6 +1197,88 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src return SLJIT_SUCCESS; } +/* --------------------------------------------------------------------- */ +/* Other operations */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_u8* inst; + sljit_s32 i, next, reg_idx, offset; + sljit_u8 regs[2]; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + ADJUST_LOCAL_OFFSET(mem, memw); + + regs[0] = U8(REG_PAIR_FIRST(reg)); + regs[1] = U8(REG_PAIR_SECOND(reg)); + + next = SSIZE_OF(sw); + + if (!(type & SLJIT_MEM_STORE) && (regs[0] == (mem & REG_MASK) || regs[0] == OFFS_REG(mem))) { + if (regs[1] == (mem & REG_MASK) || regs[1] == OFFS_REG(mem)) { + /* None of them are virtual register so TMP_REG1 will not be used. */ + EMIT_MOV(compiler, TMP_REG1, 0, OFFS_REG(mem), 0); + + if (regs[1] == OFFS_REG(mem)) + next = -SSIZE_OF(sw); + + mem = (mem & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG1); + } else { + next = -SSIZE_OF(sw); + + if (!(mem & OFFS_REG_MASK)) + memw += SSIZE_OF(sw); + } + } + + for (i = 0; i < 2; i++) { + reg_idx = next > 0 ? i : (i ^ 0x1); + reg = regs[reg_idx]; + + offset = -1; + + if (reg >= SLJIT_R3 && reg <= SLJIT_S3) { + offset = (2 * SSIZE_OF(sw)) + ((reg) - SLJIT_R3) * SSIZE_OF(sw); + reg = TMP_REG1; + + if (type & SLJIT_MEM_STORE) + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), offset); + } + + if ((mem & OFFS_REG_MASK) && (reg_idx == 1)) { + inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 4)); + FAIL_IF(!inst); + + INC_SIZE(4); + + inst[0] = (type & SLJIT_MEM_STORE) ? 
MOV_rm_r : MOV_r_rm; + inst[1] = 0x44 | U8(reg_map[reg] << 3); + inst[2] = U8(memw << 6) | U8(reg_map[OFFS_REG(mem)] << 3) | reg_map[mem & REG_MASK]; + inst[3] = sizeof(sljit_sw); + } else if (type & SLJIT_MEM_STORE) { + EMIT_MOV(compiler, mem, memw, reg, 0); + } else { + EMIT_MOV(compiler, reg, 0, mem, memw); + } + + if (!(mem & OFFS_REG_MASK)) + memw += next; + + if (!(type & SLJIT_MEM_STORE) && offset != -1) + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), offset, TMP_REG1, 0); + } + + return SLJIT_SUCCESS; +} + static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler) { sljit_sw size; diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c index f37df6e1bf..4e938ffcf3 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c @@ -101,34 +101,38 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw /* Calculate size of b. */ inst_size += 1; /* mod r/m byte. */ if (b & SLJIT_MEM) { - if (!(b & OFFS_REG_MASK)) { - if (NOT_HALFWORD(immb)) { - PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb)); - immb = 0; - if (b & REG_MASK) - b |= TO_OFFS_REG(TMP_REG2); - else - b |= TMP_REG2; - } - else if (reg_lmap[b & REG_MASK] == 4) - b |= TO_OFFS_REG(SLJIT_SP); + if (!(b & OFFS_REG_MASK) && NOT_HALFWORD(immb)) { + PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb)); + immb = 0; + if (b & REG_MASK) + b |= TO_OFFS_REG(TMP_REG2); + else + b |= TMP_REG2; } if (!(b & REG_MASK)) inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */ else { - if (reg_map[b & REG_MASK] >= 8) - rex |= REX_B; - - if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) { + if (immb != 0 && !(b & OFFS_REG_MASK)) { /* Immediate operand. */ if (immb <= 127 && immb >= -128) inst_size += sizeof(sljit_s8); else inst_size += sizeof(sljit_s32); } - else if (reg_lmap[b & REG_MASK] == 5) - inst_size += sizeof(sljit_s8); + else if (reg_lmap[b & REG_MASK] == 5) { + /* Swap registers if possible. */ + if ((b & OFFS_REG_MASK) && (immb & 0x3) == 0 && reg_lmap[OFFS_REG(b)] != 5) + b = SLJIT_MEM | OFFS_REG(b) | TO_OFFS_REG(b & REG_MASK); + else + inst_size += sizeof(sljit_s8); + } + + if (reg_map[b & REG_MASK] >= 8) + rex |= REX_B; + + if (reg_lmap[b & REG_MASK] == 4 && !(b & OFFS_REG_MASK)) + b |= TO_OFFS_REG(SLJIT_SP); if (b & OFFS_REG_MASK) { inst_size += 1; /* SIB byte. */ @@ -153,9 +157,9 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw inst_size += 4; } else if (flags & EX86_SHIFT_INS) { - imma &= compiler->mode32 ? 0x1f : 0x3f; + SLJIT_ASSERT(imma <= (compiler->mode32 ? 
0x1f : 0x3f)); if (imma != 1) { - inst_size ++; + inst_size++; flags |= EX86_BYTE_ARG; } } else if (flags & EX86_BYTE_ARG) @@ -223,7 +227,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw } else if (b & REG_MASK) { reg_lmap_b = reg_lmap[b & REG_MASK]; - if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP) || reg_lmap_b == 5) { + if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { if (immb != 0 || reg_lmap_b == 5) { if (immb <= 127 && immb >= -128) *buf_ptr |= 0x40; @@ -248,8 +252,14 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw } } else { + if (reg_lmap_b == 5) + *buf_ptr |= 0x40; + *buf_ptr++ |= 0x04; *buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6)); + + if (reg_lmap_b == 5) + *buf_ptr++ = 0; } } else { @@ -366,7 +376,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi { sljit_uw size; sljit_s32 word_arg_count = 0; - sljit_s32 saved_arg_count = 0; + sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options); sljit_s32 saved_regs_size, tmp, i; #ifdef _WIN64 sljit_s32 saved_float_regs_size; @@ -379,16 +389,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size)); set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size); + if (options & SLJIT_ENTER_REG_ARG) + arg_types = 0; + /* Emit ENDBR64 at function entry if needed. */ FAIL_IF(emit_endbranch(compiler)); compiler->mode32 = 0; /* Including the return address saved by the call instruction. */ - saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1); tmp = SLJIT_S0 - saveds; - for (i = SLJIT_S0; i > tmp; i--) { + for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) { size = reg_map[i] >= 8 ? 2 : 1; inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); @@ -561,15 +574,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp #endif /* _WIN64 */ /* Including the return address saved by the call instruction. 
*/ - saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1); + saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1); compiler->local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size; return SLJIT_SUCCESS; } -static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) +static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to) { sljit_uw size; - sljit_s32 i, tmp; + sljit_s32 local_size, i, tmp; sljit_u8 *inst; #ifdef _WIN64 sljit_s32 saved_float_regs_offset; @@ -598,30 +611,21 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) *inst = MOVAPS_x_xm; saved_float_regs_offset += 16; } + + compiler->mode32 = 0; } #endif /* _WIN64 */ - if (compiler->local_size > 0) { - if (compiler->local_size <= 127) { - inst = (sljit_u8*)ensure_buf(compiler, 1 + 4); - FAIL_IF(!inst); - INC_SIZE(4); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_83; - *inst++ = MOD_REG | ADD | 4; - *inst = U8(compiler->local_size); - } - else { - inst = (sljit_u8*)ensure_buf(compiler, 1 + 7); - FAIL_IF(!inst); - INC_SIZE(7); - *inst++ = REX_W; - *inst++ = GROUP_BINARY_81; - *inst++ = MOD_REG | ADD | 4; - sljit_unaligned_store_s32(inst, compiler->local_size); - } + local_size = compiler->local_size; + + if (is_return_to && compiler->scratches < SLJIT_FIRST_SAVED_REG && (compiler->saveds == SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { + local_size += SSIZE_OF(sw); + is_return_to = 0; } + if (local_size > 0) + BINARY_IMM32(ADD, local_size, SLJIT_SP, 0); + tmp = compiler->scratches; for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) { size = reg_map[i] >= 8 ? 2 : 1; @@ -633,8 +637,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) POP_REG(reg_lmap[i]); } - tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; - for (i = tmp; i <= SLJIT_S0; i++) { + tmp = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); + for (i = SLJIT_S0 + 1 - compiler->saveds; i <= tmp; i++) { size = reg_map[i] >= 8 ? 
2 : 1; inst = (sljit_u8*)ensure_buf(compiler, 1 + size); FAIL_IF(!inst); @@ -644,6 +648,9 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler) POP_REG(reg_lmap[i]); } + if (is_return_to) + BINARY_IMM32(ADD, sizeof(sljit_sw), SLJIT_SP, 0); + return SLJIT_SUCCESS; } @@ -654,7 +661,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler CHECK_ERROR(); CHECK(check_sljit_emit_return_void(compiler)); - FAIL_IF(emit_stack_frame_release(compiler)); + compiler->mode32 = 0; + + FAIL_IF(emit_stack_frame_release(compiler, 0)); inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); FAIL_IF(!inst); @@ -663,6 +672,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler return SLJIT_SUCCESS; } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler, + sljit_s32 src, sljit_sw srcw) +{ + CHECK_ERROR(); + CHECK(check_sljit_emit_return_to(compiler, src, srcw)); + + compiler->mode32 = 0; + + if ((src & SLJIT_MEM) || (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) { + ADJUST_LOCAL_OFFSET(src, srcw); + + EMIT_MOV(compiler, TMP_REG2, 0, src, srcw); + src = TMP_REG2; + srcw = 0; + } + + FAIL_IF(emit_stack_frame_release(compiler, 1)); + + SLJIT_SKIP_CHECKS(compiler); + return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw); +} + /* --------------------------------------------------------------------- */ /* Call / return instructions */ /* --------------------------------------------------------------------- */ @@ -786,17 +817,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile compiler->mode32 = 0; - PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL)); if (type & SLJIT_CALL_RETURN) { - PTR_FAIL_IF(emit_stack_frame_release(compiler)); + PTR_FAIL_IF(emit_stack_frame_release(compiler, 0)); type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP); } -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_jump(compiler, type); } @@ -816,22 +845,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi } if (type & SLJIT_CALL_RETURN) { - if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) { + if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) { EMIT_MOV(compiler, TMP_REG2, 0, src, srcw); src = TMP_REG2; } - FAIL_IF(emit_stack_frame_release(compiler)); - type = SLJIT_JUMP; + FAIL_IF(emit_stack_frame_release(compiler, 0)); } - FAIL_IF(call_with_args(compiler, arg_types, &src)); + if ((type & 0xff) != SLJIT_CALL_REG_ARG) + FAIL_IF(call_with_args(compiler, arg_types, &src)); -#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ - || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) - compiler->skip_checks = 1; -#endif + if (type & SLJIT_CALL_RETURN) + type = SLJIT_JUMP; + SLJIT_SKIP_CHECKS(compiler); return sljit_emit_ijump(compiler, type, src, srcw); } @@ -907,9 +935,89 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src } /* --------------------------------------------------------------------- */ -/* Extend input */ +/* Other operations */ /* --------------------------------------------------------------------- */ +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler 
*compiler, sljit_s32 type, + sljit_s32 reg, + sljit_s32 mem, sljit_sw memw) +{ + sljit_u8* inst; + sljit_s32 i, next, reg_idx; + sljit_u8 regs[2]; + + CHECK_ERROR(); + CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw)); + + if (!(reg & REG_PAIR_MASK)) + return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw); + + ADJUST_LOCAL_OFFSET(mem, memw); + + compiler->mode32 = 0; + + if ((mem & REG_MASK) == 0) { + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw); + + mem = SLJIT_MEM1(TMP_REG1); + memw = 0; + } else if (!(mem & OFFS_REG_MASK) && ((memw < HALFWORD_MIN) || (memw > HALFWORD_MAX - SSIZE_OF(sw)))) { + EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw); + + mem = SLJIT_MEM2(mem & REG_MASK, TMP_REG1); + memw = 0; + } + + regs[0] = U8(REG_PAIR_FIRST(reg)); + regs[1] = U8(REG_PAIR_SECOND(reg)); + + next = SSIZE_OF(sw); + + if (!(type & SLJIT_MEM_STORE) && (regs[0] == (mem & REG_MASK) || regs[0] == OFFS_REG(mem))) { + if (regs[1] == (mem & REG_MASK) || regs[1] == OFFS_REG(mem)) { + /* Base and offset cannot be TMP_REG1. */ + EMIT_MOV(compiler, TMP_REG1, 0, OFFS_REG(mem), 0); + + if (regs[1] == OFFS_REG(mem)) + next = -SSIZE_OF(sw); + + mem = (mem & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG1); + } else { + next = -SSIZE_OF(sw); + + if (!(mem & OFFS_REG_MASK)) + memw += SSIZE_OF(sw); + } + } + + for (i = 0; i < 2; i++) { + reg_idx = next > 0 ? i : (i ^ 0x1); + reg = regs[reg_idx]; + + if ((mem & OFFS_REG_MASK) && (reg_idx == 1)) { + inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 5)); + FAIL_IF(!inst); + + INC_SIZE(5); + + inst[0] = U8(REX_W | ((reg_map[reg] >= 8) ? REX_R : 0) | ((reg_map[mem & REG_MASK] >= 8) ? REX_B : 0) | ((reg_map[OFFS_REG(mem)] >= 8) ? REX_X : 0)); + inst[1] = (type & SLJIT_MEM_STORE) ? MOV_rm_r : MOV_r_rm; + inst[2] = 0x44 | U8(reg_lmap[reg] << 3); + inst[3] = U8(memw << 6) | U8(reg_lmap[OFFS_REG(mem)] << 3) | reg_lmap[mem & REG_MASK]; + inst[4] = sizeof(sljit_sw); + } else if (type & SLJIT_MEM_STORE) { + EMIT_MOV(compiler, mem, memw, reg, 0); + } else { + EMIT_MOV(compiler, reg, 0, mem, memw); + } + + if (!(mem & OFFS_REG_MASK)) + memw += next; + } + + return SLJIT_SUCCESS; +} + static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign, sljit_s32 dst, sljit_sw dstw, sljit_s32 src, sljit_sw srcw) diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c index c7dd9be8fd..651942be80 100644 --- a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c +++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c @@ -26,11 +26,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void) { -#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) - return "x86" SLJIT_CPUINFO " ABI:fastcall"; -#else return "x86" SLJIT_CPUINFO; -#endif } /* @@ -78,10 +74,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = { #define CHECK_EXTRA_REGS(p, w, do) \ if (p >= SLJIT_R3 && p <= SLJIT_S3) { \ - if (p <= compiler->scratches) \ - w = compiler->scratches_offset + ((p) - SLJIT_R3) * SSIZE_OF(sw); \ - else \ - w = compiler->locals_offset + ((p) - SLJIT_S2) * SSIZE_OF(sw); \ + w = (2 * SSIZE_OF(sw)) + ((p) - SLJIT_R3) * SSIZE_OF(sw); \ p = SLJIT_MEM1(SLJIT_SP); \ do; \ } @@ -181,6 +174,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define AND_rm_r 0x21 #define ANDPD_x_xm 0x54 #define BSR_r_rm (/* GROUP_0F */ 0xbd) +#define BSF_r_rm (/* GROUP_0F */ 0xbc) #define CALL_i32 0xe8 #define CALL_rm (/* GROUP_FF */ 2 << 3) #define CDQ 0x99 @@ -194,6 +188,8 
@@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define CVTTSD2SI_r_xm 0x2c #define DIV (/* GROUP_F7 */ 6 << 3) #define DIVSD_x_xm 0x5e +#define FLDS 0xd9 +#define FLDL 0xdd #define FSTPS 0xd9 #define FSTPD 0xdd #define INT3 0xcc @@ -209,6 +205,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define JMP_rm (/* GROUP_FF */ 4 << 3) #define LEA_r_m 0x8d #define LOOP_i8 0xe2 +#define LZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbd) #define MOV_r_rm 0x8b #define MOV_r_i32 0xb8 #define MOV_rm_r 0x89 @@ -242,6 +239,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define PUSH_r 0x50 #define PUSH_rm (/* GROUP_FF */ 6 << 3) #define PUSHF 0x9c +#define ROL (/* SHIFT */ 0 << 3) +#define ROR (/* SHIFT */ 1 << 3) #define RET_near 0xc3 #define RET_i16 0xc2 #define SBB (/* BINARY */ 3 << 3) @@ -250,6 +249,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define SBB_rm_r 0x19 #define SAR (/* SHIFT */ 7 << 3) #define SHL (/* SHIFT */ 4 << 3) +#define SHLD (/* GROUP_0F */ 0xa5) +#define SHRD (/* GROUP_0F */ 0xad) #define SHR (/* SHIFT */ 5 << 3) #define SUB (/* BINARY */ 5 << 3) #define SUB_EAX_i32 0x2d @@ -258,6 +259,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define SUBSD_x_xm 0x5c #define TEST_EAX_i32 0xa9 #define TEST_rm_r 0x85 +#define TZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbc) #define UCOMISD_x_xm 0x2e #define UNPCKLPD_x_xm 0x14 #define XCHG_EAX_r 0x90 @@ -269,6 +271,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { #define XORPD_x_xm 0x57 #define GROUP_0F 0x0f +#define GROUP_F3 0xf3 #define GROUP_F7 0xf7 #define GROUP_FF 0xff #define GROUP_BINARY_81 0x81 @@ -290,10 +293,15 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = { /* Multithreading does not affect these static variables, since they store built-in CPU features. Therefore they can be overwritten by different threads if they detect the CPU features in the same time. */ +#define CPU_FEATURE_DETECTED 0x001 #if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) -static sljit_s32 cpu_has_sse2 = -1; +#define CPU_FEATURE_SSE2 0x002 #endif -static sljit_s32 cpu_has_cmov = -1; +#define CPU_FEATURE_LZCNT 0x004 +#define CPU_FEATURE_TZCNT 0x008 +#define CPU_FEATURE_CMOV 0x010 + +static sljit_u32 cpu_feature_list = 0; #ifdef _WIN32_WCE #include <cmnintrin.h> @@ -326,18 +334,65 @@ static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value) static void get_cpu_features(void) { - sljit_u32 features; + sljit_u32 feature_list = CPU_FEATURE_DETECTED; + sljit_u32 value; #if defined(_MSC_VER) && _MSC_VER >= 1400 int CPUInfo[4]; + + __cpuid(CPUInfo, 0); + if (CPUInfo[0] >= 7) { + __cpuidex(CPUInfo, 7, 0); + if (CPUInfo[1] & 0x8) + feature_list |= CPU_FEATURE_TZCNT; + } + + __cpuid(CPUInfo, (int)0x80000001); + if (CPUInfo[2] & 0x20) + feature_list |= CPU_FEATURE_LZCNT; + __cpuid(CPUInfo, 1); - features = (sljit_u32)CPUInfo[3]; + value = (sljit_u32)CPUInfo[3]; #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) /* AT&T syntax. 
*/ __asm__ ( + "movl $0x0, %%eax\n" + "lzcnt %%eax, %%eax\n" + "setnz %%al\n" + "movl %%eax, %0\n" + : "=g" (value) + : +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + : "eax" +#else + : "rax" +#endif + ); + + if (value & 0x1) + feature_list |= CPU_FEATURE_LZCNT; + + __asm__ ( + "movl $0x0, %%eax\n" + "tzcnt %%eax, %%eax\n" + "setnz %%al\n" + "movl %%eax, %0\n" + : "=g" (value) + : +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + : "eax" +#else + : "rax" +#endif + ); + + if (value & 0x1) + feature_list |= CPU_FEATURE_TZCNT; + + __asm__ ( "movl $0x1, %%eax\n" #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) /* On x86-32, there is no red zone, so this @@ -349,7 +404,7 @@ static void get_cpu_features(void) "pop %%ebx\n" #endif "movl %%edx, %0\n" - : "=g" (features) + : "=g" (value) : #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) : "%eax", "%ecx", "%edx" @@ -362,46 +417,82 @@ static void get_cpu_features(void) /* Intel syntax. */ __asm { + mov eax, 0 + lzcnt eax, eax + setnz al + mov value, eax + } + + if (value & 0x1) + feature_list |= CPU_FEATURE_LZCNT; + + __asm { + mov eax, 0 + tzcnt eax, eax + setnz al + mov value, eax + } + + if (value & 0x1) + feature_list |= CPU_FEATURE_TZCNT; + + __asm { mov eax, 1 cpuid - mov features, edx + mov value, edx } #endif /* _MSC_VER && _MSC_VER >= 1400 */ #if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) - cpu_has_sse2 = (features >> 26) & 0x1; + if (value & 0x4000000) + feature_list |= CPU_FEATURE_SSE2; #endif - cpu_has_cmov = (features >> 15) & 0x1; + if (value & 0x8000) + feature_list |= CPU_FEATURE_CMOV; + + cpu_feature_list = feature_list; } static sljit_u8 get_jump_code(sljit_uw type) { switch (type) { case SLJIT_EQUAL: - case SLJIT_EQUAL_F64: + case SLJIT_F_EQUAL: + case SLJIT_UNORDERED_OR_EQUAL: + case SLJIT_ORDERED_EQUAL: /* Not supported. */ return 0x84 /* je */; case SLJIT_NOT_EQUAL: - case SLJIT_NOT_EQUAL_F64: + case SLJIT_F_NOT_EQUAL: + case SLJIT_ORDERED_NOT_EQUAL: + case SLJIT_UNORDERED_OR_NOT_EQUAL: /* Not supported. 
*/ return 0x85 /* jne */; case SLJIT_LESS: case SLJIT_CARRY: - case SLJIT_LESS_F64: + case SLJIT_F_LESS: + case SLJIT_UNORDERED_OR_LESS: + case SLJIT_UNORDERED_OR_GREATER: return 0x82 /* jc */; case SLJIT_GREATER_EQUAL: case SLJIT_NOT_CARRY: - case SLJIT_GREATER_EQUAL_F64: + case SLJIT_F_GREATER_EQUAL: + case SLJIT_ORDERED_GREATER_EQUAL: + case SLJIT_ORDERED_LESS_EQUAL: return 0x83 /* jae */; case SLJIT_GREATER: - case SLJIT_GREATER_F64: + case SLJIT_F_GREATER: + case SLJIT_ORDERED_LESS: + case SLJIT_ORDERED_GREATER: return 0x87 /* jnbe */; case SLJIT_LESS_EQUAL: - case SLJIT_LESS_EQUAL_F64: + case SLJIT_F_LESS_EQUAL: + case SLJIT_UNORDERED_OR_GREATER_EQUAL: + case SLJIT_UNORDERED_OR_LESS_EQUAL: return 0x86 /* jbe */; case SLJIT_SIG_LESS: @@ -422,10 +513,10 @@ static sljit_u8 get_jump_code(sljit_uw type) case SLJIT_NOT_OVERFLOW: return 0x81 /* jno */; - case SLJIT_UNORDERED_F64: + case SLJIT_UNORDERED: return 0x8a /* jp */; - case SLJIT_ORDERED_F64: + case SLJIT_ORDERED: return 0x8b /* jpo */; } return 0; @@ -449,13 +540,13 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code else label_addr = jump->u.target - (sljit_uw)executable_offset; - short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127; - #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN) return generate_far_jump_code(jump, code_ptr); #endif + short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127; + if (type == SLJIT_JUMP) { if (short_jump) *code_ptr++ = JMP_i8; @@ -581,32 +672,33 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil jump = compiler->jumps; while (jump) { - jump_addr = jump->addr + (sljit_uw)executable_offset; + if (jump->flags & (PATCH_MB | PATCH_MW)) { + if (jump->flags & JUMP_LABEL) + jump_addr = jump->u.label->addr; + else + jump_addr = jump->u.target; - if (jump->flags & PATCH_MB) { - SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127); - *(sljit_u8*)jump->addr = U8(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))); - } else if (jump->flags & PATCH_MW) { - if (jump->flags & JUMP_LABEL) { -#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw)))); -#else - SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX); - sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32)))); -#endif - } - else { + jump_addr -= jump->addr + (sljit_uw)executable_offset; + + if (jump->flags & PATCH_MB) { + jump_addr -= sizeof(sljit_s8); + SLJIT_ASSERT((sljit_sw)jump_addr >= -128 && (sljit_sw)jump_addr <= 127); + *(sljit_u8*)jump->addr = U8(jump_addr); + } else { + jump_addr -= sizeof(sljit_s32); #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) - sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw)))); + sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump_addr); #else - SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && 
(sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
-			sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32))));
+			SLJIT_ASSERT((sljit_sw)jump_addr >= HALFWORD_MIN && (sljit_sw)jump_addr <= HALFWORD_MAX);
+			sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)jump_addr);
 #endif
 		}
 	}
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-	else if (jump->flags & PATCH_MD)
-		sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
+	else if (jump->flags & PATCH_MD) {
+		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+		sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
+	}
 #endif
 
 	jump = jump->next;
@@ -647,9 +739,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
 #ifdef SLJIT_IS_FPU_AVAILABLE
 		return SLJIT_IS_FPU_AVAILABLE;
 #elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-		if (cpu_has_sse2 == -1)
+		if (cpu_feature_list == 0)
 			get_cpu_features();
-		return cpu_has_sse2;
+		return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
 #else /* SLJIT_DETECT_SSE2 */
 		return 1;
 #endif /* SLJIT_DETECT_SSE2 */
@@ -657,31 +749,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 	case SLJIT_HAS_VIRTUAL_REGISTERS:
 		return 1;
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
 
 	case SLJIT_HAS_CLZ:
+		if (cpu_feature_list == 0)
+			get_cpu_features();
+
+		return (cpu_feature_list & CPU_FEATURE_LZCNT) ? 1 : 2;
+
+	case SLJIT_HAS_CTZ:
+		if (cpu_feature_list == 0)
+			get_cpu_features();
+
+		return (cpu_feature_list & CPU_FEATURE_TZCNT) ? 1 : 2;
+
 	case SLJIT_HAS_CMOV:
-		if (cpu_has_cmov == -1)
+		if (cpu_feature_list == 0)
 			get_cpu_features();
-		return cpu_has_cmov;
+		return (cpu_feature_list & CPU_FEATURE_CMOV) != 0;
 
+	case SLJIT_HAS_ROT:
 	case SLJIT_HAS_PREFETCH:
 		return 1;
 
 	case SLJIT_HAS_SSE2:
 #if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-		if (cpu_has_sse2 == -1)
+		if (cpu_feature_list == 0)
 			get_cpu_features();
-		return cpu_has_sse2;
-#else
+		return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
+#else /* !SLJIT_DETECT_SSE2 */
 		return 1;
-#endif
+#endif /* SLJIT_DETECT_SSE2 */
 
 	default:
 		return 0;
 	}
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+	if (type < SLJIT_UNORDERED || type > SLJIT_ORDERED_LESS_EQUAL)
+		return 0;
+
+	switch (type) {
+	case SLJIT_ORDERED_EQUAL:
+	case SLJIT_UNORDERED_OR_NOT_EQUAL:
+		return 0;
+	}
+
+	return 1;
+}
+
 /* --------------------------------------------------------------------- */
 /* Operators */
 /* --------------------------------------------------------------------- */
@@ -1385,47 +1503,75 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 static const sljit_sw emit_clz_arg = 32 + 31;
+static const sljit_sw emit_ctz_arg = 32;
 #endif
 
-static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
+static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 is_clz,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw)
 {
 	sljit_u8* inst;
 	sljit_s32 dst_r;
+	sljit_sw max;
 
-	SLJIT_UNUSED_ARG(op_flags);
-
-	if (cpu_has_cmov == -1)
+	if (cpu_feature_list == 0)
 		get_cpu_features();
 
 	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
 
+	if (is_clz ? (cpu_feature_list & CPU_FEATURE_LZCNT) : (cpu_feature_list & CPU_FEATURE_TZCNT)) {
+		/* Group prefix added separately. */
+		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+		FAIL_IF(!inst);
+		INC_SIZE(1);
+		*inst++ = GROUP_F3;
+
+		inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
+		FAIL_IF(!inst);
+		*inst++ = GROUP_0F;
+		*inst = is_clz ? LZCNT_r_rm : TZCNT_r_rm;
+
+		if (dst & SLJIT_MEM)
+			EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
+		return SLJIT_SUCCESS;
+	}
+
 	inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
 	FAIL_IF(!inst);
 	*inst++ = GROUP_0F;
-	*inst = BSR_r_rm;
+	*inst = is_clz ? BSR_r_rm : BSF_r_rm;
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	if (cpu_has_cmov) {
+	max = is_clz ? (32 + 31) : 32;
+
+	if (cpu_feature_list & CPU_FEATURE_CMOV) {
 		if (dst_r != TMP_REG1) {
-			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
+			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, max);
 			inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
 		}
 		else
-			inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);
+			inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), is_clz ? (sljit_sw)&emit_clz_arg : (sljit_sw)&emit_ctz_arg);
 
 		FAIL_IF(!inst);
 		*inst++ = GROUP_0F;
 		*inst = CMOVE_r_rm;
 	}
 	else
-		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));
+		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
 
-	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
+	if (is_clz) {
+		inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
+		FAIL_IF(!inst);
+		*(inst + 1) |= XOR;
+	}
 #else
-	if (cpu_has_cmov) {
-		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_32) ? (64 + 63) : (32 + 31));
+	if (is_clz)
+		max = compiler->mode32 ? (32 + 31) : (64 + 63);
+	else
+		max = compiler->mode32 ? 32 : 64;
+
+	if (cpu_feature_list & CPU_FEATURE_CMOV) {
+		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, max);
 
 		inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
 		FAIL_IF(!inst);
@@ -1433,14 +1579,15 @@ static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
 		*inst = CMOVE_r_rm;
 	}
 	else
-		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_32) ? (64 + 63) : (32 + 31)));
+		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
 
-	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_32) ? 63 : 31, dst_r, 0);
+	if (is_clz) {
+		inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, max >> 1, dst_r, 0);
+		FAIL_IF(!inst);
+		*(inst + 1) |= XOR;
+	}
 #endif
 
-	FAIL_IF(!inst);
-	*(inst + 1) |= XOR;
-
 	if (dst & SLJIT_MEM)
 		EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
 	return SLJIT_SUCCESS;
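
The BSR/BSF fallback emitted above relies on two x86 details: BSR returns the index of the highest set bit (so CLZ is obtained by XOR-ing the result with 31, or 63 in 64-bit mode), and both bit-scan instructions leave the destination undefined for a zero input, which is why a CMOVE loads a precomputed constant (32 + 31 for CLZ, 32 for CTZ) first. The following stand-alone C sketch is illustrative only, not part of the patch, and its helper names are invented; it mirrors what the generated 32-bit sequence computes:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the BSR-based SLJIT_CLZ sequence: bit scan, CMOVE of 32 + 31 on
   zero input, then the trailing XOR 31, so that clz(0) == 32. */
static uint32_t clz32_like_emitted_code(uint32_t x)
{
	uint32_t r = 32 + 31;                 /* value the CMOVE loads when x == 0 */
	if (x != 0)
		for (r = 31; !(x & 0x80000000u); x <<= 1)
			r--;                          /* r ends up equal to BSR(x) */
	return r ^ 31;
}

/* Mirrors the BSF-based SLJIT_CTZ sequence: no XOR is needed because BSF
   already yields the number of trailing zeros for a non-zero input. */
static uint32_t ctz32_like_emitted_code(uint32_t x)
{
	uint32_t r = 32;                      /* value the CMOVE loads when x == 0 */
	if (x != 0)
		for (r = 0; !(x & 1u); x >>= 1)
			r++;                          /* r ends up equal to BSF(x) */
	return r;
}

int main(void)
{
	printf("%u %u\n", clz32_like_emitted_code(0x00010000u), clz32_like_emitted_code(0u)); /* 15 32 */
	printf("%u %u\n", ctz32_like_emitted_code(0x00010000u), ctz32_like_emitted_code(0u)); /* 16 32 */
	return 0;
}

When get_cpu_features() reports LZCNT/TZCNT, none of this is needed and a single instruction is emitted instead, which is also why SLJIT_HAS_CLZ and SLJIT_HAS_CTZ now return 1 or 2 rather than a plain boolean.
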
@@ -1578,7 +1725,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
 		return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
 
 	case SLJIT_CLZ:
-		return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
+	case SLJIT_CTZ:
+		return emit_clz_ctz(compiler, (op == SLJIT_CLZ), dst, dstw, src, srcw);
 	}
 
 	return SLJIT_SUCCESS;
@@ -2116,6 +2264,9 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
 {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	sljit_s32 mode32;
+#endif
 	sljit_u8* inst;
 
 	if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) {
@@ -2155,40 +2306,61 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
 		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
 		FAIL_IF(!inst);
 		*inst |= mode;
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+		return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
 	}
-	else if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+
+	if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
 		if (src1 != dst)
 			EMIT_MOV(compiler, dst, 0, src1, src1w);
 
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		mode32 = compiler->mode32;
+		compiler->mode32 = 0;
+#endif
 		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = mode32;
+#endif
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
 		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0);
 		FAIL_IF(!inst);
 		*inst |= mode;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = 0;
+#endif
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = mode32;
+#endif
+
+		return SLJIT_SUCCESS;
 	}
-	else {
-		/* This case is complex since ecx itself may be used for
-		   addressing, and this case must be supported as well. */
-		EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+
+	/* This case is complex since ecx itself may be used for
+	   addressing, and this case must be supported as well. */
+	EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-		EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
-		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst |= mode;
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
+	EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
+#else /* !SLJIT_CONFIG_X86_32 */
+	mode32 = compiler->mode32;
+	compiler->mode32 = 0;
+	EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+	compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+	EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+	inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+	FAIL_IF(!inst);
+	*inst |= mode;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
 #else
-		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
-		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst |= mode;
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
-#endif
-		if (dst != TMP_REG1)
-			return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
-	}
+	compiler->mode32 = 0;
+	EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
+	compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+	if (dst != TMP_REG1)
+		return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
 
 	return SLJIT_SUCCESS;
 }
@@ -2202,12 +2374,13 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
 	/* The CPU does not set flags if the shift count is 0. */
 	if (src2 & SLJIT_IMM) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-		if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0))
-			return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#else
-		if ((src2w & 0x1f) != 0)
+		src2w &= compiler->mode32 ? 0x1f : 0x3f;
+#else /* !SLJIT_CONFIG_X86_64 */
+		src2w &= 0x1f;
+#endif /* SLJIT_CONFIG_X86_64 */
+		if (src2w != 0)
 			return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#endif
+
 		if (!set_flags)
 			return emit_mov(compiler, dst, dstw, src1, src1w);
 		/* OR dst, src, 0 */
@@ -2289,14 +2462,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
 		return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_SHL:
+	case SLJIT_MSHL:
 		return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_LSHR:
+	case SLJIT_MLSHR:
 		return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_ASHR:
+	case SLJIT_MASHR:
 		return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
+	case SLJIT_ROTL:
+		return emit_shift_with_flags(compiler, ROL, 0,
+			dst, dstw, src1, src1w, src2, src2w);
+	case SLJIT_ROTR:
+		return emit_shift_with_flags(compiler, ROR, 0,
+			dst, dstw, src1, src1w, src2, src2w);
 	}
 
 	return SLJIT_SUCCESS;
@@ -2312,10 +2494,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
 	CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
 
 	if (opcode != SLJIT_SUB && opcode != SLJIT_AND) {
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
-		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-		compiler->skip_checks = 1;
-#endif
+		SLJIT_SKIP_CHECKS(compiler);
 		return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
 	}
 
@@ -2334,6 +2513,122 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
 	return emit_test_binary(compiler, src1, src1w, src2, src2w);
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 src_dst,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w)
+{
+	sljit_s32 restore_ecx = 0;
+	sljit_s32 is_rotate, is_left;
+	sljit_u8* inst;
+	sljit_sw dstw = 0;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	sljit_s32 tmp2 = SLJIT_MEM1(SLJIT_SP);
+#else /* !SLJIT_CONFIG_X86_32 */
+	sljit_s32 tmp2 = TMP_REG2;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
+	ADJUST_LOCAL_OFFSET(src1, src1w);
+	ADJUST_LOCAL_OFFSET(src2, src2w);
+
+	CHECK_EXTRA_REGS(src1, src1w, (void)0);
+	CHECK_EXTRA_REGS(src2, src2w, (void)0);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	compiler->mode32 = op & SLJIT_32;
+#endif
+
+	if (src2 & SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+		src2w &= 0x1f;
+#else /* !SLJIT_CONFIG_X86_32 */
+		src2w &= (op & SLJIT_32) ? 0x1f : 0x3f;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+		if (src2w == 0)
+			return SLJIT_SUCCESS;
+	}
+
+	is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
+	is_rotate = (src_dst == src1);
+	CHECK_EXTRA_REGS(src_dst, dstw, (void)0);
+
+	if (is_rotate)
+		return emit_shift(compiler, is_left ? ROL : ROR, src_dst, dstw, src1, src1w, src2, src2w);
+
+	if ((src2 & SLJIT_IMM) || src2 == SLJIT_PREF_SHIFT_REG) {
+		if (!FAST_IS_REG(src1)) {
+			EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+			src1 = TMP_REG1;
+		}
+	} else if (FAST_IS_REG(src1)) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = 0;
+#endif
+		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = op & SLJIT_32;
+#endif
+		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+
+		if (src1 == SLJIT_PREF_SHIFT_REG)
+			src1 = TMP_REG1;
+
+		if (src_dst == SLJIT_PREF_SHIFT_REG)
+			src_dst = TMP_REG1;
+
+		restore_ecx = 1;
+	} else {
+		EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = 0;
+#endif
+		EMIT_MOV(compiler, tmp2, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+		compiler->mode32 = op & SLJIT_32;
+#endif
+		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+
+		src1 = TMP_REG1;
+
+		if (src_dst == SLJIT_PREF_SHIFT_REG) {
+			src_dst = tmp2;
+			SLJIT_ASSERT(dstw == 0);
+		}
+
+		restore_ecx = 2;
+	}
+
+	inst = emit_x86_instruction(compiler, 2, src1, 0, src_dst, dstw);
+	FAIL_IF(!inst);
+	inst[0] = GROUP_0F;
+
+	if (src2 & SLJIT_IMM) {
+		inst[1] = U8((is_left ? SHLD : SHRD) - 1);
+
+		/* Immedate argument is added separately. */
+		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+		FAIL_IF(!inst);
+		INC_SIZE(1);
+		*inst = U8(src2w);
+	} else
+		inst[1] = U8(is_left ? SHLD : SHRD);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	compiler->mode32 = 0;
+#endif
+
+	if (restore_ecx == 1)
+		return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+	if (restore_ecx == 2)
+		return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, tmp2, 0);
+
+	return SLJIT_SUCCESS;
+}
+
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
 	sljit_s32 src, sljit_sw srcw)
 {
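
The new sljit_emit_shift_into above is built around the x86 SHLD/SHRD double-shift instructions: the destination operand is shifted and its vacated bits are filled from a second register, which also explains the is_rotate shortcut (when src_dst == src1 the operation degenerates into a rotate, so it is redirected to emit_shift with ROL/ROR). A minimal C model of the 32-bit semantics, illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* SHLD-style behaviour selected when is_left is true: dst is shifted left and
   its vacated low bits are filled from the top of src. Counts are masked to
   5 bits, as the hardware does for 32-bit operands. */
static uint32_t shld32(uint32_t dst, uint32_t src, unsigned count)
{
	count &= 0x1f;
	if (count == 0)
		return dst;   /* the emitter also returns early for an immediate zero count */
	return (dst << count) | (src >> (32 - count));
}

/* SHRD counterpart used when is_left is false. */
static uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count)
{
	count &= 0x1f;
	if (count == 0)
		return dst;
	return (dst >> count) | (src << (32 - count));
}

int main(void)
{
	/* With src == dst the result is a rotation, matching the is_rotate path. */
	printf("%08x\n", shld32(0x12345678u, 0x12345678u, 8));  /* 34567812 */
	printf("%08x\n", shrd32(0xaabbccddu, 0x11223344u, 16)); /* 3344aabb */
	return 0;
}

The restore_ecx bookkeeping exists only because a variable shift count has to live in ECX on x86; the previous value of SLJIT_PREF_SHIFT_REG is parked in TMP_REG1 or tmp2 and moved back at the end.
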
@@ -2516,6 +2811,19 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compile
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
 {
+	switch (GET_FLAG_TYPE(op)) {
+	case SLJIT_ORDERED_LESS:
+	case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+	case SLJIT_UNORDERED_OR_GREATER:
+	case SLJIT_ORDERED_LESS_EQUAL:
+		if (!FAST_IS_REG(src2)) {
+			FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src2, src2w));
+			src2 = TMP_FREG;
+		}
+
+		return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_32), src2, src1, src1w);
+	}
+
 	if (!FAST_IS_REG(src1)) {
 		FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
 		src1 = TMP_FREG;
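
The early-out added to sljit_emit_fop1_cmp swaps the UCOMISD operand order for the four listed comparison types. The reason is the flag pattern UCOMISD produces: an unordered result sets ZF, PF and CF, "greater" clears all three, and "less" sets only CF, so a plain "below" test after comparing src1 with src2 would also fire on NaN, while an "above" test after the reversed comparison keeps the unordered case out without a separate PF check. A small C model of that flag logic, illustrative only and not part of the patch:

#include <math.h>
#include <stdio.h>

/* Flag triple produced by UCOMISD a, b: unordered -> ZF=PF=CF=1,
   a > b -> all clear, a < b -> CF=1, a == b -> ZF=1. */
struct eflags { int zf, pf, cf; };

static struct eflags ucomisd(double a, double b)
{
	struct eflags f = {0, 0, 0};
	if (isnan(a) || isnan(b))
		f.zf = f.pf = f.cf = 1;
	else if (a < b)
		f.cf = 1;
	else if (a == b)
		f.zf = 1;
	return f;
}

int main(void)
{
	double a = 1.0, b = 2.0, n = NAN;
	struct eflags f;

	/* Naive ORDERED_LESS(a, b): ucomisd a, b followed by "below" (CF set).
	   It wrongly fires when an operand is NaN, because unordered also sets CF. */
	f = ucomisd(a, n);
	printf("naive, NaN operand: %d\n", f.cf);           /* 1 (wrong) */

	/* Reversed operands as in the patch: ucomisd b, a followed by "above"
	   (CF and ZF both clear), which excludes the unordered case. */
	f = ucomisd(b, a);
	printf("swapped, 1 < 2:     %d\n", !f.cf && !f.zf); /* 1 */
	f = ucomisd(n, a);
	printf("swapped, NaN:       %d\n", !f.cf && !f.zf); /* 0 */
	return 0;
}

The new sljit_cmp_info earlier in this patch treats SLJIT_ORDERED_EQUAL and SLJIT_UNORDERED_OR_NOT_EQUAL differently from the rest of that range, which is consistent with the same flag behaviour: ZF is set both for equality and for an unordered result, so those two types cannot be decided from CF/ZF alone.
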
@@ -2769,7 +3077,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
 	ADJUST_LOCAL_OFFSET(dst, dstw);
 	CHECK_EXTRA_REGS(dst, dstw, (void)0);
 
-	type &= 0xff;
 	/* setcc = jcc + 0x10. */
 	cond_set = U8(get_jump_code((sljit_uw)type) + 0x10);
 
@@ -2813,10 +3120,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
 		return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
 	}
 
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
-		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-	compiler->skip_checks = 1;
-#endif
+	SLJIT_SKIP_CHECKS(compiler);
 	return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
 
 #else
@@ -2839,10 +3143,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
 	}
 
 	/* Low byte is not accessible. */
-	if (cpu_has_cmov == -1)
+	if (cpu_feature_list == 0)
 		get_cpu_features();
 
-	if (cpu_has_cmov) {
+	if (cpu_feature_list & CPU_FEATURE_CMOV) {
 		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
 		/* a xor reg, reg operation would overwrite the flags. */
 		EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0);
@@ -2927,10 +3231,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
 	if (GET_OPCODE(op) < SLJIT_ADD)
 		return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
 
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
-		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-	compiler->skip_checks = 1;
-#endif
+	SLJIT_SKIP_CHECKS(compiler);
 	return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
 #endif /* SLJIT_CONFIG_X86_64 */
 }
@@ -2945,7 +3246,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compil
 	CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	dst_reg &= ~SLJIT_32;
+	type &= ~SLJIT_32;
 
 	if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
 		return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
@@ -2958,8 +3259,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compil
 	CHECK_EXTRA_REGS(src, srcw, (void)0);
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-	compiler->mode32 = dst_reg & SLJIT_32;
-	dst_reg &= ~SLJIT_32;
+	compiler->mode32 = type & SLJIT_32;
+	type &= ~SLJIT_32;
 #endif
 
 	if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
@@ -2971,7 +3272,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compil
 	inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
 	FAIL_IF(!inst);
 	*inst++ = GROUP_0F;
-	*inst = U8(get_jump_code(type & 0xff) - 0x40);
+	*inst = U8(get_jump_code((sljit_uw)type) - 0x40);
 
 	return SLJIT_SUCCESS;
 }
diff --git a/thirdparty/pcre2/src/sljit/sljitWXExecAllocator.c b/thirdparty/pcre2/src/sljit/sljitWXExecAllocator.c
index 72d5b8dd2b..6893813155 100644
--- a/thirdparty/pcre2/src/sljit/sljitWXExecAllocator.c
+++ b/thirdparty/pcre2/src/sljit/sljitWXExecAllocator.c
@@ -59,38 +59,15 @@
 #include <sys/mman.h>
 
 #ifdef __NetBSD__
-#if defined(PROT_MPROTECT)
-#define check_se_protected(ptr, size) (0)
 #define SLJIT_PROT_WX PROT_MPROTECT(PROT_EXEC)
-#else /* !PROT_MPROTECT */
-#ifdef _NETBSD_SOURCE
-#include <sys/param.h>
-#else /* !_NETBSD_SOURCE */
-typedef unsigned int u_int;
-#define devmajor_t sljit_s32
-#endif /* _NETBSD_SOURCE */
-#include <sys/sysctl.h>
-#include <unistd.h>
-
-#define check_se_protected(ptr, size) netbsd_se_protected()
-
-static SLJIT_INLINE int netbsd_se_protected(void)
-{
-	int mib[3];
-	int paxflags;
-	size_t len = sizeof(paxflags);
-
-	mib[0] = CTL_PROC;
-	mib[1] = getpid();
-	mib[2] = PROC_PID_PAXFLAGS;
-
-	if (SLJIT_UNLIKELY(sysctl(mib, 3, &paxflags, &len, NULL, 0) < 0))
-		return -1;
-
-	return (paxflags & CTL_PROC_PAXFLAGS_MPROTECT) ? -1 : 0;
-}
-#endif /* PROT_MPROTECT */
+#define check_se_protected(ptr, size) (0)
 #else /* POSIX */
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+#include <pthread.h>
+#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
+#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
+#endif /* !SLJIT_SINGLE_THREADED */
+
 #define check_se_protected(ptr, size) generic_se_protected(ptr, size)
 
 static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
@@ -102,22 +79,20 @@ static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
 }
 #endif /* NetBSD */
 
-#if defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED
+#ifndef SLJIT_SE_LOCK
 #define SLJIT_SE_LOCK()
+#endif
+#ifndef SLJIT_SE_UNLOCK
 #define SLJIT_SE_UNLOCK()
-#else /* !SLJIT_SINGLE_THREADED */
-#include <pthread.h>
-#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
-#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
-#endif /* SLJIT_SINGLE_THREADED */
-
+#endif
 #ifndef SLJIT_PROT_WX
 #define SLJIT_PROT_WX 0
-#endif /* !SLJIT_PROT_WX */
+#endif
 
 SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
 {
-#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
+#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) \
+	&& !defined(__NetBSD__)
 	static pthread_mutex_t se_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif
 	static int se_protected = !SLJIT_PROT_WX;