summary refs log tree commit diff
path: root/thirdparty/libwebsockets/core
diff options
context:
space:
mode:
Diffstat (limited to 'thirdparty/libwebsockets/core')
-rw-r--r--  thirdparty/libwebsockets/core/alloc.c            92
-rw-r--r--  thirdparty/libwebsockets/core/context.c        1962
-rw-r--r--  thirdparty/libwebsockets/core/libwebsockets.c  3479
-rw-r--r--  thirdparty/libwebsockets/core/output.c          320
-rw-r--r--  thirdparty/libwebsockets/core/pollfd.c          616
-rw-r--r--  thirdparty/libwebsockets/core/private.h        1770
-rw-r--r--  thirdparty/libwebsockets/core/service.c         987
7 files changed, 9226 insertions(+), 0 deletions(-)
diff --git a/thirdparty/libwebsockets/core/alloc.c b/thirdparty/libwebsockets/core/alloc.c
new file mode 100644
index 0000000000..f169fc3767
--- /dev/null
+++ b/thirdparty/libwebsockets/core/alloc.c
@@ -0,0 +1,92 @@
+#include "core/private.h"
+
+#if defined(LWS_PLAT_OPTEE)
+
+#define TEE_USER_MEM_HINT_NO_FILL_ZERO 0x80000000
+
/*
 * Weak stub implementations of the OP-TEE memory API.  They exist only so
 * the library links when no real TEE implementation is present; a real TEE
 * environment provides strong symbols that override these, so the NULL
 * returns here are never seen in a functioning OP-TEE build.
 */
void *__attribute__((weak))
	TEE_Malloc(uint32_t size, uint32_t hint)
{
	return NULL;
}
void *__attribute__((weak))
	TEE_Realloc(void *buffer, uint32_t newSize)
{
	return NULL;
}
void __attribute__((weak))
	TEE_Free(void *buffer)
{
}
+
/*
 * lws_realloc() - OP-TEE build: delegate straight to TEE_Realloc().
 * The "reason" tag used for allocation logging elsewhere is ignored here.
 */
void *lws_realloc(void *ptr, size_t size, const char *reason)
{
	return TEE_Realloc(ptr, size);
}
+
/*
 * lws_malloc() - OP-TEE build: uninitialized allocation via TEE_Malloc().
 * NO_FILL_ZERO asks the TEE not to zero the memory (plain malloc semantics).
 */
void *lws_malloc(size_t size, const char *reason)
{
	return TEE_Malloc(size, TEE_USER_MEM_HINT_NO_FILL_ZERO);
}
+
/* lws_free() - OP-TEE build: release memory obtained from TEE_Malloc(). */
void lws_free(void *p)
{
	TEE_Free(p);
}
+
+void *lws_zalloc(size_t size, const char *reason)
+{
+ void *ptr = TEE_Malloc(size, TEE_USER_MEM_HINT_NO_FILL_ZERO);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+
/*
 * lws_set_allocator() - OP-TEE build: custom allocators are not supported
 * on this platform, so the callback is deliberately ignored.
 */
void lws_set_allocator(void *(*cb)(void *ptr, size_t size, const char *reason))
{
	(void)cb;
}
+#else
+
/*
 * Default allocator used when the user has not installed one with
 * lws_set_allocator(): malloc / realloc / free semantics folded into a
 * single realloc-style entry point.
 *
 *  - size != 0: (re)allocate, logging the size and "reason" tag
 *  - size == 0: free ptr (if any) and return NULL
 */
static void *_realloc(void *ptr, size_t size, const char *reason)
{
	if (size) {
#if defined(LWS_WITH_ESP32)
		lwsl_notice("%s: size %lu: %s (free heap %d)\n", __func__,
			    (unsigned long)size, reason,
			    (unsigned int)esp_get_free_heap_size() - (int)size);
#else
		lwsl_debug("%s: size %lu: %s\n", __func__,
			   (unsigned long)size, reason);
#endif
		/*
		 * This function is compiled only in the !LWS_PLAT_OPTEE
		 * branch of this file, so the old nested
		 * "#if defined(LWS_PLAT_OPTEE) ... TEE_Realloc()" path could
		 * never be taken: that dead code (and the redundant (void *)
		 * cast on realloc()) has been removed.
		 */
		return realloc(ptr, size);
	}

	/* free(NULL) is a defined no-op, so no need to guard it */
	free(ptr);

	return NULL;
}
+
/* active allocator hook; points at _realloc until lws_set_allocator() */
void *(*_lws_realloc)(void *ptr, size_t size, const char *reason) = _realloc;

/*
 * lws_realloc() - resize (or allocate / free) through the current allocator
 * hook; "reason" is a tag used only for allocation logging.
 */
void *lws_realloc(void *ptr, size_t size, const char *reason)
{
	return _lws_realloc(ptr, size, reason);
}
+
/* lws_zalloc() - allocate a zero-filled block via the current allocator hook. */
void *lws_zalloc(size_t size, const char *reason)
{
	void *p = _lws_realloc(NULL, size, reason);

	if (!p)
		return NULL;

	memset(p, 0, size);

	return p;
}
+
+void lws_set_allocator(void *(*cb)(void *ptr, size_t size, const char *reason))
+{
+ _lws_realloc = cb;
+}
+#endif
diff --git a/thirdparty/libwebsockets/core/context.c b/thirdparty/libwebsockets/core/context.c
new file mode 100644
index 0000000000..7be004df33
--- /dev/null
+++ b/thirdparty/libwebsockets/core/context.c
@@ -0,0 +1,1962 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "core/private.h"
+
+#ifndef LWS_BUILD_HASH
+#define LWS_BUILD_HASH "unknown-build-hash"
+#endif
+
/*
 * Table of the role ops compiled into this build (h2 / h1 / ws, as
 * configured), NULL-terminated so it can be walked with the
 * LWS_FOR_EVERY_AVAILABLE_ROLE_START/_END iteration macros.
 */
const struct lws_role_ops *available_roles[] = {
#if defined(LWS_ROLE_H2)
	&role_ops_h2,
#endif
#if defined(LWS_ROLE_H1)
	&role_ops_h1,
#endif
#if defined(LWS_ROLE_WS)
	&role_ops_ws,
#endif
	NULL
};
+
/*
 * Table of the event-loop backends compiled into this build (poll / libuv /
 * libevent / libev), NULL-terminated.  lws_create_context() selects one of
 * these as context->event_loop_ops.
 */
const struct lws_event_loop_ops *available_event_libs[] = {
#if defined(LWS_WITH_POLL)
	&event_loop_ops_poll,
#endif
#if defined(LWS_WITH_LIBUV)
	&event_loop_ops_uv,
#endif
#if defined(LWS_WITH_LIBEVENT)
	&event_loop_ops_event,
#endif
#if defined(LWS_WITH_LIBEV)
	&event_loop_ops_ev,
#endif
	NULL
};
+
/* version string composed at compile time: "<version> <git build hash>" */
static const char *library_version = LWS_LIBRARY_VERSION " " LWS_BUILD_HASH;

/**
 * lws_get_library_version: get version and git hash library built from
 *
 * returns a const char * to a string like "1.1 178d78c"
 * representing the library version followed by the git head hash it
 * was built from
 */
LWS_VISIBLE const char *
lws_get_library_version(void)
{
	return library_version;
}
+
/*
 * Called after TLS ALPN selects a protocol string: find the role that
 * registered that ALPN id and hand the wsi over to it.  Returns 0 (not
 * fatal) when TLS is absent, no alpn was negotiated or no role matches.
 */
int
lws_role_call_alpn_negotiated(struct lws *wsi, const char *alpn)
{
#if defined(LWS_WITH_TLS)
	if (!alpn)
		return 0;

	lwsl_info("%s: '%s'\n", __func__, alpn);

	LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar)
		if (ar->alpn && !strcmp(ar->alpn, alpn) && ar->alpn_negotiated)
			return ar->alpn_negotiated(wsi, alpn);
	LWS_FOR_EVERY_AVAILABLE_ROLE_END;
#endif
	return 0;
}
+
/*
 * Printable prefixes for mount origin types, indexed by the mount's
 * origin_protocol value -- so the order here must match that enum.
 */
static const char * const mount_protocols[] = {
	"http://",
	"https://",
	"file://",
	"cgi://",
	">http://",
	">https://",
	"callback://"
};
+
+LWS_VISIBLE void *
+lws_protocol_vh_priv_zalloc(struct lws_vhost *vhost,
+ const struct lws_protocols *prot, int size)
+{
+ int n = 0;
+
+ /* allocate the vh priv array only on demand */
+ if (!vhost->protocol_vh_privs) {
+ vhost->protocol_vh_privs = (void **)lws_zalloc(
+ vhost->count_protocols * sizeof(void *),
+ "protocol_vh_privs");
+ if (!vhost->protocol_vh_privs)
+ return NULL;
+ }
+
+ while (n < vhost->count_protocols && &vhost->protocols[n] != prot)
+ n++;
+
+ if (n == vhost->count_protocols) {
+ n = 0;
+ while (n < vhost->count_protocols &&
+ strcmp(vhost->protocols[n].name, prot->name))
+ n++;
+
+ if (n == vhost->count_protocols)
+ return NULL;
+ }
+
+ vhost->protocol_vh_privs[n] = lws_zalloc(size, "vh priv");
+ return vhost->protocol_vh_privs[n];
+}
+
+LWS_VISIBLE void *
+lws_protocol_vh_priv_get(struct lws_vhost *vhost,
+ const struct lws_protocols *prot)
+{
+ int n = 0;
+
+ if (!vhost || !vhost->protocol_vh_privs || !prot)
+ return NULL;
+
+ while (n < vhost->count_protocols && &vhost->protocols[n] != prot)
+ n++;
+
+ if (n == vhost->count_protocols) {
+ n = 0;
+ while (n < vhost->count_protocols &&
+ strcmp(vhost->protocols[n].name, prot->name))
+ n++;
+
+ if (n == vhost->count_protocols) {
+ lwsl_err("%s: unknown protocol %p\n", __func__, prot);
+ return NULL;
+ }
+ }
+
+ return vhost->protocol_vh_privs[n];
+}
+
+static const struct lws_protocol_vhost_options *
+lws_vhost_protocol_options(struct lws_vhost *vh, const char *name)
+{
+ const struct lws_protocol_vhost_options *pvo = vh->pvo;
+
+ if (!name)
+ return NULL;
+
+ while (pvo) {
+ if (!strcmp(pvo->name, name))
+ return pvo;
+ pvo = pvo->next;
+ }
+
+ return NULL;
+}
+
+/*
+ * inform every vhost that hasn't already done it, that
+ * his protocols are initializing
+ */
+LWS_VISIBLE int
+lws_protocol_init(struct lws_context *context)
+{
+ struct lws_vhost *vh = context->vhost_list;
+ const struct lws_protocol_vhost_options *pvo, *pvo1;
+ struct lws wsi;
+ int n, any = 0;
+
+ if (context->doing_protocol_init)
+ return 0;
+
+ context->doing_protocol_init = 1;
+
+ memset(&wsi, 0, sizeof(wsi));
+ wsi.context = context;
+
+ lwsl_info("%s\n", __func__);
+
+ while (vh) {
+ wsi.vhost = vh;
+
+ /* only do the protocol init once for a given vhost */
+ if (vh->created_vhost_protocols ||
+ (vh->options & LWS_SERVER_OPTION_SKIP_PROTOCOL_INIT))
+ goto next;
+
+ /* initialize supported protocols on this vhost */
+
+ for (n = 0; n < vh->count_protocols; n++) {
+ wsi.protocol = &vh->protocols[n];
+ if (!vh->protocols[n].name)
+ continue;
+ pvo = lws_vhost_protocol_options(vh,
+ vh->protocols[n].name);
+ if (pvo) {
+ /*
+ * linked list of options specific to
+ * vh + protocol
+ */
+ pvo1 = pvo;
+ pvo = pvo1->options;
+
+ while (pvo) {
+ lwsl_debug(
+ " vhost \"%s\", "
+ "protocol \"%s\", "
+ "option \"%s\"\n",
+ vh->name,
+ vh->protocols[n].name,
+ pvo->name);
+
+ if (!strcmp(pvo->name, "default")) {
+ lwsl_info("Setting default "
+ "protocol for vh %s to %s\n",
+ vh->name,
+ vh->protocols[n].name);
+ vh->default_protocol_index = n;
+ }
+ if (!strcmp(pvo->name, "raw")) {
+ lwsl_info("Setting raw "
+ "protocol for vh %s to %s\n",
+ vh->name,
+ vh->protocols[n].name);
+ vh->raw_protocol_index = n;
+ }
+ pvo = pvo->next;
+ }
+
+ pvo = pvo1->options;
+ }
+
+#if defined(LWS_WITH_TLS)
+ any |= !!vh->tls.ssl_ctx;
+#endif
+
+ /*
+ * inform all the protocols that they are doing their
+ * one-time initialization if they want to.
+ *
+ * NOTE the wsi is all zeros except for the context, vh
+ * + protocol ptrs so lws_get_context(wsi) etc can work
+ */
+ if (vh->protocols[n].callback(&wsi,
+ LWS_CALLBACK_PROTOCOL_INIT, NULL,
+ (void *)pvo, 0)) {
+ lws_free(vh->protocol_vh_privs[n]);
+ vh->protocol_vh_privs[n] = NULL;
+ lwsl_err("%s: protocol %s failed init\n", __func__,
+ vh->protocols[n].name);
+ }
+ }
+
+ vh->created_vhost_protocols = 1;
+next:
+ vh = vh->vhost_next;
+ }
+
+ context->doing_protocol_init = 0;
+
+ if (!context->protocol_init_done)
+ lws_finalize_startup(context);
+
+ context->protocol_init_done = 1;
+
+ if (any)
+ lws_tls_check_all_cert_lifetimes(context);
+
+ return 0;
+}
+
/*
 * Default handler for http-level callbacks: serves 404 for plain HTTP
 * requests, shuttles CGI stdin/stdout/stderr traffic, and pumps proxied
 * client connections back to the parent wsi.  User protocols can delegate
 * any reason codes they don't handle themselves to this function.
 */
LWS_VISIBLE int
lws_callback_http_dummy(struct lws *wsi, enum lws_callback_reasons reason,
			void *user, void *in, size_t len)
{
	struct lws_ssl_info *si;
#ifdef LWS_WITH_CGI
	struct lws_cgi_args *args;
#endif
#if defined(LWS_WITH_CGI) || defined(LWS_WITH_HTTP_PROXY)
	char buf[512];
	int n;
#endif

	switch (reason) {
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	case LWS_CALLBACK_HTTP:
#ifndef LWS_NO_SERVER
		if (lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL))
			return -1;

		/* note: with LWS_NO_SERVER this collapses to a bare return -1 */
		if (lws_http_transaction_completed(wsi))
#endif
			return -1;
		break;
#if !defined(LWS_NO_SERVER)
	case LWS_CALLBACK_HTTP_FILE_COMPLETION:
		if (lws_http_transaction_completed(wsi))
			return -1;
		break;
#endif

	case LWS_CALLBACK_HTTP_WRITEABLE:
#ifdef LWS_WITH_CGI
		/* drain pending CGI stdout (headers first) toward the client */
		if (wsi->reason_bf & (LWS_CB_REASON_AUX_BF__CGI_HEADERS |
				      LWS_CB_REASON_AUX_BF__CGI)) {
			n = lws_cgi_write_split_stdout_headers(wsi);
			if (n < 0) {
				lwsl_debug("AUX_BF__CGI forcing close\n");
				return -1;
			}
			if (!n)
				/* nothing written: re-enable POLLIN on stdout */
				lws_rx_flow_control(
					wsi->http.cgi->stdwsi[LWS_STDOUT], 1);

			if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_HEADERS)
				wsi->reason_bf &=
					~LWS_CB_REASON_AUX_BF__CGI_HEADERS;
			else
				wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__CGI;
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_CHUNK_END) {
			if (!wsi->http2_substream) {
				/* h1: send the chunked-encoding terminator */
				memcpy(buf + LWS_PRE, "0\x0d\x0a\x0d\x0a", 5);
				lwsl_debug("writing chunk term and exiting\n");
				n = lws_write(wsi, (unsigned char *)buf +
					      LWS_PRE, 5, LWS_WRITE_HTTP);
			} else
				/* h2: an empty FINAL write ends the stream */
				n = lws_write(wsi, (unsigned char *)buf +
					      LWS_PRE, 0,
					      LWS_WRITE_HTTP_FINAL);

			/* always close after sending it */
			return -1;
		}
#endif
#if defined(LWS_WITH_HTTP_PROXY)
		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY) {
			char *px = buf + LWS_PRE;
			int lenx = sizeof(buf) - LWS_PRE;

			/*
			 * our sink is writeable and our source has something
			 * to read.  So read a lump of source material of
			 * suitable size to send or what's available, whichever
			 * is the smaller.
			 */
			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY;
			if (!lws_get_child(wsi))
				break;
			if (lws_http_client_read(lws_get_child(wsi), &px,
						 &lenx) < 0)
				return -1;
			break;
		}
#endif
		break;

#if defined(LWS_WITH_HTTP_PROXY)
	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
		assert(lws_get_parent(wsi));
		if (!lws_get_parent(wsi))
			break;
		/* parent pulls the data via the _READ callback when writeable */
		lws_get_parent(wsi)->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY;
		lws_callback_on_writable(lws_get_parent(wsi));
		break;

	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ:
		assert(lws_get_parent(wsi));
		n = lws_write(lws_get_parent(wsi), (unsigned char *)in,
			      len, LWS_WRITE_HTTP);
		if (n < 0)
			return -1;
		break;

	case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: {
		unsigned char *p, *end;
		char ctype[64], ctlen = 0;

		/* synthesize response headers for the proxied transaction */
		p = (unsigned char *)buf + LWS_PRE;
		end = p + sizeof(buf) - LWS_PRE;

		if (lws_add_http_header_status(lws_get_parent(wsi),
					       HTTP_STATUS_OK, &p, end))
			return 1;
		if (lws_add_http_header_by_token(lws_get_parent(wsi),
				WSI_TOKEN_HTTP_SERVER,
				(unsigned char *)"libwebsockets",
				13, &p, end))
			return 1;

		/* copy the onward connection's content-type through, if any */
		ctlen = lws_hdr_copy(wsi, ctype, sizeof(ctype),
				     WSI_TOKEN_HTTP_CONTENT_TYPE);
		if (ctlen > 0) {
			if (lws_add_http_header_by_token(lws_get_parent(wsi),
					WSI_TOKEN_HTTP_CONTENT_TYPE,
					(unsigned char *)ctype, ctlen, &p, end))
				return 1;
		}

		if (lws_finalize_http_header(lws_get_parent(wsi), &p, end))
			return 1;

		*p = '\0';
		n = lws_write(lws_get_parent(wsi),
			      (unsigned char *)buf + LWS_PRE,
			      p - ((unsigned char *)buf + LWS_PRE),
			      LWS_WRITE_HTTP_HEADERS);
		if (n < 0)
			return -1;

		break; }

#endif

#ifdef LWS_WITH_CGI
	/* CGI IO events (POLLIN/OUT) appear here, our default policy is:
	 *
	 * - POST data goes on subprocess stdin
	 * - subprocess stdout goes on http via writeable callback
	 * - subprocess stderr goes to the logs
	 */
	case LWS_CALLBACK_CGI:
		args = (struct lws_cgi_args *)in;
		switch (args->ch) { /* which of stdin/out/err ? */
		case LWS_STDIN:
			/* TBD stdin rx flow control */
			break;
		case LWS_STDOUT:
			/* quench POLLIN on STDOUT until MASTER got writeable */
			lws_rx_flow_control(args->stdwsi[LWS_STDOUT], 0);
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI;
			/* when writing to MASTER would not block */
			lws_callback_on_writable(wsi);
			break;
		case LWS_STDERR:
			n = lws_get_socket_fd(args->stdwsi[LWS_STDERR]);
			if (n < 0)
				break;
			/* -2 leaves room for an appended '\n' plus the NUL */
			n = read(n, buf, sizeof(buf) - 2);
			if (n > 0) {
				if (buf[n - 1] != '\n')
					buf[n++] = '\n';
				buf[n] = '\0';
				lwsl_notice("CGI-stderr: %s\n", buf);
			}
			break;
		}
		break;

	case LWS_CALLBACK_CGI_TERMINATED:
		lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: %d %" PRIu64 "\n",
			   wsi->http.cgi->explicitly_chunked,
			   (uint64_t)wsi->http.cgi->content_length);
		if (!wsi->http.cgi->explicitly_chunked &&
		    !wsi->http.cgi->content_length) {
			/* send terminating chunk */
			lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: ending\n");
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI_CHUNK_END;
			lws_callback_on_writable(wsi);
			lws_set_timeout(wsi, PENDING_TIMEOUT_CGI, 3);
			break;
		}
		return -1;

	case LWS_CALLBACK_CGI_STDIN_DATA: /* POST body for stdin */
		args = (struct lws_cgi_args *)in;
		/*
		 * NOTE(review): writes a NUL one byte past args->len --
		 * presumably the rx buffer always has room for it; verify
		 */
		args->data[args->len] = '\0';
		n = lws_get_socket_fd(args->stdwsi[LWS_STDIN]);
		if (n < 0)
			return -1;
		n = write(n, args->data, args->len);
		if (n < args->len)
			lwsl_notice("LWS_CALLBACK_CGI_STDIN_DATA: "
				    "sent %d only %d went", n, args->len);
		return n;
#endif
#endif
	case LWS_CALLBACK_SSL_INFO:
		si = in;

		/* quiets unused warning where lwsl_notice() compiles out */
		(void)si;
		lwsl_notice("LWS_CALLBACK_SSL_INFO: where: 0x%x, ret: 0x%x\n",
			    si->where, si->ret);
		break;

	default:
		break;
	}

	return 0;
}
+
/* list of supported protocols and callbacks */

/* fallback table used when lws_create_vhost() gets no user protocols */
static const struct lws_protocols protocols_dummy[] = {
	/* first protocol must always be HTTP handler */

	{
		"http-only",		/* name */
		lws_callback_http_dummy, /* callback */
		0,			/* per_session_data_size */
		0,			/* rx_buffer_size */
		0,			/* id */
		NULL,			/* user */
		0			/* tx_packet_size */
	},
	/*
	 * the other protocols are provided by lws plugins
	 */
	{ NULL, NULL, 0, 0, 0, NULL, 0} /* terminator */
};
+
+#ifdef LWS_PLAT_OPTEE
+#undef LWS_HAVE_GETENV
+#endif
+
+static void
+lws_vhost_destroy2(struct lws_vhost *vh);
+
/*
 * Create and initialize a vhost in the context: builds the unified protocol
 * table (user + plugin protocols), copies TLS cert/key paths, resolves mount
 * interpreter names to protocol indexes, configures proxies from info or the
 * environment, opens the access log and creates the listen socket.
 *
 * Returns the new vhost (appended to context->vhost_list), or NULL on
 * failure.
 */
LWS_VISIBLE struct lws_vhost *
lws_create_vhost(struct lws_context *context,
		 const struct lws_context_creation_info *info)
{
	struct lws_vhost *vh = lws_zalloc(sizeof(*vh), "create vhost"),
			 **vh1 = &context->vhost_list;
	const struct lws_http_mount *mounts;
	const struct lws_protocols *pcols = info->protocols;
	const struct lws_protocol_vhost_options *pvo;
#ifdef LWS_WITH_PLUGINS
	struct lws_plugin *plugin = context->plugin_list;
#endif
	struct lws_protocols *lwsp;
	int m, f = !info->pvo;
	char buf[20];
#if !defined(LWS_WITHOUT_CLIENT) && defined(LWS_HAVE_GETENV)
	char *p;
#endif
	int n;

	if (!vh)
		return NULL;

#if LWS_MAX_SMP > 1
	pthread_mutex_init(&vh->lock, NULL);
#endif

	/* no user protocol table: fall back to the dummy http-only one */
	if (!pcols)
		pcols = &protocols_dummy[0];

	vh->context = context;
	if (!info->vhost_name)
		vh->name = "default";
	else
		vh->name = info->vhost_name;

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	vh->http.error_document_404 = info->error_document_404;
#endif

	if (info->options & LWS_SERVER_OPTION_ONLY_RAW)
		lwsl_info("%s set to only support RAW\n", vh->name);

	vh->iface = info->iface;
#if !defined(LWS_WITH_ESP32) && \
    !defined(OPTEE_TA) && !defined(WIN32)
	vh->bind_iface = info->bind_iface;
#endif

	/* count the entries in the (possibly dummy) protocol table */
	for (vh->count_protocols = 0;
	     pcols[vh->count_protocols].callback;
	     vh->count_protocols++)
		;

	vh->options = info->options;
	vh->pvo = info->pvo;
	vh->headers = info->headers;
	vh->user = info->user;

	/* let every compiled-in role set up its per-vhost state */
	LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar)
		if (ar->init_vhost)
			if (ar->init_vhost(vh, info))
				/*
				 * NOTE(review): returning here leaks vh (and
				 * the mutex init above) -- TODO confirm and
				 * route through a cleanup path
				 */
				return NULL;
	LWS_FOR_EVERY_AVAILABLE_ROLE_END;


	if (info->keepalive_timeout)
		vh->keepalive_timeout = info->keepalive_timeout;
	else
		vh->keepalive_timeout = 5;

	if (info->timeout_secs_ah_idle)
		vh->timeout_secs_ah_idle = info->timeout_secs_ah_idle;
	else
		vh->timeout_secs_ah_idle = 10;

#if defined(LWS_WITH_TLS)

	vh->tls.alpn = info->alpn;
	vh->tls.ssl_info_event_mask = info->ssl_info_event_mask;

	if (info->ecdh_curve)
		lws_strncpy(vh->tls.ecdh_curve, info->ecdh_curve,
			    sizeof(vh->tls.ecdh_curve));

	/* carefully allocate and take a copy of cert + key paths if present */
	n = 0;
	if (info->ssl_cert_filepath)
		n += (int)strlen(info->ssl_cert_filepath) + 1;
	if (info->ssl_private_key_filepath)
		n += (int)strlen(info->ssl_private_key_filepath) + 1;

	if (n) {
		/*
		 * one allocation holds both strings: cert path first, key
		 * path directly behind it.
		 * NOTE(review): lws_malloc() result is not checked before
		 * the memcpy below -- TODO confirm OOM handling
		 */
		vh->tls.key_path = vh->tls.alloc_cert_path = lws_malloc(n, "vh paths");
		if (info->ssl_cert_filepath) {
			n = (int)strlen(info->ssl_cert_filepath) + 1;
			memcpy(vh->tls.alloc_cert_path, info->ssl_cert_filepath, n);
			vh->tls.key_path += n;
		}
		if (info->ssl_private_key_filepath)
			memcpy(vh->tls.key_path, info->ssl_private_key_filepath,
			       strlen(info->ssl_private_key_filepath) + 1);
	}
#endif

	/*
	 * give the vhost a unified list of protocols including the
	 * ones that came from plugins
	 */
	lwsp = lws_zalloc(sizeof(struct lws_protocols) * (vh->count_protocols +
				   context->plugin_protocol_count + 1),
			  "vhost-specific plugin table");
	if (!lwsp) {
		lwsl_err("OOM\n");
		/* NOTE(review): leaks vh on this path -- TODO confirm */
		return NULL;
	}

	m = vh->count_protocols;
	memcpy(lwsp, pcols, sizeof(struct lws_protocols) * m);

	/* for compatibility, all protocols enabled on vhost if only
	 * the default vhost exists.  Otherwise only vhosts who ask
	 * for a protocol get it enabled.
	 */

	if (context->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS)
		f = 0;
	(void)f;
#ifdef LWS_WITH_PLUGINS
	if (plugin) {

		while (plugin) {
			for (n = 0; n < plugin->caps.count_protocols; n++) {
				/*
				 * for compatibility's sake, no pvo implies
				 * allow all protocols
				 */
				if (f || lws_vhost_protocol_options(vh,
				    plugin->caps.protocols[n].name)) {
					memcpy(&lwsp[m],
					       &plugin->caps.protocols[n],
					       sizeof(struct lws_protocols));
					m++;
					vh->count_protocols++;
				}
			}
			plugin = plugin->list;
		}
	}
#endif

	if (
#ifdef LWS_WITH_PLUGINS
	    (context->plugin_list) ||
#endif
	    context->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS)
		vh->protocols = lwsp;
	else {
		/* no plugins, not explicit: use the caller's table directly */
		vh->protocols = pcols;
		lws_free(lwsp);
	}

	/* per-protocol list heads, for wsi sharing the same vh + protocol */
	vh->same_vh_protocol_list = (struct lws **)
			lws_zalloc(sizeof(struct lws *) * vh->count_protocols,
				   "same vh list");
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	vh->http.mount_list = info->mounts;
#endif

#ifdef LWS_WITH_UNIX_SOCK
	if (LWS_UNIX_SOCK_ENABLED(context)) {
		lwsl_notice("Creating Vhost '%s' path \"%s\", %d protocols\n",
			    vh->name, vh->iface, vh->count_protocols);
	} else
#endif
	{
		switch(info->port) {
		case CONTEXT_PORT_NO_LISTEN:
			strcpy(buf, "(serving disabled)");
			break;
		case CONTEXT_PORT_NO_LISTEN_SERVER:
			strcpy(buf, "(no listener)");
			break;
		default:
			lws_snprintf(buf, sizeof(buf), "port %u", info->port);
			break;
		}
		lwsl_notice("Creating Vhost '%s' %s, %d protocols, IPv6 %s\n",
			    vh->name, buf, vh->count_protocols,
			    LWS_IPV6_ENABLED(vh) ? "on" : "off");
	}
	mounts = info->mounts;
	while (mounts) {
		(void)mount_protocols[0];
		lwsl_info(" mounting %s%s to %s\n",
			  mount_protocols[mounts->origin_protocol],
			  mounts->origin, mounts->mountpoint);

		/* convert interpreter protocol names to pointers */
		pvo = mounts->interpret;
		while (pvo) {
			for (n = 0; n < vh->count_protocols; n++) {
				if (strcmp(pvo->value, vh->protocols[n].name))
					continue;
				/* store the protocol index in the value slot */
				((struct lws_protocol_vhost_options *)pvo)->
					value = (const char *)(lws_intptr_t)n;
				break;
			}
			if (n == vh->count_protocols)
				lwsl_err("ignoring unknown interp pr %s\n",
					 pvo->value);
			pvo = pvo->next;
		}

		mounts = mounts->mount_next;
	}

	vh->listen_port = info->port;
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	vh->http.http_proxy_port = 0;
	vh->http.http_proxy_address[0] = '\0';
#endif
#if defined(LWS_WITH_SOCKS5)
	vh->socks_proxy_port = 0;
	vh->socks_proxy_address[0] = '\0';
#endif

#if !defined(LWS_WITHOUT_CLIENT)
	/* either use proxy from info, or try get it from env var */
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* http proxy */
	if (info->http_proxy_address) {
		/* override for backwards compatibility */
		if (info->http_proxy_port)
			vh->http.http_proxy_port = info->http_proxy_port;
		lws_set_proxy(vh, info->http_proxy_address);
	} else
#endif
	{
#ifdef LWS_HAVE_GETENV
		p = getenv("http_proxy");
		if (p)
			lws_set_proxy(vh, p);
#endif
	}
#endif
#if defined(LWS_WITH_SOCKS5)
	/* socks proxy */
	if (info->socks_proxy_address) {
		/* override for backwards compatibility */
		if (info->socks_proxy_port)
			vh->socks_proxy_port = info->socks_proxy_port;
		lws_set_socks(vh, info->socks_proxy_address);
	} else {
#ifdef LWS_HAVE_GETENV
		p = getenv("socks_proxy");
		if (p)
			lws_set_socks(vh, p);
#endif
	}
#endif

	/* TCP keepalive tuning for connections on this vhost */
	vh->ka_time = info->ka_time;
	vh->ka_interval = info->ka_interval;
	vh->ka_probes = info->ka_probes;

	if (vh->options & LWS_SERVER_OPTION_STS)
		lwsl_notice(" STS enabled\n");

#ifdef LWS_WITH_ACCESS_LOG
	if (info->log_filepath) {
		vh->log_fd = lws_open(info->log_filepath,
				      O_CREAT | O_APPEND | O_RDWR, 0600);
		if (vh->log_fd == (int)LWS_INVALID_FILE) {
			lwsl_err("unable to open log filepath %s\n",
				 info->log_filepath);
			goto bail;
		}
#ifndef WIN32
		/* when we will drop privileges, let the target uid keep it */
		if (context->uid != -1)
			if (chown(info->log_filepath, context->uid,
				  context->gid) == -1)
				lwsl_err("unable to chown log file %s\n",
					 info->log_filepath);
#endif
	} else
		vh->log_fd = (int)LWS_INVALID_FILE;
#endif
	if (lws_context_init_server_ssl(info, vh)) {
		lwsl_err("%s: lws_context_init_server_ssl failed\n", __func__);
		goto bail1;
	}
	if (lws_context_init_client_ssl(info, vh)) {
		lwsl_err("%s: lws_context_init_client_ssl failed\n", __func__);
		goto bail1;
	}
	/* listen socket creation is serialized across service threads */
	lws_context_lock(context);
	n = _lws_vhost_init_server(info, vh);
	lws_context_unlock(context);
	if (n < 0) {
		lwsl_err("init server failed\n");
		goto bail1;
	}


	/* append the new vhost at the end of context->vhost_list */
	while (1) {
		if (!(*vh1)) {
			*vh1 = vh;
			break;
		}
		vh1 = &(*vh1)->vhost_next;
	};

	/* for the case we are adding a vhost much later, after server init */

	if (context->protocol_init_done)
		if (lws_protocol_init(context)) {
			lwsl_err("%s: lws_protocol_init failed\n", __func__);
			goto bail1;
		}

	return vh;

bail1:
	/*
	 * NOTE(review): destroy is invoked even though the vhost may not be
	 * in context->vhost_list yet -- presumably lws_vhost_destroy()
	 * tolerates that; verify
	 */
	lws_vhost_destroy(vh);
	lws_vhost_destroy2(vh);

	return NULL;

#ifdef LWS_WITH_ACCESS_LOG
bail:
	lws_free(vh);
#endif

	return NULL;
}
+
+LWS_VISIBLE int
+lws_init_vhost_client_ssl(const struct lws_context_creation_info *info,
+ struct lws_vhost *vhost)
+{
+ struct lws_context_creation_info i;
+
+ memcpy(&i, info, sizeof(i));
+ i.port = CONTEXT_PORT_NO_LISTEN;
+
+ return lws_context_init_client_ssl(&i, vhost);
+}
+
/*
 * Wake only the service thread that owns this wsi by signaling its event
 * pipe, so a blocked poll / event-lib wait returns immediately.
 */
LWS_VISIBLE void
lws_cancel_service_pt(struct lws *wsi)
{
	lws_plat_pipe_signal(wsi);
}
+
+LWS_VISIBLE void
+lws_cancel_service(struct lws_context *context)
+{
+ struct lws_context_per_thread *pt = &context->pt[0];
+ short m = context->count_threads;
+
+ if (context->being_destroyed1)
+ return;
+
+ lwsl_info("%s\n", __func__);
+
+ while (m--) {
+ if (pt->pipe_wsi)
+ lws_plat_pipe_signal(pt->pipe_wsi);
+ pt++;
+ }
+}
+
/*
 * Create one event-pipe wsi per service thread; these are what
 * lws_cancel_service() signals to wake a thread's event wait from another
 * thread.  Returns 0 on success, 1 on allocation / fds-insertion failure.
 */
int
lws_create_event_pipes(struct lws_context *context)
{
	struct lws *wsi;
	int n;

	/*
	 * Create the pt event pipes... these are unique in that they are
	 * not bound to a vhost or protocol (both are NULL)
	 */

	for (n = 0; n < context->count_threads; n++) {
		/* idempotent: skip threads that already have their pipe */
		if (context->pt[n].pipe_wsi)
			continue;

		wsi = lws_zalloc(sizeof(*wsi), "event pipe wsi");
		if (!wsi) {
			lwsl_err("Out of mem\n");
			return 1;
		}
		wsi->context = context;
		lws_role_transition(wsi, 0, LRS_UNCONNECTED, &role_ops_pipe);
		wsi->protocol = NULL;
		wsi->tsi = n;
		wsi->vhost = NULL;
		wsi->event_pipe = 1;
		wsi->desc.sockfd = LWS_SOCK_INVALID;
		context->pt[n].pipe_wsi = wsi;
		context->count_wsi_allocated++;

		if (lws_plat_pipe_create(wsi))
			/*
			 * platform code returns 0 if it actually created pipes
			 * and initialized pt->dummy_pipe_fds[]. If it used
			 * some other mechanism outside of signaling in the
			 * normal event loop, we skip treating the pipe as
			 * related to dummy_pipe_fds[], adding it to the fds,
			 * etc.
			 */
			continue;

		wsi->desc.sockfd = context->pt[n].dummy_pipe_fds[0];
		lwsl_debug("event pipe fd %d\n", wsi->desc.sockfd);

		if (context->event_loop_ops->accept)
			context->event_loop_ops->accept(wsi);

		if (__insert_wsi_socket_into_fds(context, wsi))
			return 1;
	}

	return 0;
}
+
/*
 * Tear down a pt's event-pipe wsi: remove it from the fds table; if the
 * event lib wants a logical close, let it finalize the wsi asynchronously,
 * otherwise destroy and free the wsi immediately.
 */
void
lws_destroy_event_pipe(struct lws *wsi)
{
	lwsl_info("%s\n", __func__);
	__remove_wsi_socket_from_fds(wsi);

	if (wsi->context->event_loop_ops->wsi_logical_close) {
		/* event lib completes destruction later; don't free here */
		wsi->context->event_loop_ops->wsi_logical_close(wsi);
		lws_plat_pipe_close(wsi);
		return;
	}

	if (wsi->context->event_loop_ops->destroy_wsi)
		wsi->context->event_loop_ops->destroy_wsi(wsi);
	lws_plat_pipe_close(wsi);
	wsi->context->count_wsi_allocated--;
	lws_free(wsi);
}
+
+LWS_VISIBLE struct lws_context *
+lws_create_context(const struct lws_context_creation_info *info)
+{
+ struct lws_context *context = NULL;
+ struct lws_plat_file_ops *prev;
+#ifndef LWS_NO_DAEMONIZE
+ int pid_daemon = get_daemonize_pid();
+#endif
+ int n;
+#if defined(__ANDROID__)
+ struct rlimit rt;
+#endif
+
+
+
+ lwsl_info("Initial logging level %d\n", log_level);
+ lwsl_info("Libwebsockets version: %s\n", library_version);
+#if defined(GCC_VER)
+ lwsl_info("Compiled with %s\n", GCC_VER);
+#endif
+
+#ifdef LWS_WITH_IPV6
+ if (!lws_check_opt(info->options, LWS_SERVER_OPTION_DISABLE_IPV6))
+ lwsl_info("IPV6 compiled in and enabled\n");
+ else
+ lwsl_info("IPV6 compiled in but disabled\n");
+#else
+ lwsl_info("IPV6 not compiled in\n");
+#endif
+
+ lwsl_info(" LWS_DEF_HEADER_LEN : %u\n", LWS_DEF_HEADER_LEN);
+ lwsl_info(" LWS_MAX_PROTOCOLS : %u\n", LWS_MAX_PROTOCOLS);
+ lwsl_info(" LWS_MAX_SMP : %u\n", LWS_MAX_SMP);
+ lwsl_info(" sizeof (*info) : %ld\n", (long)sizeof(*info));
+#if defined(LWS_WITH_STATS)
+ lwsl_info(" LWS_WITH_STATS : on\n");
+#endif
+ lwsl_info(" SYSTEM_RANDOM_FILEPATH: '%s'\n", SYSTEM_RANDOM_FILEPATH);
+#if defined(LWS_WITH_HTTP2)
+ lwsl_info(" HTTP2 support : available\n");
+#else
+ lwsl_info(" HTTP2 support : not configured\n");
+#endif
+ if (lws_plat_context_early_init())
+ return NULL;
+
+ context = lws_zalloc(sizeof(struct lws_context), "context");
+ if (!context) {
+ lwsl_err("No memory for websocket context\n");
+ return NULL;
+ }
+
+#if defined(LWS_WITH_TLS)
+#if defined(LWS_WITH_MBEDTLS)
+ context->tls_ops = &tls_ops_mbedtls;
+#else
+ context->tls_ops = &tls_ops_openssl;
+#endif
+#endif
+
+ if (info->pt_serv_buf_size)
+ context->pt_serv_buf_size = info->pt_serv_buf_size;
+ else
+ context->pt_serv_buf_size = 4096;
+
+#if defined(LWS_ROLE_H2)
+ role_ops_h2.init_context(context, info);
+#endif
+
+#if LWS_MAX_SMP > 1
+ pthread_mutex_init(&context->lock, NULL);
+#endif
+
+#if defined(LWS_WITH_ESP32)
+ context->last_free_heap = esp_get_free_heap_size();
+#endif
+
+ /* default to just the platform fops implementation */
+
+ context->fops_platform.LWS_FOP_OPEN = _lws_plat_file_open;
+ context->fops_platform.LWS_FOP_CLOSE = _lws_plat_file_close;
+ context->fops_platform.LWS_FOP_SEEK_CUR = _lws_plat_file_seek_cur;
+ context->fops_platform.LWS_FOP_READ = _lws_plat_file_read;
+ context->fops_platform.LWS_FOP_WRITE = _lws_plat_file_write;
+ context->fops_platform.fi[0].sig = NULL;
+
+ /*
+ * arrange a linear linked-list of fops starting from context->fops
+ *
+ * platform fops
+ * [ -> fops_zip (copied into context so .next settable) ]
+ * [ -> info->fops ]
+ */
+
+ context->fops = &context->fops_platform;
+ prev = (struct lws_plat_file_ops *)context->fops;
+
+#if defined(LWS_WITH_ZIP_FOPS)
+ /* make a soft copy so we can set .next */
+ context->fops_zip = fops_zip;
+ prev->next = &context->fops_zip;
+ prev = (struct lws_plat_file_ops *)prev->next;
+#endif
+
+ /* if user provided fops, tack them on the end of the list */
+ if (info->fops)
+ prev->next = info->fops;
+
+ context->reject_service_keywords = info->reject_service_keywords;
+ if (info->external_baggage_free_on_destroy)
+ context->external_baggage_free_on_destroy =
+ info->external_baggage_free_on_destroy;
+
+ context->time_up = time(NULL);
+ context->pcontext_finalize = info->pcontext;
+
+ context->simultaneous_ssl_restriction =
+ info->simultaneous_ssl_restriction;
+
+#ifndef LWS_NO_DAEMONIZE
+ if (pid_daemon) {
+ context->started_with_parent = pid_daemon;
+ lwsl_info(" Started with daemon pid %d\n", pid_daemon);
+ }
+#endif
+#if defined(__ANDROID__)
+ n = getrlimit ( RLIMIT_NOFILE,&rt);
+ if (-1 == n) {
+ lwsl_err("Get RLIMIT_NOFILE failed!\n");
+ return NULL;
+ }
+ context->max_fds = rt.rlim_cur;
+#else
+ context->max_fds = getdtablesize();
+#endif
+
+ if (info->count_threads)
+ context->count_threads = info->count_threads;
+ else
+ context->count_threads = 1;
+
+ if (context->count_threads > LWS_MAX_SMP)
+ context->count_threads = LWS_MAX_SMP;
+
+ context->token_limits = info->token_limits;
+
+ context->options = info->options;
+
+ /*
+ * set the context event loops ops struct
+ *
+ * after this, all event_loop actions use the generic ops
+ */
+
+#if defined(LWS_WITH_POLL)
+ context->event_loop_ops = &event_loop_ops_poll;
+#endif
+
+ if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
+#if defined(LWS_WITH_LIBUV)
+ context->event_loop_ops = &event_loop_ops_uv;
+#else
+ goto fail_event_libs;
+#endif
+
+ if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEV))
+#if defined(LWS_WITH_LIBEV)
+ context->event_loop_ops = &event_loop_ops_ev;
+#else
+ goto fail_event_libs;
+#endif
+
+ if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEVENT))
+#if defined(LWS_WITH_LIBEVENT)
+ context->event_loop_ops = &event_loop_ops_event;
+#else
+ goto fail_event_libs;
+#endif
+
+ if (!context->event_loop_ops)
+ goto fail_event_libs;
+
+ lwsl_info("Using event loop: %s\n", context->event_loop_ops->name);
+
+#if defined(LWS_WITH_TLS)
+ time(&context->tls.last_cert_check_s);
+ if (info->alpn)
+ context->tls.alpn_default = info->alpn;
+ else {
+ char *p = context->tls.alpn_discovered, first = 1;
+
+ LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
+ if (ar->alpn) {
+ if (!first)
+ *p++ = ',';
+ p += lws_snprintf(p,
+ context->tls.alpn_discovered +
+ sizeof(context->tls.alpn_discovered) -
+ 2 - p, "%s", ar->alpn);
+ first = 0;
+ }
+ } LWS_FOR_EVERY_AVAILABLE_ROLE_END;
+
+ context->tls.alpn_default = context->tls.alpn_discovered;
+ }
+
+ lwsl_info("Default ALPN advertisment: %s\n", context->tls.alpn_default);
+#endif
+
+ if (info->timeout_secs)
+ context->timeout_secs = info->timeout_secs;
+ else
+ context->timeout_secs = AWAITING_TIMEOUT;
+
+ context->ws_ping_pong_interval = info->ws_ping_pong_interval;
+
+ lwsl_info(" default timeout (secs): %u\n", context->timeout_secs);
+
+ if (info->max_http_header_data)
+ context->max_http_header_data = info->max_http_header_data;
+ else
+ if (info->max_http_header_data2)
+ context->max_http_header_data =
+ info->max_http_header_data2;
+ else
+ context->max_http_header_data = LWS_DEF_HEADER_LEN;
+
+ if (info->max_http_header_pool)
+ context->max_http_header_pool = info->max_http_header_pool;
+ else
+ context->max_http_header_pool = context->max_fds;
+
+ if (info->fd_limit_per_thread)
+ context->fd_limit_per_thread = info->fd_limit_per_thread;
+ else
+ context->fd_limit_per_thread = context->max_fds /
+ context->count_threads;
+
+ /*
+ * Allocate the per-thread storage for scratchpad buffers,
+ * and header data pool
+ */
+ for (n = 0; n < context->count_threads; n++) {
+ context->pt[n].serv_buf = lws_malloc(context->pt_serv_buf_size,
+ "pt_serv_buf");
+ if (!context->pt[n].serv_buf) {
+ lwsl_err("OOM\n");
+ return NULL;
+ }
+
+ context->pt[n].context = context;
+ context->pt[n].tid = n;
+
+#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
+ context->pt[n].http.ah_list = NULL;
+ context->pt[n].http.ah_pool_length = 0;
+#endif
+ lws_pt_mutex_init(&context->pt[n]);
+ }
+
+ lwsl_info(" Threads: %d each %d fds\n", context->count_threads,
+ context->fd_limit_per_thread);
+
+ if (!info->ka_interval && info->ka_time > 0) {
+ lwsl_err("info->ka_interval can't be 0 if ka_time used\n");
+ return NULL;
+ }
+
+
+#if defined(LWS_WITH_PEER_LIMITS)
+ /* scale the peer hash table according to the max fds for the process,
+ * so that the max list depth averages 16. Eg, 1024 fd -> 64,
+ * 102400 fd -> 6400
+ */
+ context->pl_hash_elements =
+ (context->count_threads * context->fd_limit_per_thread) / 16;
+ context->pl_hash_table = lws_zalloc(sizeof(struct lws_peer *) *
+ context->pl_hash_elements, "peer limits hash table");
+ context->ip_limit_ah = info->ip_limit_ah;
+ context->ip_limit_wsi = info->ip_limit_wsi;
+#endif
+
+ lwsl_info(" mem: context: %5lu B (%ld ctx + (%ld thr x %d))\n",
+ (long)sizeof(struct lws_context) +
+ (context->count_threads * context->pt_serv_buf_size),
+ (long)sizeof(struct lws_context),
+ (long)context->count_threads,
+ context->pt_serv_buf_size);
+#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
+ lwsl_info(" mem: http hdr rsvd: %5lu B (%u thr x (%u + %lu) x %u))\n",
+ (long)(context->max_http_header_data +
+ sizeof(struct allocated_headers)) *
+ context->max_http_header_pool * context->count_threads,
+ context->count_threads,
+ context->max_http_header_data,
+ (long)sizeof(struct allocated_headers),
+ context->max_http_header_pool);
+#endif
+ n = sizeof(struct lws_pollfd) * context->count_threads *
+ context->fd_limit_per_thread;
+ context->pt[0].fds = lws_zalloc(n, "fds table");
+ if (context->pt[0].fds == NULL) {
+ lwsl_err("OOM allocating %d fds\n", context->max_fds);
+ goto bail;
+ }
+ lwsl_info(" mem: pollfd map: %5u\n", n);
+
+ if (info->server_string) {
+ context->server_string = info->server_string;
+ context->server_string_len = (short)
+ strlen(context->server_string);
+ }
+
+#if LWS_MAX_SMP > 1
+ /* each thread serves his own chunk of fds */
+ for (n = 1; n < (int)info->count_threads; n++)
+ context->pt[n].fds = context->pt[n - 1].fds +
+ context->fd_limit_per_thread;
+#endif
+
+ if (lws_plat_init(context, info))
+ goto bail;
+
+ if (context->event_loop_ops->init_context)
+ if (context->event_loop_ops->init_context(context, info))
+ goto bail;
+
+
+ if (context->event_loop_ops->init_pt)
+ for (n = 0; n < context->count_threads; n++) {
+ void *lp = NULL;
+
+ if (info->foreign_loops)
+ lp = info->foreign_loops[n];
+
+ if (context->event_loop_ops->init_pt(context, lp, n))
+ goto bail;
+ }
+
+ if (lws_create_event_pipes(context))
+ goto bail;
+
+ lws_context_init_ssl_library(info);
+
+ context->user_space = info->user;
+
+ /*
+ * if he's not saying he'll make his own vhosts later then act
+ * compatibly and make a default vhost using the data in the info
+ */
+ if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
+ if (!lws_create_vhost(context, info)) {
+ lwsl_err("Failed to create default vhost\n");
+ for (n = 0; n < context->count_threads; n++)
+ lws_free_set_NULL(context->pt[n].serv_buf);
+#if defined(LWS_WITH_PEER_LIMITS)
+ lws_free_set_NULL(context->pl_hash_table);
+#endif
+ lws_free_set_NULL(context->pt[0].fds);
+ lws_plat_context_late_destroy(context);
+ lws_free_set_NULL(context);
+ return NULL;
+ }
+
+ lws_context_init_extensions(info, context);
+
+ lwsl_info(" mem: per-conn: %5lu bytes + protocol rx buf\n",
+ (unsigned long)sizeof(struct lws));
+
+ strcpy(context->canonical_hostname, "unknown");
+ lws_server_get_canonical_hostname(context, info);
+
+ context->uid = info->uid;
+ context->gid = info->gid;
+
+#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
+ memcpy(context->caps, info->caps, sizeof(context->caps));
+ context->count_caps = info->count_caps;
+#endif
+
+ /*
+ * drop any root privs for this process
+ * to listen on port < 1023 we would have needed root, but now we are
+ * listening, we don't want the power for anything else
+ */
+ if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
+ lws_plat_drop_app_privileges(info);
+
+ /* expedite post-context init (eg, protocols) */
+ lws_cancel_service(context);
+
+#if defined(LWS_WITH_SELFTESTS)
+ lws_jws_selftest();
+#endif
+
+ return context;
+
+bail:
+ lws_context_destroy(context);
+
+ return NULL;
+
+fail_event_libs:
+ lwsl_err("Requested event library support not configured, available:\n");
+ {
+ const struct lws_event_loop_ops **elops = available_event_libs;
+
+ while (*elops) {
+ lwsl_err(" - %s\n", (*elops)->name);
+ elops++;
+ }
+ }
+ lws_free(context);
+
+ return NULL;
+}
+
+LWS_VISIBLE LWS_EXTERN void
+lws_context_deprecate(struct lws_context *context, lws_reload_func cb)
+{
+	struct lws_vhost *vh = context->vhost_list, *vh1;
+	struct lws *wsi;
+
+	/*
+	 * "deprecation" means disable the context from accepting any new
+	 * connections and free up listen sockets to be used by a replacement
+	 * context.
+	 *
+	 * Otherwise the deprecated context remains operational, until its
+	 * number of connected sockets falls to zero, when it is deleted.
+	 */
+
+	/* for each vhost, close his listen socket */
+
+	while (vh) {
+		wsi = vh->lserv_wsi;
+		if (wsi) {
+			wsi->socket_is_permanently_unusable = 1;
+			/*
+			 * bump the pending-close count BEFORE closing:
+			 * lws_close_free_wsi() frees wsi, so dereferencing
+			 * wsi->context afterwards was a use-after-free
+			 */
+			context->deprecation_pending_listen_close_count++;
+			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "ctx deprecate");
+			/*
+			 * other vhosts can share the listen port, they
+			 * point to the same wsi. So zap those too.
+			 */
+			vh1 = context->vhost_list;
+			while (vh1) {
+				if (vh1->lserv_wsi == wsi)
+					vh1->lserv_wsi = NULL;
+				vh1 = vh1->vhost_next;
+			}
+		}
+		vh = vh->vhost_next;
+	}
+
+	context->deprecated = 1;
+	context->deprecation_cb = cb;
+}
+
+/* returns nonzero once lws_context_deprecate() has been called on context */
+LWS_VISIBLE LWS_EXTERN int
+lws_context_is_deprecated(struct lws_context *context)
+{
+	return context->deprecated;
+}
+
+/*
+ * Phase 1 of vhost destruction: detach the vhost from live service.
+ * Hands any shared listen socket to a surviving vhost, force-closes all
+ * wsi bound to this vhost, fires LWS_CALLBACK_PROTOCOL_DESTROY, unlinks
+ * the vhost from the context list and queues it for lws_vhost_destroy2().
+ * Idempotent via vh->being_destroyed.
+ */
+void
+lws_vhost_destroy1(struct lws_vhost *vh)
+{
+	const struct lws_protocols *protocol = NULL;
+	struct lws_context_per_thread *pt;
+	int n, m = vh->context->count_threads;
+	struct lws_context *context = vh->context;
+	struct lws wsi;
+
+	lwsl_info("%s\n", __func__);
+
+	if (vh->being_destroyed)
+		return;
+
+	vh->being_destroyed = 1;
+
+	/*
+	 * Are there other vhosts that are piggybacking on our listen socket?
+	 * If so we need to hand the listen socket off to one of the others
+	 * so it will remain open.  If not, leave it attached to the closing
+	 * vhost and it will get closed.
+	 */
+
+	if (vh->lserv_wsi)
+		lws_start_foreach_ll(struct lws_vhost *, v,
+				     context->vhost_list) {
+			if (v != vh &&
+			    !v->being_destroyed &&
+			    v->listen_port == vh->listen_port &&
+			    ((!v->iface && !vh->iface) ||
+			    (v->iface && vh->iface &&
+			    !strcmp(v->iface, vh->iface)))) {
+				/*
+				 * this can only be a listen wsi, which is
+				 * restricted... it has no protocol or other
+				 * bindings or states.  So we can simply
+				 * swap it to a vhost that has the same
+				 * iface + port, but is not closing.
+				 */
+				assert(v->lserv_wsi == NULL);
+				v->lserv_wsi = vh->lserv_wsi;
+				vh->lserv_wsi = NULL;
+				if (v->lserv_wsi)
+					v->lserv_wsi->vhost = v;
+
+				lwsl_notice("%s: listen skt from %s to %s\n",
+					    __func__, vh->name, v->name);
+				break;
+			}
+		} lws_end_foreach_ll(v, vhost_next);
+
+	/*
+	 * Forcibly close every wsi associated with this vhost.  That will
+	 * include the listen socket if it is still associated with the closing
+	 * vhost.
+	 */
+
+	while (m--) {
+		pt = &context->pt[m];
+
+		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
+			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
+			if (!wsi)
+				continue;
+			if (wsi->vhost != vh)
+				continue;
+
+			lws_close_free_wsi(wsi,
+				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
+				"vh destroy"
+				/* no protocol close */);
+			/* close compacts the fds table, so re-check slot n */
+			n--;
+		}
+	}
+
+	/*
+	 * destroy any pending timed events
+	 */
+
+	while (vh->timed_vh_protocol_list)
+		lws_timed_callback_remove(vh, vh->timed_vh_protocol_list);
+
+	/*
+	 * let the protocols destroy the per-vhost protocol objects
+	 * (a zeroed stack wsi is used just to carry context/vhost/protocol
+	 * into the callback)
+	 */
+
+	memset(&wsi, 0, sizeof(wsi));
+	wsi.context = vh->context;
+	wsi.vhost = vh;
+	protocol = vh->protocols;
+	if (protocol && vh->created_vhost_protocols) {
+		n = 0;
+		while (n < vh->count_protocols) {
+			wsi.protocol = protocol;
+			protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
+					   NULL, NULL, 0);
+			protocol++;
+			n++;
+		}
+	}
+
+	/*
+	 * remove vhost from context list of vhosts
+	 */
+
+	lws_start_foreach_llp(struct lws_vhost **, pv, context->vhost_list) {
+		if (*pv == vh) {
+			*pv = vh->vhost_next;
+			break;
+		}
+	} lws_end_foreach_llp(pv, vhost_next);
+
+	/* add ourselves to the pending destruction list */
+
+	vh->vhost_next = vh->context->vhost_pending_destruction_list;
+	vh->context->vhost_pending_destruction_list = vh;
+}
+
+/*
+ * Phase 2 of vhost destruction: free everything the vhost owns.
+ * Removes the vhost from the deferred-free and pending-destruction lists,
+ * releases per-protocol private allocations, TLS state, logs and finally
+ * the vhost struct itself.  Must only run after phase 1's closes completed.
+ */
+static void
+lws_vhost_destroy2(struct lws_vhost *vh)
+{
+	const struct lws_protocols *protocol = NULL;
+	struct lws_context *context = vh->context;
+	struct lws_deferred_free *df;
+	int n;
+
+	lwsl_info("%s: %p\n", __func__, vh);
+
+	/* if we are still on deferred free list, remove ourselves */
+
+	lws_start_foreach_llp(struct lws_deferred_free **, pdf,
+			      context->deferred_free_list) {
+		if ((*pdf)->payload == vh) {
+			df = *pdf;
+			*pdf = df->next;
+			lws_free(df);
+			break;
+		}
+	} lws_end_foreach_llp(pdf, next);
+
+	/* remove ourselves from the pending destruction list */
+
+	lws_start_foreach_llp(struct lws_vhost **, pv,
+			      context->vhost_pending_destruction_list) {
+		if ((*pv) == vh) {
+			*pv = (*pv)->vhost_next;
+			break;
+		}
+	} lws_end_foreach_llp(pv, vhost_next);
+
+	/*
+	 * Free all the allocations associated with the vhost
+	 */
+
+	protocol = vh->protocols;
+	if (protocol) {
+		n = 0;
+		while (n < vh->count_protocols) {
+			if (vh->protocol_vh_privs &&
+			    vh->protocol_vh_privs[n]) {
+				lws_free(vh->protocol_vh_privs[n]);
+				vh->protocol_vh_privs[n] = NULL;
+			}
+			protocol++;
+			n++;
+		}
+	}
+	if (vh->protocol_vh_privs)
+		lws_free(vh->protocol_vh_privs);
+	lws_ssl_SSL_CTX_destroy(vh);
+	lws_free(vh->same_vh_protocol_list);
+
+	/* the protocols array itself is only lws-owned in these two cases */
+	if (context->plugin_list ||
+	    (context->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
+		lws_free((void *)vh->protocols);
+
+	LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar)
+		if (ar->destroy_vhost)
+			ar->destroy_vhost(vh);
+	LWS_FOR_EVERY_AVAILABLE_ROLE_END;
+
+#ifdef LWS_WITH_ACCESS_LOG
+	if (vh->log_fd != (int)LWS_INVALID_FILE)
+		close(vh->log_fd);
+#endif
+
+#if defined (LWS_WITH_TLS)
+	lws_free_set_NULL(vh->tls.alloc_cert_path);
+#endif
+
+#if LWS_MAX_SMP > 1
+	pthread_mutex_destroy(&vh->lock);
+#endif
+
+#if defined(LWS_WITH_UNIX_SOCK)
+	/* NOTE(review): LWS_UNIX_SOCK_ENABLED is a context-level check;
+	 * presumably all vhosts are unix-sock in that mode — confirm */
+	if (LWS_UNIX_SOCK_ENABLED(context)) {
+		n = unlink(vh->iface);
+		if (n)
+			lwsl_info("Closing unix socket %s: errno %d\n",
+				  vh->iface, errno);
+	}
+#endif
+	/*
+	 * although async event callbacks may still come for wsi handles with
+	 * pending close in the case of async event library like libuv,
+	 * they do not refer to the vhost.  So it's safe to free.
+	 */
+
+	lwsl_info("  %s: Freeing vhost %p\n", __func__, vh);
+
+	memset(vh, 0, sizeof(*vh));
+	lws_free(vh);
+}
+
+/*
+ * Walk the context's deferred-free list and complete (phase 2) destruction
+ * of any vhost whose deadline is more than 5s in the past, or of every
+ * entry when force is nonzero.  Always returns 0.
+ */
+int
+lws_check_deferred_free(struct lws_context *context, int force)
+{
+	struct lws_deferred_free *df;
+	time_t now = lws_now_secs();
+
+	lws_start_foreach_llp(struct lws_deferred_free **, pdf,
+			      context->deferred_free_list) {
+		if (force ||
+		    lws_compare_time_t(context, now, (*pdf)->deadline) > 5) {
+			df = *pdf;
+			*pdf = df->next;
+			/* finalize vh destruction */
+			lwsl_notice("deferred vh %p destroy\n", df->payload);
+			lws_vhost_destroy2(df->payload);
+			lws_free(df);
+			continue; /* after deletion we already point to next */
+		}
+	} lws_end_foreach_llp(pdf, next);
+
+	return 0;
+}
+
+/*
+ * Public vhost destruction entry point: runs phase 1 immediately and
+ * queues phase 2 on the context's deferred-free list, so all handle
+ * closes can complete first.
+ */
+LWS_VISIBLE void
+lws_vhost_destroy(struct lws_vhost *vh)
+{
+	struct lws_deferred_free *df = lws_malloc(sizeof(*df), "deferred free");
+
+	/* NOTE(review): on OOM this silently skips destruction entirely,
+	 * leaving the vhost alive — confirm intended best-effort behavior */
+	if (!df)
+		return;
+
+	lws_vhost_destroy1(vh);
+
+	/* part 2 is deferred to allow all the handle closes to complete */
+
+	df->next = vh->context->deferred_free_list;
+	df->deadline = lws_now_secs();
+	df->payload = vh;
+	vh->context->deferred_free_list = df;
+}
+
+/*
+ * When using an event loop, the context destruction is in three separate
+ * parts. This is to cover both internal and foreign event loops cleanly.
+ *
+ * - lws_context_destroy() simply starts a soft close of all wsi and
+ * related allocations. The event loop continues.
+ *
+ * As the closes complete in the event loop, reference counting is used
+ * to determine when everything is closed. It then calls
+ * lws_context_destroy2().
+ *
+ * - lws_context_destroy2() cleans up the rest of the higher-level logical
+ * lws pieces like vhosts. If the loop was foreign, it then proceeds to
+ * lws_context_destroy3(). If it the loop is internal, it stops the
+ * internal loops and waits for lws_context_destroy() to be called again
+ * outside the event loop (since we cannot destroy the loop from
+ * within the loop). That will cause lws_context_destroy3() to run
+ * directly.
+ *
+ * - lws_context_destroy3() destroys any internal event loops and then
+ * destroys the context itself, setting what was info.pcontext to NULL.
+ */
+
+/*
+ * destroy the actual context itself
+ */
+
+/*
+ * Final stage of context destruction: destroy per-thread event loop
+ * state, free per-thread scratch buffers and header pools, free the
+ * context itself, and NULL out the user's info.pcontext pointer.
+ */
+static void
+lws_context_destroy3(struct lws_context *context)
+{
+	struct lws_context **pcontext_finalize = context->pcontext_finalize;
+	struct lws_context_per_thread *pt;
+	int n;
+
+	for (n = 0; n < context->count_threads; n++) {
+		pt = &context->pt[n];
+
+		if (context->event_loop_ops->destroy_pt)
+			context->event_loop_ops->destroy_pt(context, n);
+
+		lws_free_set_NULL(context->pt[n].serv_buf);
+
+#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
+		while (pt->http.ah_list)
+			_lws_destroy_ah(pt, pt->http.ah_list);
+#endif
+	}
+
+	/*
+	 * log before freeing: after lws_free() even the pointer's value is
+	 * indeterminate (C11 6.2.4), so it must not be passed to the logger
+	 */
+	lwsl_info("%s: ctx %p freed\n", __func__, context);
+	lws_free(context);
+
+	if (pcontext_finalize)
+		*pcontext_finalize = NULL;
+}
+
+/*
+ * really start destroying things
+ */
+
+/*
+ * Second stage of context destruction: free the fds table, finish all
+ * vhosts (phase 2), tear down TLS/platform state and the peer-limits
+ * hash, then either hand off to the event lib's destroy_context2 or,
+ * when nothing is still inside service, go straight to stage 3.
+ */
+void
+lws_context_destroy2(struct lws_context *context)
+{
+	struct lws_vhost *vh = NULL, *vh1;
+#if defined(LWS_WITH_PEER_LIMITS)
+	uint32_t nu;
+#endif
+	int n;
+
+	lwsl_info("%s: ctx %p\n", __func__, context);
+
+	context->being_destroyed2 = 1;
+
+	/* pt[0].fds is the single allocation backing every thread's slice */
+	if (context->pt[0].fds)
+		lws_free_set_NULL(context->pt[0].fds);
+
+	/*
+	 * free all the per-vhost allocations
+	 */
+
+	vh = context->vhost_list;
+	while (vh) {
+		vh1 = vh->vhost_next;
+		lws_vhost_destroy2(vh);
+		vh = vh1;
+	}
+
+	/* remove ourselves from the pending destruction list */
+
+	while (context->vhost_pending_destruction_list)
+		/* removes itself from list */
+		lws_vhost_destroy2(context->vhost_pending_destruction_list);
+
+
+	lws_stats_log_dump(context);
+
+	lws_ssl_context_destroy(context);
+	lws_plat_context_late_destroy(context);
+
+#if defined(LWS_WITH_PEER_LIMITS)
+	for (nu = 0; nu < context->pl_hash_elements; nu++) {
+		lws_start_foreach_llp(struct lws_peer **, peer,
+				      context->pl_hash_table[nu]) {
+			struct lws_peer *df = *peer;
+			*peer = df->next;
+			lws_free(df);
+			continue;
+		} lws_end_foreach_llp(peer, next);
+	}
+	lws_free(context->pl_hash_table);
+#endif
+
+	if (context->external_baggage_free_on_destroy)
+		free(context->external_baggage_free_on_destroy);
+
+	lws_check_deferred_free(context, 1);
+
+#if LWS_MAX_SMP > 1
+	pthread_mutex_destroy(&context->lock);
+#endif
+
+	/*
+	 * a nonzero return means internal loops must stop first; the user
+	 * calls lws_context_destroy() once more to finish (stage 3)
+	 */
+	if (context->event_loop_ops->destroy_context2)
+		if (context->event_loop_ops->destroy_context2(context)) {
+			context->finalize_destroy_after_internal_loops_stopped = 1;
+			return;
+		}
+
+	/* defer stage 3 while any internal-loop thread is still in service */
+	if (!context->pt[0].event_loop_foreign)
+		for (n = 0; n < context->count_threads; n++)
+			if (context->pt[n].inside_service)
+				return;
+
+	lws_context_destroy3(context);
+}
+
+/*
+ * Begin the context takedown
+ */
+
+/*
+ * Begin (or resume) the context takedown.  Re-entrant by design: the
+ * flags being_destroyed1/2 and finalize_destroy_after_internal_loops_stopped
+ * route repeated calls to the correct later stage (see the staged-destroy
+ * comment above lws_context_destroy3()).
+ */
+LWS_VISIBLE void
+lws_context_destroy(struct lws_context *context)
+{
+	volatile struct lws_foreign_thread_pollfd *ftp, *next;
+	volatile struct lws_context_per_thread *vpt;
+	struct lws_context_per_thread *pt;
+	struct lws_vhost *vh = NULL;
+	struct lws wsi;
+	int n, m;
+
+	if (!context)
+		return;
+
+	/* third call after internal loops stopped: finish stage 3 */
+	if (context->finalize_destroy_after_internal_loops_stopped) {
+		if (context->event_loop_ops->destroy_context2)
+			context->event_loop_ops->destroy_context2(context);
+
+		lws_context_destroy3(context);
+
+		return;
+	}
+
+	/* repeated call: resume at whichever stage is pending */
+	if (context->being_destroyed1) {
+		if (!context->being_destroyed2) {
+			lws_context_destroy2(context);
+
+			return;
+		}
+		lwsl_info("%s: ctx %p: already being destroyed\n",
+			  __func__, context);
+
+		lws_context_destroy3(context);
+		return;
+	}
+
+	lwsl_info("%s: ctx %p\n", __func__, context);
+
+	m = context->count_threads;
+	context->being_destroyed = 1;
+	context->being_destroyed1 = 1;
+	context->requested_kill = 1;
+
+	memset(&wsi, 0, sizeof(wsi));
+	wsi.context = context;
+
+#ifdef LWS_LATENCY
+	if (context->worst_latency_info[0])
+		lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
+#endif
+
+	/* per thread: drop queued foreign pollfd changes, close every wsi */
+	while (m--) {
+		pt = &context->pt[m];
+		vpt = (volatile struct lws_context_per_thread *)pt;
+
+		ftp = vpt->foreign_pfd_list;
+		while (ftp) {
+			next = ftp->next;
+			lws_free((void *)ftp);
+			ftp = next;
+		}
+		vpt->foreign_pfd_list = NULL;
+
+		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
+			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
+			if (!wsi)
+				continue;
+
+			if (wsi->event_pipe)
+				lws_destroy_event_pipe(wsi);
+			else
+				lws_close_free_wsi(wsi,
+					LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
+					"ctx destroy"
+					/* no protocol close */);
+			/* close compacts the fds table, re-check slot n */
+			n--;
+		}
+		lws_pt_mutex_destroy(pt);
+	}
+
+	/*
+	 * inform all the protocols that they are done and will have no more
+	 * callbacks.
+	 *
+	 * We can't free things until after the event loop shuts down.
+	 *
+	 * NB: the while below is NOT inside the if — but vh stays NULL
+	 * unless protocol_init_done, so the loop is effectively guarded
+	 */
+	if (context->protocol_init_done)
+		vh = context->vhost_list;
+	while (vh) {
+		struct lws_vhost *vhn = vh->vhost_next;
+		lws_vhost_destroy1(vh);
+		vh = vhn;
+	}
+
+	lws_plat_context_early_destroy(context);
+
+	/*
+	 * We face two different needs depending if foreign loop or not.
+	 *
+	 * 1) If foreign loop, we really want to advance the destroy_context()
+	 *    past here, and block only for libuv-style async close completion.
+	 *
+	 * 2a) If poll, and we exited by ourselves and are calling a final
+	 *     destroy_context() outside of any service already, we want to
+	 *     advance all the way in one step.
+	 *
+	 * 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
+	 *     be in poll wait or servicing.  We can't advance the
+	 *     destroy_context() to the point it's freeing things; we have to
+	 *     leave that for the final destroy_context() after the service
+	 *     thread(s) are finished calling for service.
+	 */
+
+	if (context->event_loop_ops->destroy_context1) {
+		context->event_loop_ops->destroy_context1(context);
+
+		return;
+	}
+
+	lws_context_destroy2(context);
+}
diff --git a/thirdparty/libwebsockets/core/libwebsockets.c b/thirdparty/libwebsockets/core/libwebsockets.c
new file mode 100644
index 0000000000..58f00226f6
--- /dev/null
+++ b/thirdparty/libwebsockets/core/libwebsockets.c
@@ -0,0 +1,3479 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010-2017 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "core/private.h"
+
+#ifdef LWS_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef LWS_WITH_IPV6
+#if defined(WIN32) || defined(_WIN32)
+#include <wincrypt.h>
+#include <iphlpapi.h>
+#else
+#include <net/if.h>
+#endif
+#endif
+
+int log_level = LLL_ERR | LLL_WARN | LLL_NOTICE;
+static void (*lwsl_emit)(int level, const char *line)
+#ifndef LWS_PLAT_OPTEE
+ = lwsl_emit_stderr
+#endif
+ ;
+#ifndef LWS_PLAT_OPTEE
+static const char * const log_level_names[] = {
+ "ERR",
+ "WARN",
+ "NOTICE",
+ "INFO",
+ "DEBUG",
+ "PARSER",
+ "HEADER",
+ "EXTENSION",
+ "CLIENT",
+ "LATENCY",
+ "USER",
+ "?",
+ "?"
+};
+#endif
+
+/*
+ * open(2) wrapper: reads the variadic mode argument only when O_CREAT
+ * (or O_TMPFILE, where defined) is requested, then marks the resulting
+ * fd close-on-exec via the platform helper.  Returns the fd or -1.
+ */
+int lws_open(const char *__file, int __oflag, ...)
+{
+	va_list ap;
+	int n;
+
+	va_start(ap, __oflag);
+	if (((__oflag & O_CREAT) == O_CREAT)
+#if defined(O_TMPFILE)
+		|| ((__oflag & O_TMPFILE) == O_TMPFILE)
+#endif
+	)
+		/* last arg is really a mode_t.  But windows... */
+		n = open(__file, __oflag, va_arg(ap, uint32_t));
+	else
+		n = open(__file, __oflag);
+	va_end(ap);
+
+	lws_plat_apply_FD_CLOEXEC(n);
+
+	return n;
+}
+
+#if defined (_DEBUG)
+/* debug-build setter: replace the role bits of wsi->wsistate, with a trace */
+void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
+{
+	wsi->wsistate = (wsi->wsistate & (~LWSI_ROLE_MASK)) | role;
+
+	lwsl_debug("lwsi_set_role(%p, 0x%x)\n", wsi, wsi->wsistate);
+}
+
+/* debug-build setter: replace the lrs (state) bits of wsi->wsistate, with a trace */
+void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
+{
+	wsi->wsistate = (wsi->wsistate & (~LRS_MASK)) | lrs;
+
+	lwsl_debug("lwsi_set_state(%p, 0x%x)\n", wsi, wsi->wsistate);
+}
+#endif
+
+/*
+ * Convert one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value 0..15;
+ * returns -1 for any other character.
+ */
+signed char char_to_hex(const char c)
+{
+	if (c >= '0' && c <= '9')
+		return c - '0';
+
+	if (c >= 'a' && c <= 'f')
+		return c - 'a' + 10;
+
+	if (c >= 'A' && c <= 'F')
+		return c - 'A' + 10;
+
+	return -1;
+}
+
+/*
+ * Final teardown of a wsi (lock already held, per the __ prefix convention
+ * used elsewhere in this file): free lws-owned user space and buffers,
+ * detach from vhost/lists/role/event loop, then free the wsi itself.
+ */
+void
+__lws_free_wsi(struct lws *wsi)
+{
+	if (!wsi)
+		return;
+
+	/*
+	 * Protocol user data may be allocated either internally by lws
+	 * or by specified the user.  We should only free what we allocated.
+	 */
+	if (wsi->protocol && wsi->protocol->per_session_data_size &&
+	    wsi->user_space && !wsi->user_space_externally_allocated)
+		lws_free(wsi->user_space);
+
+	lws_buflist_destroy_all_segments(&wsi->buflist);
+	lws_free_set_NULL(wsi->trunc_alloc);
+	lws_free_set_NULL(wsi->udp);
+
+	if (wsi->vhost && wsi->vhost->lserv_wsi == wsi)
+		wsi->vhost->lserv_wsi = NULL;
+
+	// lws_peer_dump_from_wsi(wsi);
+
+	if (wsi->role_ops->destroy_role)
+		wsi->role_ops->destroy_role(wsi);
+
+#if defined(LWS_WITH_PEER_LIMITS)
+	lws_peer_track_wsi_close(wsi->context, wsi->peer);
+	wsi->peer = NULL;
+#endif
+
+	/* since we will destroy the wsi, make absolutely sure now */
+
+#if defined(LWS_WITH_OPENSSL)
+	__lws_ssl_remove_wsi_from_buffered_list(wsi);
+#endif
+	__lws_remove_from_timeout_list(wsi);
+
+	if (wsi->context->event_loop_ops->destroy_wsi)
+		wsi->context->event_loop_ops->destroy_wsi(wsi);
+
+	wsi->context->count_wsi_allocated--;
+	lwsl_debug("%s: %p, remaining wsi %d\n", __func__, wsi,
+			wsi->context->count_wsi_allocated);
+
+	lws_free(wsi);
+}
+
+/*
+ * Insert d at the front of the list headed by phead.  A node with a
+ * non-NULL prev is treated as already listed and left untouched.
+ */
+void
+lws_dll_add_front(struct lws_dll *d, struct lws_dll *phead)
+{
+	if (d->prev)
+		return;
+
+	/* our next guy is current first guy */
+	d->next = phead->next;
+	/* if there is a next guy, set his prev ptr to our next ptr */
+	if (d->next)
+		d->next->prev = d;
+	/* our prev ptr is first ptr */
+	d->prev = phead;
+	/* set the first guy to be us */
+	phead->next = d;
+}
+
+/* situation is:
+ *
+ * HEAD: struct lws_dll * = &entry1
+ *
+ * Entry 1: struct lws_dll .pprev = &HEAD , .next = Entry 2
+ * Entry 2: struct lws_dll .pprev = &entry1 , .next = &entry3
+ * Entry 3: struct lws_dll .pprev = &entry2 , .next = NULL
+ *
+ * Delete Entry1:
+ *
+ * - HEAD = &entry2
+ * - Entry2: .pprev = &HEAD, .next = &entry3
+ * - Entry3: .pprev = &entry2, .next = NULL
+ *
+ * Delete Entry2:
+ *
+ * - HEAD = &entry1
+ * - Entry1: .pprev = &HEAD, .next = &entry3
+ * - Entry3: .pprev = &entry1, .next = NULL
+ *
+ * Delete Entry3:
+ *
+ * - HEAD = &entry1
+ * - Entry1: .pprev = &HEAD, .next = &entry2
+ * - Entry2: .pprev = &entry1, .next = NULL
+ *
+ */
+
+/*
+ * Unlink d from whatever list it is on; a node with NULL prev is not
+ * listed and is left alone.  prev/next are NULLed so the node can be
+ * re-added later.
+ */
+void
+lws_dll_remove(struct lws_dll *d)
+{
+	if (!d->prev) /* ie, not part of the list */
+		return;
+
+	/*
+	 * remove us
+	 *
+	 *  USp <-> us <-> USn  -->  USp <-> USn
+	 */
+
+	/* if we have a next guy, set his prev to our prev */
+	if (d->next)
+		d->next->prev = d->prev;
+
+	/* set our prev guy to our next guy instead of us */
+	/* (this check is redundant: the guard above guarantees d->prev) */
+	if (d->prev)
+		d->prev->next = d->next;
+
+	/* we're out of the list, we should not point anywhere any more */
+	d->prev = NULL;
+	d->next = NULL;
+}
+
+/* drop wsi from the pt timeout list; caller holds the pt lock (__ prefix) */
+void
+__lws_remove_from_timeout_list(struct lws *wsi)
+{
+	lws_dll_lws_remove(&wsi->dll_timeout);
+}
+
+/* locking wrapper: take the pt lock, then remove wsi from the timeout list */
+void
+lws_remove_from_timeout_list(struct lws *wsi)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+	lws_pt_lock(pt, __func__);
+	__lws_remove_from_timeout_list(wsi);
+	lws_pt_unlock(pt);
+}
+
+/*
+ * Debug helper: log each entry of a list.  NOTE(review): the container_of
+ * uses dll_hrtimer, so this only prints sensibly for the hrtimer list —
+ * confirm before passing other list heads.
+ */
+void
+lws_dll_dump(struct lws_dll_lws *head, const char *title)
+{
+	int n = 0;
+
+	(void)n;
+	lwsl_notice("%s: %s (head.next %p)\n", __func__, title, head->next);
+
+	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1, head->next) {
+		struct lws *wsi = lws_container_of(d, struct lws, dll_hrtimer);
+
+		(void)wsi;
+
+		lwsl_notice("   %d: wsi %p: %llu\n", n++, wsi,
+				(unsigned long long)wsi->pending_timer);
+	} lws_end_foreach_dll_safe(d, d1);
+}
+
+/*
+ * (Re)arm the wsi's high-resolution timer: remove any pending entry,
+ * compute the absolute expiry in us, and insert the wsi into the pt's
+ * hrtimer list keeping it sorted earliest-first.
+ * LWS_SET_TIMER_USEC_CANCEL just cancels.
+ */
+void
+__lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+	struct lws_dll_lws *dd = &pt->dll_head_hrtimer;
+	struct timeval now;
+	struct lws *wsi1;
+	int bef = 0;
+
+	lws_dll_lws_remove(&wsi->dll_hrtimer);
+
+	if (usecs == LWS_SET_TIMER_USEC_CANCEL)
+		return;
+
+	gettimeofday(&now, NULL);
+	wsi->pending_timer = ((now.tv_sec * 1000000ll) + now.tv_usec) + usecs;
+
+	/*
+	 * we sort the hrtimer list with the earliest timeout first
+	 */
+
+	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
+				   pt->dll_head_hrtimer.next) {
+		dd = d;
+		wsi1 = lws_container_of(d, struct lws, dll_hrtimer);
+
+		if (wsi1->pending_timer >= wsi->pending_timer) {
+			/* d, dprev's next, is >= our time */
+			bef = 1;
+			break;
+		}
+	} lws_end_foreach_dll_safe(d, d1);
+
+	/* on an empty list dd is still the head and bef == 0, so we are
+	 * inserted directly after the head — both branches rely on dd */
+	if (bef) {
+		/*
+		 *  we go before dd
+		 *  DDp <-> DD <-> DDn --> DDp <-> us <-> DD <-> DDn
+		 */
+		/* we point forward to dd */
+		wsi->dll_hrtimer.next = dd;
+		/* we point back to what dd used to point back to */
+		wsi->dll_hrtimer.prev = dd->prev;
+		/* DDp points forward to us now */
+		dd->prev->next = &wsi->dll_hrtimer;
+		/* DD points back to us now */
+		dd->prev = &wsi->dll_hrtimer;
+	} else {
+		/*
+		 *  we go after dd
+		 *  DDp <-> DD <-> DDn --> DDp <-> DD <-> us <-> DDn
+		 */
+		/* we point forward to what dd used to point forward to */
+		wsi->dll_hrtimer.next = dd->next;
+		/* we point back to dd */
+		wsi->dll_hrtimer.prev = dd;
+		/* DDn points back to us */
+		if (dd->next)
+			dd->next->prev = &wsi->dll_hrtimer;
+		/* DD points forward to us */
+		dd->next = &wsi->dll_hrtimer;
+	}
+
+//	lws_dll_dump(&pt->dll_head_hrtimer, "after set_timer_usec");
+}
+
+/* public wrapper around __lws_set_timer_usecs().  NOTE(review): unlike
+ * lws_set_timeout() this takes no pt lock before calling the __ variant —
+ * confirm the hrtimer list needs no locking here */
+LWS_VISIBLE void
+lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
+{
+	__lws_set_timer_usecs(wsi, usecs);
+}
+
+/*
+ * Fire every expired hrtimer on this pt (delivering LWS_CALLBACK_TIMER;
+ * a nonzero callback return closes the wsi), then return an estimate in
+ * us until the next timer is due, or LWS_HRTIMER_NOWAIT if none pending.
+ */
+lws_usec_t
+__lws_hrtimer_service(struct lws_context_per_thread *pt)
+{
+	struct timeval now;
+	struct lws *wsi;
+	lws_usec_t t;
+
+	gettimeofday(&now, NULL);
+	t = (now.tv_sec * 1000000ll) + now.tv_usec;
+
+	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
+				   pt->dll_head_hrtimer.next) {
+		wsi = lws_container_of(d, struct lws, dll_hrtimer);
+
+		/*
+		 * if we met one in the future, we are done, because the list
+		 * is sorted by time in the future.
+		 */
+		if (wsi->pending_timer > t)
+			break;
+
+		lws_set_timer_usecs(wsi, LWS_SET_TIMER_USEC_CANCEL);
+
+		/* it's time for the timer to be serviced */
+
+		if (wsi->protocol &&
+		    wsi->protocol->callback(wsi, LWS_CALLBACK_TIMER,
+					    wsi->user_space, NULL, 0))
+			__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
+					     "timer cb errored");
+	} lws_end_foreach_dll_safe(d, d1);
+
+	/* return an estimate how many us until next timer hit */
+
+	if (!pt->dll_head_hrtimer.next)
+		return LWS_HRTIMER_NOWAIT;
+
+	wsi = lws_container_of(pt->dll_head_hrtimer.next, struct lws, dll_hrtimer);
+
+	/* re-read the clock: callbacks above may have taken time */
+	gettimeofday(&now, NULL);
+	t = (now.tv_sec * 1000000ll) + now.tv_usec;
+
+	if (wsi->pending_timer < t)
+		return 0;
+
+	return wsi->pending_timer - t;
+}
+
+/*
+ * Set (or, with reason 0, clear) the coarse per-wsi timeout: records the
+ * limit in seconds from now plus the reason, and links/unlinks the wsi on
+ * the pt timeout list.  Caller holds the pt lock (__ prefix).
+ */
+void
+__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+	time_t now;
+
+	time(&now);
+
+	lwsl_debug("%s: %p: %d secs\n", __func__, wsi, secs);
+	wsi->pending_timeout_limit = secs;
+	wsi->pending_timeout_set = now;
+	wsi->pending_timeout = reason;
+
+	/* reason 0 (NO_PENDING_TIMEOUT) means cancel */
+	if (!reason)
+		lws_dll_lws_remove(&wsi->dll_timeout);
+	else
+		lws_dll_lws_add_front(&wsi->dll_timeout, &pt->dll_head_timeout);
+}
+
+/*
+ * Public timeout setter.  The magic secs values LWS_TO_KILL_SYNC and
+ * LWS_TO_KILL_ASYNC close the wsi immediately / on next service instead
+ * of arming a timeout; otherwise takes the pt lock and delegates.
+ */
+LWS_VISIBLE void
+lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+	if (secs == LWS_TO_KILL_SYNC) {
+		lws_remove_from_timeout_list(wsi);
+		lwsl_debug("synchronously killing %p\n", wsi);
+		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "to sync kill");
+		return;
+	}
+
+	/* async kill: a zero-second timeout fires on the next service pass */
+	if (secs == LWS_TO_KILL_ASYNC)
+		secs = 0;
+
+	lws_pt_lock(pt, __func__);
+	__lws_set_timeout(wsi, reason, secs);
+	lws_pt_unlock(pt);
+}
+
+/*
+ * Unlink and free timed-protocol entry p from vh's list.
+ * Returns 0 on success, 1 if p was not found on the list.
+ */
+int
+lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p)
+{
+	lws_start_foreach_llp(struct lws_timed_vh_protocol **, pt,
+			      vh->timed_vh_protocol_list) {
+		if (*pt == p) {
+			*pt = p->next;
+			lws_free(p);
+
+			return 0;
+		}
+	} lws_end_foreach_llp(pt, next);
+
+	return 1;
+}
+
+/*
+ * Schedule a one-shot per-vhost protocol callback with the given reason,
+ * secs from now.  Entries are pushed on the front of the vhost's list
+ * (no ordering).  Returns 0 on success, 1 on allocation failure.
+ */
+LWS_VISIBLE LWS_EXTERN int
+lws_timed_callback_vh_protocol(struct lws_vhost *vh, const struct lws_protocols *prot,
+			       int reason, int secs)
+{
+	struct lws_timed_vh_protocol *p = (struct lws_timed_vh_protocol *)
+			lws_malloc(sizeof(*p), "timed_vh");
+
+	if (!p)
+		return 1;
+
+	p->protocol = prot;
+	p->reason = reason;
+	p->time = lws_now_secs() + secs;
+	p->next = vh->timed_vh_protocol_list;
+
+	vh->timed_vh_protocol_list = p;
+
+	return 0;
+}
+
+/*
+ * Detach wsi from its parent's child list (notifying the parent protocol
+ * with LWS_CALLBACK_CHILD_CLOSING first) and clear wsi->parent.
+ * No-op when wsi has no parent.
+ */
+static void
+lws_remove_child_from_any_parent(struct lws *wsi)
+{
+	struct lws **pwsi;
+	int seen = 0;
+
+	if (!wsi->parent)
+		return;
+
+	/* detach ourselves from parent's child list */
+	pwsi = &wsi->parent->child_list;
+	while (*pwsi) {
+		if (*pwsi == wsi) {
+			lwsl_info("%s: detach %p from parent %p\n", __func__,
+				  wsi, wsi->parent);
+
+			if (wsi->parent->protocol)
+				wsi->parent->protocol->callback(wsi,
+						LWS_CALLBACK_CHILD_CLOSING,
+					       wsi->parent->user_space, wsi, 0);
+
+			*pwsi = wsi->sibling_list;
+			seen = 1;
+			break;
+		}
+		pwsi = &(*pwsi)->sibling_list;
+	}
+	if (!seen)
+		lwsl_err("%s: failed to detach from parent\n", __func__);
+
+	wsi->parent = NULL;
+}
+
+/*
+ * Rebind wsi to protocol p: fires DROP_PROTOCOL on the old binding,
+ * frees lws-owned user space, re-registers on the vhost's same-protocol
+ * list (by pointer if p is inside the vhost array, else by name match),
+ * ensures user space and fires BIND_PROTOCOL.  p may be NULL to unbind.
+ * Returns 0 on success, 1 on failure.
+ */
+int
+lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p)
+{
+//	if (wsi->protocol == p)
+//		return 0;
+	const struct lws_protocols *vp = wsi->vhost->protocols, *vpo;
+
+	if (wsi->protocol && wsi->protocol_bind_balance) {
+		wsi->protocol->callback(wsi, LWS_CALLBACK_HTTP_DROP_PROTOCOL,
+					wsi->user_space, NULL, 0);
+		wsi->protocol_bind_balance = 0;
+	}
+	if (!wsi->user_space_externally_allocated)
+		lws_free_set_NULL(wsi->user_space);
+
+	lws_same_vh_protocol_remove(wsi);
+
+	wsi->protocol = p;
+	if (!p)
+		return 0;
+
+	if (lws_ensure_user_space(wsi))
+		return 1;
+
+	/* p inside the vhost's own protocols array: index by pointer math */
+	if (p > vp && p < &vp[wsi->vhost->count_protocols])
+		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
+	else {
+		/* foreign protocol struct: find the vhost slot by name */
+		int n = wsi->vhost->count_protocols;
+		int hit = 0;
+
+		vpo = vp;
+
+		while (n--) {
+			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
+				hit = 1;
+				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
+				break;
+			}
+			vp++;
+		}
+		if (!hit)
+			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
+				 __func__, p, wsi->vhost->name);
+	}
+
+	if (wsi->protocol->callback(wsi, LWS_CALLBACK_HTTP_BIND_PROTOCOL,
+				    wsi->user_space, NULL, 0))
+		return 1;
+
+	wsi->protocol_bind_balance = 1;
+
+	return 0;
+}
+
/*
 * Internal close path: tear down a connection, attempting a polite
 * role-specific close / lingering shutdown first where possible, then
 * falling through to just_kill_connection for the hard teardown.
 *
 * Caller must hold the per-thread lock (the public wrapper
 * lws_close_free_wsi() takes it).  May return early several times to let
 * pending output flush or a staged shutdown complete; in those cases the
 * close is re-entered later.  `caller` is a debug string for logging.
 */
void
__lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi1, *wsi2;
	struct lws_context *context;
	int n;

	lwsl_info("%s: %p: caller: %s\n", __func__, wsi, caller);

	if (!wsi)
		return;

	lws_access_log(wsi);

	context = wsi->context;
	pt = &context->pt[(int)wsi->tsi];
	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_CLOSE, 1);

#if !defined(LWS_NO_CLIENT)

	lws_free_set_NULL(wsi->client_hostname_copy);
	/* we are no longer an active client connection that can piggyback */
	lws_dll_lws_remove(&wsi->dll_active_client_conns);

	/*
	 * if we have wsi in our transaction queue, if we are closing we
	 * must go through and close all those first
	 */
	if (wsi->vhost) {
		/*
		 * reason == -1 flags a recursive call from the loop below,
		 * which already holds the vhost lock — don't retake it
		 */
		if ((int)reason != -1)
			lws_vhost_lock(wsi->vhost);
		lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
				wsi->dll_client_transaction_queue_head.next) {
			struct lws *w = lws_container_of(d, struct lws,
					dll_client_transaction_queue);

			__lws_close_free_wsi(w, -1, "trans q leader closing");
		} lws_end_foreach_dll_safe(d, d1);

		/*
		 * !!! If we are closing, but we have pending pipelined transaction
		 * results we already sent headers for, that's going to destroy sync
		 * for HTTP/1 and leave H2 stream with no live swsi.
		 *
		 * However this is normal if we are being closed because the transaction
		 * queue leader is closing.
		 */
		lws_dll_lws_remove(&wsi->dll_client_transaction_queue);
		if ((int)reason !=-1)
			lws_vhost_unlock(wsi->vhost);
	}
#endif

	/* if we have children, close them first */
	if (wsi->child_list) {
		wsi2 = wsi->child_list;
		while (wsi2) {
			wsi1 = wsi2->sibling_list;
			wsi2->parent = NULL;
			/* stop it doing shutdown processing */
			wsi2->socket_is_permanently_unusable = 1;
			__lws_close_free_wsi(wsi2, reason, "general child recurse");
			wsi2 = wsi1;
		}
		wsi->child_list = NULL;
	}

	/* raw file wsi: no network peer, so skip the polite-close dance */
	if (wsi->role_ops == &role_ops_raw_file) {
		lws_remove_child_from_any_parent(wsi);
		__remove_wsi_socket_from_fds(wsi);
		wsi->protocol->callback(wsi, wsi->role_ops->close_cb[0],
					wsi->user_space, NULL, 0);
		goto async_close;
	}

	/* snapshot the state before we start mutating it during close */
	wsi->wsistate_pre_close = wsi->wsistate;

#ifdef LWS_WITH_CGI
	if (wsi->role_ops == &role_ops_cgi) {
		/* we are not a network connection, but a handler for CGI io */
		if (wsi->parent && wsi->parent->http.cgi) {

			if (wsi->cgi_channel == LWS_STDOUT)
				lws_cgi_remove_and_kill(wsi->parent);

			/* end the binding between us and master */
			wsi->parent->http.cgi->stdwsi[(int)wsi->cgi_channel] = NULL;
		}
		wsi->socket_is_permanently_unusable = 1;

		goto just_kill_connection;
	}

	if (wsi->http.cgi)
		lws_cgi_remove_and_kill(wsi);
#endif

#if !defined(LWS_NO_CLIENT)
	lws_client_stash_destroy(wsi);
#endif

	if (wsi->role_ops == &role_ops_raw_skt) {
		wsi->socket_is_permanently_unusable = 1;
		goto just_kill_connection;
	}
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* release any file being served over http */
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	if (lwsi_state(wsi) == LRS_DEAD_SOCKET)
		return;

	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
	    lwsi_state(wsi) == LRS_SHUTDOWN)
		goto just_kill_connection;

	/* decide, based on the pre-close state, how hard to close */
	switch (lwsi_state_PRE_CLOSE(wsi)) {
	case LRS_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case LRS_WAITING_TO_SEND_CLOSE:
	case LRS_AWAITING_CLOSE_ACK:
	case LRS_RETURNED_CLOSE:
		goto just_kill_connection;

	case LRS_FLUSHING_BEFORE_CLOSE:
		if (wsi->trunc_len) {
			/* still flushing truncated send: come back later */
			lws_callback_on_writable(wsi);
			return;
		}
		lwsl_info("%p: end LRS_FLUSHING_BEFORE_CLOSE\n", wsi);
		goto just_kill_connection;
	default:
		if (wsi->trunc_len) {
			lwsl_info("%p: LRS_FLUSHING_BEFORE_CLOSE\n", wsi);
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
			__lws_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	if (lwsi_state(wsi) == LRS_WAITING_CONNECT ||
	    lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE)
		goto just_kill_connection;

	/* balance BIND with DROP for http server connections */
	if (!wsi->told_user_closed && lwsi_role_http(wsi) &&
	    lwsi_role_server(wsi)) {
		if (wsi->user_space && wsi->protocol &&
		    wsi->protocol_bind_balance) {
			wsi->protocol->callback(wsi,
						LWS_CALLBACK_HTTP_DROP_PROTOCOL,
					       wsi->user_space, NULL, 0);
			wsi->protocol_bind_balance = 0;
		}
	}

	/*
	 * signal we are closing, lws_write will
	 * add any necessary version-specific stuff. If the write fails,
	 * no worries we are closing anyway. If we didn't initiate this
	 * close, then our state has been changed to
	 * LRS_RETURNED_CLOSE and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */

	if (wsi->role_ops->close_via_role_protocol &&
	    wsi->role_ops->close_via_role_protocol(wsi, reason))
		return;

just_kill_connection:

	if (wsi->role_ops->close_kill_connection)
		wsi->role_ops->close_kill_connection(wsi, reason);

	lws_remove_child_from_any_parent(wsi);
	n = 0;

	if (!wsi->told_user_closed && wsi->user_space &&
	    wsi->protocol_bind_balance) {
		lwsl_debug("%s: %p: DROP_PROTOCOL %s\n", __func__, wsi,
			   wsi->protocol->name);
		wsi->protocol->callback(wsi, LWS_CALLBACK_HTTP_DROP_PROTOCOL,
					wsi->user_space, NULL, 0);
		wsi->protocol_bind_balance = 0;
	}

	/* client connect that never completed: report the error once */
	if ((lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY ||
	     lwsi_state(wsi) == LRS_WAITING_CONNECT) && !wsi->already_did_cce)
		wsi->protocol->callback(wsi,
					LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
					wsi->user_space, NULL, 0);

	/*
	 * Testing with ab shows that we have to stage the socket close when
	 * the system is under stress... shutdown any further TX, change the
	 * state to one that won't emit anything more, and wait with a timeout
	 * for the POLLIN to show a zero-size rx before coming back and doing
	 * the actual close.
	 */
	if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) &&
	    lwsi_state(wsi) != LRS_SHUTDOWN &&
	    lwsi_state(wsi) != LRS_UNCONNECTED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
	    !wsi->socket_is_permanently_unusable) {

#if defined(LWS_WITH_TLS)
		if (lws_is_ssl(wsi) && wsi->tls.ssl) {
			n = 0;
			switch (__lws_tls_shutdown(wsi)) {
			case LWS_SSL_CAPABLE_DONE:
			case LWS_SSL_CAPABLE_ERROR:
			case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				break;
			}
		} else
#endif
		{
			lwsl_info("%s: shutdown conn: %p (sock %d, state 0x%x)\n",
				  __func__, wsi, (int)(long)wsi->desc.sockfd,
				  lwsi_state(wsi));
			if (!wsi->socket_is_permanently_unusable &&
			    lws_socket_is_valid(wsi->desc.sockfd)) {
				wsi->socket_is_permanently_unusable = 1;
				/* half-close: no further TX from us */
				n = shutdown(wsi->desc.sockfd, SHUT_WR);
			}
		}
		if (n)
			lwsl_debug("closing: shutdown (state 0x%x) ret %d\n",
				   lwsi_state(wsi), LWS_ERRNO);

		/*
		 * This causes problems on WINCE / ESP32 with disconnection
		 * when the events are half closing connection
		 */
#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP32)
		/* libuv: no event available to guarantee completion */
		if (!wsi->socket_is_permanently_unusable &&
		    lws_socket_is_valid(wsi->desc.sockfd) &&
		    lwsi_state(wsi) != LRS_SHUTDOWN &&
		    context->event_loop_ops->periodic_events_available) {
			__lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			lwsi_set_state(wsi, LRS_SHUTDOWN);
			__lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
					  context->timeout_secs);

			return;
		}
#endif
	}

	lwsl_debug("%s: real just_kill_connection: %p (sockfd %d)\n", __func__,
		   wsi, wsi->desc.sockfd);

#ifdef LWS_WITH_HTTP_PROXY
	if (wsi->http.rw) {
		lws_rewrite_destroy(wsi->http.rw);
		wsi->http.rw = NULL;
	}
#endif
	/*
	 * we won't be servicing or receiving anything further from this guy
	 * delete socket from the internal poll list if still present
	 */
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
	__lws_remove_from_timeout_list(wsi);
	lws_dll_lws_remove(&wsi->dll_hrtimer);

	/* don't repeat event loop stuff */
	if (wsi->told_event_loop_closed)
		return;

	/* checking return redundant since we anyway close */
	if (wsi->desc.sockfd != LWS_SOCK_INVALID)
		__remove_wsi_socket_from_fds(wsi);
	else
		lws_same_vh_protocol_remove(wsi);

	lwsi_set_state(wsi, LRS_DEAD_SOCKET);
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll_lws_remove(&wsi->dll_buflist);

	if (wsi->role_ops->close_role)
		wsi->role_ops->close_role(pt, wsi);

	/* tell the user it's all over for this guy */

	if (lwsi_state_est_PRE_CLOSE(wsi) && !wsi->told_user_closed &&
	    wsi->role_ops->close_cb[lwsi_role_server(wsi)]) {
		const struct lws_protocols *pro = wsi->protocol;

		if (!wsi->protocol)
			pro = &wsi->vhost->protocols[0];

		if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi))
			/*
			 * The network wsi for a client h2 connection shouldn't
			 * call back for its role: the child stream connections
			 * own the role. Otherwise h2 will call back closed
			 * one too many times as the children do it and then
			 * the closing network stream.
			 */
			pro->callback(wsi,
				      wsi->role_ops->close_cb[lwsi_role_server(wsi)],
				      wsi->user_space, NULL, 0);
		wsi->told_user_closed = 1;
	}

async_close:
	wsi->socket_is_permanently_unusable = 1;

	/* event loop may need to finish the close asynchronously */
	if (wsi->context->event_loop_ops->wsi_logical_close)
		if (wsi->context->event_loop_ops->wsi_logical_close(wsi))
			return;

	__lws_close_free_wsi_final(wsi);
}
+
/*
 * Final stage of wsi teardown: close the socket fd (unless TLS still owns
 * it), deliver LWS_CALLBACK_WSI_DESTROY while user_space is still intact,
 * release CGI pipe fds, and free the wsi itself.
 */
void
__lws_close_free_wsi_final(struct lws *wsi)
{
	int n;

	/* lws_ssl_close() returning nonzero means TLS still owns the fd */
	if (lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) {
		n = compatible_close(wsi->desc.sockfd);
		if (n)
			lwsl_debug("closing: close ret %d\n", LWS_ERRNO);

		wsi->desc.sockfd = LWS_SOCK_INVALID;
	}

	/* outermost destroy notification for wsi (user_space still intact) */
	if (wsi->vhost)
		wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY,
						  wsi->user_space, NULL, 0);

#ifdef LWS_WITH_CGI
	if (wsi->http.cgi) {

		/* close our end of each stdin/stdout/stderr pipe pair */
		for (n = 0; n < 3; n++) {
			if (wsi->http.cgi->pipe_fds[n][!!(n == 0)] == 0)
				lwsl_err("ZERO FD IN CGI CLOSE");

			if (wsi->http.cgi->pipe_fds[n][!!(n == 0)] >= 0)
				close(wsi->http.cgi->pipe_fds[n][!!(n == 0)]);
		}

		lws_free(wsi->http.cgi);
	}
#endif

	__lws_free_wsi(wsi);
}
+
+
+void
+lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller)
+{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+ lws_pt_lock(pt, __func__);
+ __lws_close_free_wsi(wsi, reason, caller);
+ lws_pt_unlock(pt);
+}
+
+/* lws_buflist */
+
+int
+lws_buflist_append_segment(struct lws_buflist **head, const uint8_t *buf,
+ size_t len)
+{
+ struct lws_buflist *nbuf;
+ int first = !*head;
+ void *p = *head;
+ int sanity = 1024;
+
+ assert(buf);
+ assert(len);
+
+ /* append at the tail */
+ while (*head) {
+ if (!--sanity || head == &((*head)->next)) {
+ lwsl_err("%s: corrupt list points to self\n", __func__);
+ return -1;
+ }
+ head = &((*head)->next);
+ }
+
+ lwsl_info("%s: len %u first %d %p\n", __func__, (uint32_t)len, first, p);
+
+ nbuf = (struct lws_buflist *)
+ lws_malloc(sizeof(**head) + len, __func__);
+ if (!nbuf) {
+ lwsl_err("%s: OOM\n", __func__);
+ return -1;
+ }
+
+ nbuf->len = len;
+ nbuf->pos = 0;
+ nbuf->next = NULL;
+
+ p = (void *)nbuf->buf;
+ memcpy(p, buf, len);
+
+ *head = nbuf;
+
+ return first; /* returns 1 if first segment just created */
+}
+
+static int
+lws_buflist_destroy_segment(struct lws_buflist **head)
+{
+ struct lws_buflist *old = *head;
+
+ assert(*head);
+ *head = (*head)->next;
+ old->next = NULL;
+ lws_free(old);
+
+ return !*head; /* returns 1 if last segment just destroyed */
+}
+
+void
+lws_buflist_destroy_all_segments(struct lws_buflist **head)
+{
+ struct lws_buflist *p = *head, *p1;
+
+ while (p) {
+ p1 = p->next;
+ p->next = NULL;
+ lws_free(p);
+ p = p1;
+ }
+
+ *head = NULL;
+}
+
+size_t
+lws_buflist_next_segment_len(struct lws_buflist **head, uint8_t **buf)
+{
+ if (!*head) {
+ if (buf)
+ *buf = NULL;
+
+ return 0;
+ }
+
+ if (!(*head)->len && (*head)->next)
+ lws_buflist_destroy_segment(head);
+
+ if (!*head) {
+ if (buf)
+ *buf = NULL;
+
+ return 0;
+ }
+
+ assert((*head)->pos < (*head)->len);
+
+ if (buf)
+ *buf = (*head)->buf + (*head)->pos;
+
+ return (*head)->len - (*head)->pos;
+}
+
+int
+lws_buflist_use_segment(struct lws_buflist **head, size_t len)
+{
+ assert(*head);
+ assert(len);
+ assert((*head)->pos + len <= (*head)->len);
+
+ (*head)->pos += len;
+ if ((*head)->pos == (*head)->len)
+ lws_buflist_destroy_segment(head);
+
+ if (!*head)
+ return 0;
+
+ return (int)((*head)->len - (*head)->pos);
+}
+
+void
+lws_buflist_describe(struct lws_buflist **head, void *id)
+{
+ struct lws_buflist *old;
+ int n = 0;
+
+ if (*head == NULL)
+ lwsl_notice("%p: buflist empty\n", id);
+
+ while (*head) {
+ lwsl_notice("%p: %d: %llu / %llu (%llu left)\n", id, n,
+ (unsigned long long)(*head)->pos,
+ (unsigned long long)(*head)->len,
+ (unsigned long long)(*head)->len - (*head)->pos);
+ old = *head;
+ head = &((*head)->next);
+ if (*head == old) {
+ lwsl_err("%s: next points to self\n", __func__);
+ break;
+ }
+ n++;
+ }
+}
+
+/* ... */
+
+LWS_VISIBLE LWS_EXTERN const char *
+lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
+{
+ int n = 0, sl = (int)strlen(name);
+
+ while (lws_hdr_copy_fragment(wsi, buf, len,
+ WSI_TOKEN_HTTP_URI_ARGS, n) >= 0) {
+
+ if (!strncmp(buf, name, sl))
+ return buf + sl;
+
+ n++;
+ }
+
+ return NULL;
+}
+
+#if !defined(LWS_WITH_ESP32)
+LWS_VISIBLE int
+interface_to_sa(struct lws_vhost *vh, const char *ifname,
+ struct sockaddr_in *addr, size_t addrlen)
+{
+ int ipv6 = 0;
+#ifdef LWS_WITH_IPV6
+ ipv6 = LWS_IPV6_ENABLED(vh);
+#endif
+ (void)vh;
+
+ return lws_interface_to_sa(ipv6, ifname, addr, addrlen);
+}
+#endif
+
+#ifndef LWS_PLAT_OPTEE
/*
 * Fill `name` with the peer's resolved hostname and `rip` with its numeric
 * IP string, given the raw peer sockaddr in `ads`.
 *
 * IPv6 path: numeric conversion + reverse lookup directly on the sockaddr.
 * IPv4 path: reverse lookup first, then forward-resolve the hostname and
 * take the first AF_INET result for the numeric form.
 * Returns 0 on success, -1 on any failure (outputs may be empty strings).
 */
static int
lws_get_addresses(struct lws_vhost *vh, void *ads, char *name,
		  int name_len, char *rip, int rip_len)
{
	struct addrinfo ai, *res;
	struct sockaddr_in addr4;

	rip[0] = '\0';
	name[0] = '\0';
	addr4.sin_family = AF_UNSPEC;

#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(vh)) {
		if (!lws_plat_inet_ntop(AF_INET6,
					&((struct sockaddr_in6 *)ads)->sin6_addr,
					rip, rip_len)) {
			lwsl_err("inet_ntop: %s", strerror(LWS_ERRNO));
			return -1;
		}

		// Strip off the IPv4 to IPv6 header if one exists
		if (strncmp(rip, "::ffff:", 7) == 0)
			memmove(rip, rip + 7, strlen(rip) - 6);

		/* best-effort reverse lookup; result ignored on failure */
		getnameinfo((struct sockaddr *)ads, sizeof(struct sockaddr_in6),
			    name, name_len, NULL, 0, 0);

		return 0;
	} else
#endif
	{
		struct addrinfo *result;

		/* `ai` is used as getaddrinfo() hints */
		memset(&ai, 0, sizeof ai);
		ai.ai_family = PF_UNSPEC;
		ai.ai_socktype = SOCK_STREAM;
#if !defined(LWS_WITH_ESP32)
		if (getnameinfo((struct sockaddr *)ads,
				sizeof(struct sockaddr_in),
				name, name_len, NULL, 0, 0))
			return -1;
#endif

		if (getaddrinfo(name, NULL, &ai, &result))
			return -1;

		/* take the first AF_INET entry, if any */
		res = result;
		while (addr4.sin_family == AF_UNSPEC && res) {
			switch (res->ai_family) {
			case AF_INET:
				addr4.sin_addr =
				 ((struct sockaddr_in *)res->ai_addr)->sin_addr;
				addr4.sin_family = AF_INET;
				break;
			}

			res = res->ai_next;
		}
		freeaddrinfo(result);
	}

	if (addr4.sin_family == AF_UNSPEC)
		return -1;

	if (lws_plat_inet_ntop(AF_INET, &addr4.sin_addr, rip, rip_len) == NULL)
		return -1;

	return 0;
}
+
+
/*
 * Write the peer's numeric IP address (no reverse lookup) into name and
 * return it, or NULL on getpeername() failure.  Resolves to the network
 * wsi first, and to the parent when it carries the IO for this wsi.
 */
LWS_VISIBLE const char *
lws_get_peer_simple(struct lws *wsi, char *name, int namelen)
{
	socklen_t len, olen;
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr_in sin4;
	int af = AF_INET;
	void *p, *q;

	wsi = lws_get_network_wsi(wsi);

	if (wsi->parent_carries_io)
		wsi = wsi->parent;

	/* pick sockaddr storage and the address field within it */
#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(wsi->vhost)) {
		len = sizeof(sin6);
		p = &sin6;
		af = AF_INET6;
		q = &sin6.sin6_addr;
	} else
#endif
	{
		len = sizeof(sin4);
		p = &sin4;
		q = &sin4.sin_addr;
	}

	/* reject if the kernel reports a larger address than we offered */
	olen = len;
	if (getpeername(wsi->desc.sockfd, p, &len) < 0 || len > olen) {
		lwsl_warn("getpeername: %s\n", strerror(LWS_ERRNO));
		return NULL;
	}

	return lws_plat_inet_ntop(af, q, name, namelen);
}
+#endif
+
/*
 * Fill name (resolved hostname) and rip (numeric IP) for the peer of fd.
 * Both outputs are set to empty strings on failure.  No-op (outputs
 * untouched) on OPTEE builds, which lack the needed socket APIs.
 */
LWS_VISIBLE void
lws_get_peer_addresses(struct lws *wsi, lws_sockfd_type fd, char *name,
		       int name_len, char *rip, int rip_len)
{
#ifndef LWS_PLAT_OPTEE
	socklen_t len;
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr_in sin4;
	struct lws_context *context = wsi->context;
	int ret = -1;
	void *p;

	rip[0] = '\0';
	name[0] = '\0';

	lws_latency_pre(context, wsi);

#ifdef LWS_WITH_IPV6
	if (LWS_IPV6_ENABLED(wsi->vhost)) {
		len = sizeof(sin6);
		p = &sin6;
	} else
#endif
	{
		len = sizeof(sin4);
		p = &sin4;
	}

	if (getpeername(fd, p, &len) < 0) {
		lwsl_warn("getpeername: %s\n", strerror(LWS_ERRNO));
		goto bail;
	}

	/* do the numeric conversion and reverse lookup */
	ret = lws_get_addresses(wsi->vhost, p, name, name_len, rip, rip_len);

bail:
	lws_latency(context, wsi, "lws_get_peer_addresses", ret, 1);
#endif
	/* silence unused-parameter warnings on OPTEE */
	(void)wsi;
	(void)fd;
	(void)name;
	(void)name_len;
	(void)rip;
	(void)rip_len;

}
+
+LWS_EXTERN void *
+lws_vhost_user(struct lws_vhost *vhost)
+{
+ return vhost->user;
+}
+
+LWS_EXTERN void *
+lws_context_user(struct lws_context *context)
+{
+ return context->user_space;
+}
+
+LWS_VISIBLE struct lws_vhost *
+lws_vhost_get(struct lws *wsi)
+{
+ return wsi->vhost;
+}
+
+LWS_VISIBLE struct lws_vhost *
+lws_get_vhost(struct lws *wsi)
+{
+ return wsi->vhost;
+}
+
+LWS_VISIBLE const struct lws_protocols *
+lws_protocol_get(struct lws *wsi)
+{
+ return wsi->protocol;
+}
+
+LWS_VISIBLE const struct lws_udp *
+lws_get_udp(const struct lws *wsi)
+{
+ return wsi->udp;
+}
+
+LWS_VISIBLE struct lws *
+lws_get_network_wsi(struct lws *wsi)
+{
+ if (!wsi)
+ return NULL;
+
+#if defined(LWS_WITH_HTTP2)
+ if (!wsi->http2_substream && !wsi->client_h2_substream)
+ return wsi;
+
+ while (wsi->h2.parent_wsi)
+ wsi = wsi->h2.parent_wsi;
+#endif
+
+ return wsi;
+}
+
+LWS_VISIBLE LWS_EXTERN const struct lws_protocols *
+lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
+{
+ int n;
+
+ for (n = 0; n < vh->count_protocols; n++)
+ if (!strcmp(name, vh->protocols[n].name))
+ return &vh->protocols[n];
+
+ return NULL;
+}
+
+LWS_VISIBLE int
+lws_callback_all_protocol(struct lws_context *context,
+ const struct lws_protocols *protocol, int reason)
+{
+ struct lws_context_per_thread *pt = &context->pt[0];
+ unsigned int n, m = context->count_threads;
+ struct lws *wsi;
+
+ while (m--) {
+ for (n = 0; n < pt->fds_count; n++) {
+ wsi = wsi_from_fd(context, pt->fds[n].fd);
+ if (!wsi)
+ continue;
+ if (wsi->protocol == protocol)
+ protocol->callback(wsi, reason, wsi->user_space,
+ NULL, 0);
+ }
+ pt++;
+ }
+
+ return 0;
+}
+
+LWS_VISIBLE int
+lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
+ const struct lws_protocols *protocol, int reason,
+ void *argp, size_t len)
+{
+ struct lws_context *context = vh->context;
+ struct lws_context_per_thread *pt = &context->pt[0];
+ unsigned int n, m = context->count_threads;
+ struct lws *wsi;
+
+ while (m--) {
+ for (n = 0; n < pt->fds_count; n++) {
+ wsi = wsi_from_fd(context, pt->fds[n].fd);
+ if (!wsi)
+ continue;
+ if (wsi->vhost == vh && (wsi->protocol == protocol ||
+ !protocol))
+ wsi->protocol->callback(wsi, reason,
+ wsi->user_space, argp, len);
+ }
+ pt++;
+ }
+
+ return 0;
+}
+
+LWS_VISIBLE int
+lws_callback_all_protocol_vhost(struct lws_vhost *vh,
+ const struct lws_protocols *protocol, int reason)
+{
+ return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
+}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, int len)
+{
+ int n;
+
+ for (n = 0; n < wsi->vhost->count_protocols; n++)
+ if (wsi->vhost->protocols[n].callback(wsi, reason, NULL, in, len))
+ return 1;
+
+ return 0;
+}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
+ size_t len)
+{
+ int n;
+ struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");
+
+ wsi->context = vh->context;
+ wsi->vhost = vh;
+
+ for (n = 0; n < wsi->vhost->count_protocols; n++) {
+ wsi->protocol = &vh->protocols[n];
+ if (wsi->protocol->callback(wsi, reason, NULL, in, len)) {
+ lws_free(wsi);
+ return 1;
+ }
+ }
+
+ lws_free(wsi);
+
+ return 0;
+}
+
+LWS_VISIBLE LWS_EXTERN void
+lws_set_fops(struct lws_context *context, const struct lws_plat_file_ops *fops)
+{
+ context->fops = fops;
+}
+
+LWS_VISIBLE LWS_EXTERN lws_filepos_t
+lws_vfs_tell(lws_fop_fd_t fop_fd)
+{
+ return fop_fd->pos;
+}
+
+LWS_VISIBLE LWS_EXTERN lws_filepos_t
+lws_vfs_get_length(lws_fop_fd_t fop_fd)
+{
+ return fop_fd->len;
+}
+
+LWS_VISIBLE LWS_EXTERN uint32_t
+lws_vfs_get_mod_time(lws_fop_fd_t fop_fd)
+{
+ return fop_fd->mod_time;
+}
+
+LWS_VISIBLE lws_fileofs_t
+lws_vfs_file_seek_set(lws_fop_fd_t fop_fd, lws_fileofs_t offset)
+{
+ lws_fileofs_t ofs;
+
+ ofs = fop_fd->fops->LWS_FOP_SEEK_CUR(fop_fd, offset - fop_fd->pos);
+
+ return ofs;
+}
+
+
/*
 * Seek relative to end-of-file, expressed through the fops' relative
 * SEEK_CUR.
 *
 * NOTE(review): the delta passed is len + pos + offset, which (for a
 * current-position-relative seek) lands at absolute len + 2*pos + offset;
 * reaching EOF+offset would need len + offset - pos, as seek_set uses
 * offset - pos.  Confirm against the fops SEEK_CUR semantics before
 * relying on this with a nonzero current position.
 */
LWS_VISIBLE lws_fileofs_t
lws_vfs_file_seek_end(lws_fop_fd_t fop_fd, lws_fileofs_t offset)
{
	return fop_fd->fops->LWS_FOP_SEEK_CUR(fop_fd, fop_fd->len +
					      fop_fd->pos + offset);
}
+
+
/*
 * Choose which registered file-operations table should handle vfs_path.
 *
 * Scans the path for a registered signature string ending at a '/'; on a
 * match, *vpath is pointed just past that '/' and the matching fops is
 * returned.  Otherwise the platform (first) fops is returned and *vpath
 * stays NULL.
 */
const struct lws_plat_file_ops *
lws_vfs_select_fops(const struct lws_plat_file_ops *fops, const char *vfs_path,
		    const char **vpath)
{
	const struct lws_plat_file_ops *pf;
	const char *p = vfs_path;
	int n;

	*vpath = NULL;

	/* no non-platform fops, just use that */

	if (!fops->next)
		return fops;

	/*
	 * scan the vfs path looking for indications we are to be
	 * handled by a specific fops
	 */

	while (p && *p) {
		if (*p != '/') {
			p++;
			continue;
		}
		/* the first one is always platform fops, so skip */
		pf = fops->next;
		while (pf) {
			n = 0;
			while (n < (int)LWS_ARRAY_SIZE(pf->fi) && pf->fi[n].sig) {
				/* compare the signature ending at this '/' */
				if (p >= vfs_path + pf->fi[n].len)
					if (!strncmp(p - (pf->fi[n].len - 1),
						     pf->fi[n].sig,
						     pf->fi[n].len - 1)) {
						*vpath = p + 1;
						return pf;
					}

				n++;
			}
			pf = pf->next;
		}
		p++;
	}

	return fops;
}
+
+LWS_VISIBLE LWS_EXTERN lws_fop_fd_t LWS_WARN_UNUSED_RESULT
+lws_vfs_file_open(const struct lws_plat_file_ops *fops, const char *vfs_path,
+ lws_fop_flags_t *flags)
+{
+ const char *vpath = "";
+ const struct lws_plat_file_ops *selected;
+
+ selected = lws_vfs_select_fops(fops, vfs_path, &vpath);
+
+ return selected->LWS_FOP_OPEN(fops, vfs_path, vpath, flags);
+}
+
+
+/**
+ * lws_now_secs() - seconds since 1970-1-1
+ *
+ */
+LWS_VISIBLE LWS_EXTERN unsigned long
+lws_now_secs(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+
+ return tv.tv_sec;
+}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_compare_time_t(struct lws_context *context, time_t t1, time_t t2)
+{
+ if (t1 < context->time_discontiguity)
+ t1 += context->time_fixup;
+
+ if (t2 < context->time_discontiguity)
+ t2 += context->time_fixup;
+
+ return (int)(t1 - t2);
+}
+
+LWS_VISIBLE lws_sockfd_type
+lws_get_socket_fd(struct lws *wsi)
+{
+ if (!wsi)
+ return -1;
+ return wsi->desc.sockfd;
+}
+
#ifdef LWS_LATENCY
/*
 * Latency instrumentation (only compiled with LWS_LATENCY).
 *
 * With action == NULL, records the start timestamp(s) for the wsi.  With
 * an action string, formats and logs the elapsed time since start; when
 * `completed` is set, also resets action_start and updates the context's
 * worst-latency record.
 */
void
lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
	    int ret, int completed)
{
	unsigned long long u;
	char buf[256];

	u = time_in_microseconds();

	if (!action) {
		/* start-of-measurement call */
		wsi->latency_start = u;
		if (!wsi->action_start)
			wsi->action_start = u;
		return;
	}
	if (completed) {
		if (wsi->action_start == wsi->latency_start)
			sprintf(buf,
				"Completion first try lat %lluus: %p: ret %d: %s\n",
					u - wsi->latency_start,
						      (void *)wsi, ret, action);
		else
			sprintf(buf,
				"Completion %lluus: lat %lluus: %p: ret %d: %s\n",
					u - wsi->action_start,
					u - wsi->latency_start,
						      (void *)wsi, ret, action);
		wsi->action_start = 0;
	} else
		sprintf(buf, "lat %lluus: %p: ret %d: %s\n",
			      u - wsi->latency_start, (void *)wsi, ret, action);

	/* track the worst latency seen across the whole context */
	if (u - wsi->latency_start > context->worst_latency) {
		context->worst_latency = u - wsi->latency_start;
		strcpy(context->worst_latency_info, buf);
	}
	lwsl_latency("%s", buf);
}
#endif
+
/*
 * Enable or disable RX flow control on a connection.
 *
 * `_enable` is either a simple bool (0 = throttle rx, 1 = allow rx) or,
 * when LWS_RXFLOW_REASON_APPLIES is set, a bitmap carrying a reason byte
 * plus enable/process-now flags.  Each reason bit set in rxflow_bitmap
 * keeps rx throttled; rx is allowed only when the bitmap is clear.
 * Returns 0, or the result of applying the change immediately.
 */
LWS_VISIBLE int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->http2_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_info("%s: %p 0x%x\n", __func__, wsi, _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap &= ~(en & 0xff);
	else
		wsi->rxflow_bitmap |= en & 0xff;

	/* no change in the effective state: nothing to do */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE | !wsi->rxflow_bitmap;

	lwsl_info("%s: %p: bitmap 0x%x: en 0x%x, ch 0x%x\n", __func__, wsi,
		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	/* apply now unless a callback is in flight that will apply it */
	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
+
+LWS_VISIBLE void
+lws_rx_flow_allow_all_protocol(const struct lws_context *context,
+ const struct lws_protocols *protocol)
+{
+ const struct lws_context_per_thread *pt = &context->pt[0];
+ struct lws *wsi;
+ unsigned int n, m = context->count_threads;
+
+ while (m--) {
+ for (n = 0; n < pt->fds_count; n++) {
+ wsi = wsi_from_fd(context, pt->fds[n].fd);
+ if (!wsi)
+ continue;
+ if (wsi->protocol == protocol)
+ lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
+ }
+ pt++;
+ }
+}
+
+int
+lws_broadcast(struct lws_context *context, int reason, void *in, size_t len)
+{
+ struct lws_vhost *v = context->vhost_list;
+ struct lws wsi;
+ int n, ret = 0;
+
+ memset(&wsi, 0, sizeof(wsi));
+ wsi.context = context;
+
+ while (v) {
+ const struct lws_protocols *p = v->protocols;
+ wsi.vhost = v;
+
+ for (n = 0; n < v->count_protocols; n++) {
+ wsi.protocol = p;
+ if (p->callback &&
+ p->callback(&wsi, reason, NULL, in, len))
+ ret |= 1;
+ p++;
+ }
+ v = v->vhost_next;
+ }
+
+ return ret;
+}
+
+LWS_VISIBLE extern const char *
+lws_canonical_hostname(struct lws_context *context)
+{
+ return (const char *)context->canonical_hostname;
+}
+
+LWS_VISIBLE LWS_EXTERN const char *
+lws_get_vhost_name(struct lws_vhost *vhost)
+{
+ return vhost->name;
+}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_get_vhost_port(struct lws_vhost *vhost)
+{
+ return vhost->listen_port;
+}
+
+LWS_VISIBLE LWS_EXTERN void *
+lws_get_vhost_user(struct lws_vhost *vhost)
+{
+ return vhost->user;
+}
+
+LWS_VISIBLE LWS_EXTERN const char *
+lws_get_vhost_iface(struct lws_vhost *vhost)
+{
+ return vhost->iface;
+}
+
+int user_callback_handle_rxflow(lws_callback_function callback_function,
+ struct lws *wsi,
+ enum lws_callback_reasons reason, void *user,
+ void *in, size_t len)
+{
+ int n;
+
+ wsi->rxflow_will_be_applied = 1;
+ n = callback_function(wsi, reason, user, in, len);
+ wsi->rxflow_will_be_applied = 0;
+ if (!n)
+ n = __lws_rx_flow_control(wsi);
+
+ return n;
+}
+
#if !defined(LWS_WITHOUT_CLIENT)
/*
 * Configure an outbound http proxy for the vhost from a string of the
 * form "[http://][user:pass@]host:port".  Any user:pass is base64-encoded
 * into proxy_basic_auth_token for later Basic auth use.
 * Returns 0 on success, -1 on bad/overlong input.
 */
LWS_VISIBLE int
lws_set_proxy(struct lws_vhost *vhost, const char *proxy)
{
	char *p;
	char authstring[96];

	if (!proxy)
		return -1;

	/* we have to deal with a possible redundant leading http:// */
	if (!strncmp(proxy, "http://", 7))
		proxy += 7;

	p = strrchr(proxy, '@');
	if (p) { /* auth is around */

		/* bound the user:pass portion to the local buffer */
		if ((unsigned int)(p - proxy) > sizeof(authstring) - 1)
			goto auth_too_long;

		lws_strncpy(authstring, proxy, p - proxy + 1);
		// null termination not needed on input
		if (lws_b64_encode_string(authstring, lws_ptr_diff(p, proxy),
				          vhost->proxy_basic_auth_token,
				          sizeof vhost->proxy_basic_auth_token) < 0)
			goto auth_too_long;

		lwsl_info(" Proxy auth in use\n");

		/* continue parsing after the '@' */
		proxy = p + 1;
	} else
		vhost->proxy_basic_auth_token[0] = '\0';

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	lws_strncpy(vhost->http.http_proxy_address, proxy,
		    sizeof(vhost->http.http_proxy_address));

	/* split host:port; an already-set port makes ":port" optional */
	p = strchr(vhost->http.http_proxy_address, ':');
	if (!p && !vhost->http.http_proxy_port) {
		lwsl_err("http_proxy needs to be ads:port\n");

		return -1;
	} else {
		if (p) {
			*p = '\0';
			vhost->http.http_proxy_port = atoi(p + 1);
		}
	}

	lwsl_info(" Proxy %s:%u\n", vhost->http.http_proxy_address,
		  vhost->http.http_proxy_port);
#endif
	return 0;

auth_too_long:
	lwsl_err("proxy auth too long\n");

	return -1;
}
#endif
+
#if defined(LWS_WITH_SOCKS5)
/*
 * Configure a SOCKS5 proxy for the vhost from a string of the form
 * "[user:pass@]host:port".  User and password are stored separately on
 * the vhost for the SOCKS auth exchange.
 * Returns 0 on success, -1 on bad/overlong input.
 */
LWS_VISIBLE int
lws_set_socks(struct lws_vhost *vhost, const char *socks)
{
	char *p_at, *p_colon;
	char user[96];
	char password[96];

	if (!socks)
		return -1;

	vhost->socks_user[0] = '\0';
	vhost->socks_password[0] = '\0';

	p_at = strrchr(socks, '@');
	if (p_at) { /* auth is around */
		/* bound the whole user:pass portion */
		if ((unsigned int)(p_at - socks) > (sizeof(user)
			+ sizeof(password) - 2)) {
			lwsl_err("Socks auth too long\n");
			goto bail;
		}

		p_colon = strchr(socks, ':');
		if (p_colon) {
			if ((unsigned int)(p_colon - socks) > (sizeof(user)
				- 1) ) {
				lwsl_err("Socks user too long\n");
				goto bail;
			}
			if ((unsigned int)(p_at - p_colon) > (sizeof(password)
				- 1) ) {
				lwsl_err("Socks password too long\n");
				goto bail;
			}

			/* split "user:pass" at the ':' */
			lws_strncpy(vhost->socks_user, socks, p_colon - socks + 1);
			lws_strncpy(vhost->socks_password, p_colon + 1,
				p_at - (p_colon + 1) + 1);
		}

		lwsl_info(" Socks auth, user: %s, password: %s\n",
			vhost->socks_user, vhost->socks_password );

		/* continue parsing after the '@' */
		socks = p_at + 1;
	}

	lws_strncpy(vhost->socks_proxy_address, socks,
		    sizeof(vhost->socks_proxy_address));

	/* split host:port; an already-set port makes ":port" optional */
	p_colon = strchr(vhost->socks_proxy_address, ':');
	if (!p_colon && !vhost->socks_proxy_port) {
		lwsl_err("socks_proxy needs to be address:port\n");
		return -1;
	} else {
		if (p_colon) {
			*p_colon = '\0';
			vhost->socks_proxy_port = atoi(p_colon + 1);
		}
	}

	lwsl_info(" Socks %s:%u\n", vhost->socks_proxy_address,
		  vhost->socks_proxy_port);

	return 0;

bail:
	return -1;
}
#endif
+
+LWS_VISIBLE const struct lws_protocols *
+lws_get_protocol(struct lws *wsi)
+{
+ return wsi->protocol;
+}
+
+
+int
+lws_ensure_user_space(struct lws *wsi)
+{
+ if (!wsi->protocol)
+ return 0;
+
+ /* allocate the per-connection user memory (if any) */
+
+ if (wsi->protocol->per_session_data_size && !wsi->user_space) {
+ wsi->user_space = lws_zalloc(
+ wsi->protocol->per_session_data_size, "user space");
+ if (wsi->user_space == NULL) {
+ lwsl_err("%s: OOM\n", __func__);
+ return 1;
+ }
+ } else
+ lwsl_debug("%s: %p protocol pss %lu, user_space=%p\n", __func__,
+ wsi, (long)wsi->protocol->per_session_data_size,
+ wsi->user_space);
+ return 0;
+}
+
LWS_VISIBLE void *
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	/*
	 * Change the protocol's per-session user data size at runtime and
	 * make sure this wsi has a user_space allocation to match.
	 * Returns the wsi's user_space, or NULL on OOM.
	 *
	 * NOTE(review): this casts away const and mutates the shared
	 * protocols struct, so it affects every wsi bound to this protocol.
	 */
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
		return NULL;

	return wsi->user_space;
}
+
LWS_VISIBLE int
lwsl_timestamp(int level, char *p, int len)
{
	/*
	 * Render a "[date time:frac] LEVEL: " prefix for the single level
	 * bit set in `level` into p (len bytes).  Returns the number of
	 * characters written, or 0 if level is not one known bit (on
	 * OPTEE builds, always writes "" and returns 0).
	 */
#ifndef LWS_PLAT_OPTEE
	time_t o_now = time(NULL);
	unsigned long long now;
	struct tm *ptm = NULL;
#ifndef WIN32
	struct tm tm;
#endif
	int n;

#ifndef _WIN32_WCE
#ifdef WIN32
	ptm = localtime(&o_now);
#else
	/* reentrant variant where available */
	if (localtime_r(&o_now, &tm))
		ptm = &tm;
#endif
#endif
	p[0] = '\0';
	for (n = 0; n < LLL_COUNT; n++) {
		if (level != (1 << n))
			continue;
		/* 100us units; "% 10000" below yields a 0.1ms fraction */
		now = time_in_microseconds() / 100;
		if (ptm)
			/* note: log_level_names[n] is evaluated as an
			 * argument before n is overwritten by the result */
			n = lws_snprintf(p, len,
				"[%04d/%02d/%02d %02d:%02d:%02d:%04d] %s: ",
				ptm->tm_year + 1900,
				ptm->tm_mon + 1,
				ptm->tm_mday,
				ptm->tm_hour,
				ptm->tm_min,
				ptm->tm_sec,
				(int)(now % 10000), log_level_names[n]);
		else
			n = lws_snprintf(p, len, "[%llu:%04d] %s: ",
					(unsigned long long) now / 10000,
					(int)(now % 10000), log_level_names[n]);
		return n;
	}
#else
	p[0] = '\0';
#endif

	return 0;
}
+
+#ifndef LWS_PLAT_OPTEE
/*
 * ANSI SGR colour sequence per log level; index n corresponds to the
 * level bit (1 << n).  Used by lwsl_emit_stderr() between ESC chars.
 */
static const char * const colours[] = {
	"[31;1m", /* LLL_ERR */
	"[36;1m", /* LLL_WARN */
	"[35;1m", /* LLL_NOTICE */
	"[32;1m", /* LLL_INFO */
	"[34;1m", /* LLL_DEBUG */
	"[33;1m", /* LLL_PARSER */
	"[33m", /* LLL_HEADER */
	"[33m", /* LLL_EXT */
	"[33m", /* LLL_CLIENT */
	"[33;1m", /* LLL_LATENCY */
	"[30;1m", /* LLL_USER */
};
+
LWS_VISIBLE void lwsl_emit_stderr(int level, const char *line)
{
	/*
	 * Default log emitter: timestamp prefix plus (optionally coloured)
	 * message on stderr.
	 */
	char buf[50];
	/* encoding: bit 1 = "probed", bit 0 = "is a tty".
	 * NOTE(review): initialised to 3, so the isatty() probe below is
	 * dead code and colour output is always used — confirm this is the
	 * intended (patched) behaviour rather than an init of 0 */
	static char tty = 3;
	int n, m = LWS_ARRAY_SIZE(colours) - 1;

	if (!tty)
		tty = isatty(2) | 2;
	lwsl_timestamp(level, buf, sizeof(buf));

	if (tty == 3) {
		/* m ends up as the index of the highest set level bit */
		n = 1 << (LWS_ARRAY_SIZE(colours) - 1);
		while (n) {
			if (level & n)
				break;
			m--;
			n >>= 1;
		}
		fprintf(stderr, "%c%s%s%s%c[0m", 27, colours[m], buf, line, 27);
	} else
		fprintf(stderr, "%s%s", buf, line);
}
+#endif
+
LWS_VISIBLE void _lws_logv(int filter, const char *format, va_list vl)
{
	/*
	 * Core log entry: drop if the level is filtered out, otherwise
	 * format into a bounded stack buffer and hand to the installed
	 * emitter (lwsl_emit).  Output longer than the buffer is truncated.
	 */
	char buf[256];
	int n;

	if (!(log_level & filter))
		return;

	n = vsnprintf(buf, sizeof(buf) - 1, format, vl);
	(void)n;
	/* vsnprintf returns what it would have written, even if truncated */
	if (n > (int)sizeof(buf) - 1)
		n = sizeof(buf) - 1;
	if (n > 0)
		buf[n] = '\0';

	lwsl_emit(filter, buf);
}
+
+LWS_VISIBLE void _lws_log(int filter, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ _lws_logv(filter, format, ap);
+ va_end(ap);
+}
+
+LWS_VISIBLE void lws_set_log_level(int level,
+ void (*func)(int level, const char *line))
+{
+ log_level = level;
+ if (func)
+ lwsl_emit = func;
+}
+
+LWS_VISIBLE int lwsl_visible(int level)
+{
+ return log_level & level;
+}
+
LWS_VISIBLE void
lwsl_hexdump_level(int hexdump_level, const void *vbuf, size_t len)
{
	/*
	 * Classic hex + ASCII dump of vbuf/len at the given log level, one
	 * 16-byte row per log line.  No-op if the level is filtered out,
	 * len is 0 or vbuf is NULL.
	 */
	unsigned char *buf = (unsigned char *)vbuf;
	unsigned int n, m, start;
	/* 80 covers "XXXX: " + 16 x "XX " + gap + 16 ASCII + "\n\0" */
	char line[80];
	char *p;

	if (!lwsl_visible(hexdump_level))
		return;

	if (!len)
		return;

	if (!vbuf)
		return;

	_lws_log(hexdump_level, "\n");

	for (n = 0; n < len;) {
		start = n;
		p = line;

		p += sprintf(p, "%04X: ", start);

		/* hex column, space-padded so the ASCII column aligns */
		for (m = 0; m < 16 && n < len; m++)
			p += sprintf(p, "%02X ", buf[n++]);
		while (m++ < 16)
			p += sprintf(p, "   ");

		p += sprintf(p, "   ");

		/* printable ASCII rendering, '.' for everything else */
		for (m = 0; m < 16 && (start + m) < len; m++) {
			if (buf[start + m] >= ' ' && buf[start + m] < 127)
				*p++ = buf[start + m];
			else
				*p++ = '.';
		}
		while (m++ < 16)
			*p++ = ' ';

		*p++ = '\n';
		*p = '\0';
		_lws_log(hexdump_level, "%s", line);
		(void)line;
	}

	_lws_log(hexdump_level, "\n");
}
+
+LWS_VISIBLE void
+lwsl_hexdump(const void *vbuf, size_t len)
+{
+#if defined(_DEBUG)
+ lwsl_hexdump_level(LLL_DEBUG, vbuf, len);
+#endif
+}
+
/* nonzero when this connection is (to be) TLS-protected */
LWS_VISIBLE int
lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}

#if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
/* accessor: the underlying TLS connection object (OpenSSL builds only) */
LWS_VISIBLE lws_tls_conn*
lws_get_ssl(struct lws *wsi)
{
	return wsi->tls.ssl;
}
#endif
+
/* nonzero while a truncated ("partial") send is still buffered for wsi */
LWS_VISIBLE int
lws_partial_buffered(struct lws *wsi)
{
	return !!wsi->trunc_len;
}

/* remaining peer tx credit if the role tracks it, else -1 (no limit info) */
LWS_VISIBLE lws_fileofs_t
lws_get_peer_write_allowance(struct lws *wsi)
{
	if (!wsi->role_ops->tx_credit)
		return -1;
	return wsi->role_ops->tx_credit(wsi);
}
+
LWS_VISIBLE void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    struct lws_role_ops *ops)
{
	/*
	 * Move wsi to a new role/state combination, optionally swapping in
	 * the role's ops vtable (ops == NULL keeps the current one).
	 */
#if defined(_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = role | state;
	if (ops)
		wsi->role_ops = ops;
#if defined(_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_debug("%s: %p: wsistate 0x%x, ops %s\n", __func__, wsi,
		   wsi->wsistate, name);
#endif
}
+
/* platform file ops table (const cast away for historical API reasons) */
LWS_VISIBLE struct lws_plat_file_ops *
lws_get_fops(struct lws_context *context)
{
	return (struct lws_plat_file_ops *)context->fops;
}

/* context this wsi belongs to */
LWS_VISIBLE LWS_EXTERN struct lws_context *
lws_get_context(const struct lws *wsi)
{
	return wsi->context;
}

/* number of service threads this context was created with */
LWS_VISIBLE LWS_EXTERN int
lws_get_count_threads(struct lws_context *context)
{
	return context->count_threads;
}

/* per-connection user memory (see lws_ensure_user_space()) */
LWS_VISIBLE LWS_EXTERN void *
lws_wsi_user(struct lws *wsi)
{
	return wsi->user_space;
}

/* replace the user pointer: only legal when it was externally allocated */
LWS_VISIBLE LWS_EXTERN void
lws_set_wsi_user(struct lws *wsi, void *data)
{
	if (wsi->user_space_externally_allocated)
		wsi->user_space = data;
	else
		lwsl_err("%s: Cannot set internally-allocated user_space\n",
			 __func__);
}

/* parent wsi, if this wsi was adopted as a child */
LWS_VISIBLE LWS_EXTERN struct lws *
lws_get_parent(const struct lws *wsi)
{
	return wsi->parent;
}

/* head of this wsi's child list */
LWS_VISIBLE LWS_EXTERN struct lws *
lws_get_child(const struct lws *wsi)
{
	return wsi->child_list;
}

/* mark that the parent performs the actual IO on behalf of this child */
LWS_VISIBLE LWS_EXTERN void
lws_set_parent_carries_io(struct lws *wsi)
{
	wsi->parent_carries_io = 1;
}

/* opaque pointer the parent stashed on this wsi */
LWS_VISIBLE LWS_EXTERN void *
lws_get_opaque_parent_data(const struct lws *wsi)
{
	return wsi->opaque_parent_data;
}

LWS_VISIBLE LWS_EXTERN void
lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	wsi->opaque_parent_data = data;
}

/* nonzero if a child requested a writable callback through the parent */
LWS_VISIBLE LWS_EXTERN int
lws_get_child_pending_on_writable(const struct lws *wsi)
{
	return wsi->parent_pending_cb_on_writable;
}

LWS_VISIBLE LWS_EXTERN void
lws_clear_child_pending_on_writable(struct lws *wsi)
{
	wsi->parent_pending_cb_on_writable = 0;
}
+
+
LWS_EXTERN int
__lws_rx_flow_control(struct lws *wsi)
{
	/*
	 * Apply any pending rx flow-control change on wsi (and recursively
	 * on its children), adjusting POLLIN in its pollfd entry.
	 * Returns 0 on success or nothing to do, -1 on pollfd failure.
	 * Double-underscore: caller already holds the relevant pt lock.
	 */
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->http2_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= ~LWS_RXFLOW_PENDING_CHANGE;

	lwsl_info("rxflow: wsi %p change_to %d\n", wsi,
		  wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: fail\n", __func__);
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
+
LWS_EXTERN int
lws_check_utf8(unsigned char *state, unsigned char *buf, size_t len)
{
	/*
	 * Incremental UTF-8 validity check over buf/len, table-driven DFA.
	 * *state carries the DFA state across calls (0 = between chars);
	 * returns 0 while the stream is valid so far, 1 on invalid input.
	 *
	 * Each table entry packs: high nibble = lowest acceptable next
	 * byte, bits 2..3 = width of the acceptance window selector,
	 * bits 0..1 = index of the follow-on state.
	 */
	static const unsigned char e0f4[] = {
		0xa0 | ((2 - 1) << 2) | 1, /* e0 */
		0x80 | ((4 - 1) << 2) | 1, /* e1 */
		0x80 | ((4 - 1) << 2) | 1, /* e2 */
		0x80 | ((4 - 1) << 2) | 1, /* e3 */
		0x80 | ((4 - 1) << 2) | 1, /* e4 */
		0x80 | ((4 - 1) << 2) | 1, /* e5 */
		0x80 | ((4 - 1) << 2) | 1, /* e6 */
		0x80 | ((4 - 1) << 2) | 1, /* e7 */
		0x80 | ((4 - 1) << 2) | 1, /* e8 */
		0x80 | ((4 - 1) << 2) | 1, /* e9 */
		0x80 | ((4 - 1) << 2) | 1, /* ea */
		0x80 | ((4 - 1) << 2) | 1, /* eb */
		0x80 | ((4 - 1) << 2) | 1, /* ec */
		0x80 | ((2 - 1) << 2) | 1, /* ed */
		0x80 | ((4 - 1) << 2) | 1, /* ee */
		0x80 | ((4 - 1) << 2) | 1, /* ef */
		0x90 | ((3 - 1) << 2) | 2, /* f0 */
		0x80 | ((4 - 1) << 2) | 2, /* f1 */
		0x80 | ((4 - 1) << 2) | 2, /* f2 */
		0x80 | ((4 - 1) << 2) | 2, /* f3 */
		0x80 | ((1 - 1) << 2) | 2, /* f4 */

		0, /* s0 */
		0x80 | ((4 - 1) << 2) | 0, /* s2 */
		0x80 | ((4 - 1) << 2) | 1, /* s3 */
	};
	const unsigned char *p = buf, *end = buf + len;
	unsigned char st = *state;

	while (p < end) {
		unsigned char ch = *p++;

		if (!st) {
			if (ch < 0x80)
				continue; /* ASCII fast path */
			if (ch < 0xc2 || ch > 0xf4)
				return 1; /* overlong or out-of-range lead */
			st = ch < 0xe0 ? 0x80 | ((4 - 1) << 2)
				       : e0f4[ch - 0xe0];
			continue;
		}

		/* continuation byte must fall in the state's window */
		if (ch < (st & 0xf0) ||
		    ch >= (st & 0xf0) + 0x10 + ((st << 2) & 0x30))
			return 1;
		st = e0f4[21 + (st & 3)];
	}

	*state = st;

	return 0;
}
+
LWS_VISIBLE LWS_EXTERN int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	/*
	 * Split "proto://host:port/path" IN PLACE: NULs are written into p
	 * and the out-pointers aim into it (so p must outlive them).
	 * Bracketed IPv6 literals are supported; *port defaults to 80/443
	 * for http/ws and https/wss; *path defaults to a static "/".
	 * Always returns 0.
	 */
	static const char *slash = "/";
	char *cp = p;

	*prot = cp;
	while (*cp && strncmp(cp, "://", 3))
		cp++;
	if (!*cp) {
		/* no scheme: prot becomes the empty terminator, and the
		 * whole original string is treated as the address part */
		*prot = cp;
		cp = p;
	} else {
		*cp = '\0';
		cp += 3;
	}
	*ads = cp;

	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*cp == '[') {
		/* bracketed IPv6 literal: address starts past the '[' */
		(*ads)++;
		while (*cp && *cp != ']')
			cp++;
		if (*cp)
			*cp++ = '\0';
	} else
		while (*cp && *cp != ':' && *cp != '/')
			cp++;

	if (*cp == ':') {
		*cp++ = '\0';
		*port = atoi(cp);
		while (*cp && *cp != '/')
			cp++;
	}

	*path = slash;
	if (*cp) {
		*cp++ = '\0';
		if (*cp)
			*path = cp;	/* note: without the leading '/' */
	}

	return 0;
}
+
+#if defined(LWS_WITHOUT_EXTENSIONS)
+
+/* we need to provide dummy callbacks for internal exts
+ * so user code runs when faced with a lib compiled with
+ * extensions disabled.
+ */
+
+LWS_VISIBLE int
+lws_extension_callback_pm_deflate(struct lws_context *context,
+ const struct lws_extension *ext,
+ struct lws *wsi,
+ enum lws_extension_callback_reasons reason,
+ void *user, void *in, size_t len)
+{
+ (void)context;
+ (void)ext;
+ (void)wsi;
+ (void)reason;
+ (void)user;
+ (void)in;
+ (void)len;
+
+ return 0;
+}
+
LWS_EXTERN int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	/* extensions are compiled out: nothing can ever be set */
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
+#endif
+
LWS_EXTERN int
lws_socket_bind(struct lws_vhost *vhost, lws_sockfd_type sockfd, int port,
		const char *iface)
{
	/*
	 * Bind sockfd for this vhost to port/iface, choosing UNIX-domain,
	 * IPv6 or IPv4 addressing per the vhost configuration.
	 *
	 * Passing sockfd == LWS_SOCK_INVALID only validates the interface
	 * choice.  Returns the actually-bound port (useful when port was 0),
	 * or a negative LWS_ITOSA_* / -1 error code.
	 */
#ifdef LWS_WITH_UNIX_SOCK
	struct sockaddr_un serv_unix;
#endif
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 serv_addr6;
#endif
	struct sockaddr_in serv_addr4;
#ifndef LWS_PLAT_OPTEE
	socklen_t len = sizeof(struct sockaddr_storage);
#endif
	int n;
#if !defined(LWS_WITH_ESP32)
	int m;
#endif
	struct sockaddr_storage sin;
	struct sockaddr *v;

#ifdef LWS_WITH_UNIX_SOCK
	if (LWS_UNIX_SOCK_ENABLED(vhost)) {
		/* iface is the filesystem path ('@' prefix = abstract ns) */
		v = (struct sockaddr *)&serv_unix;
		n = sizeof(struct sockaddr_un);
		bzero((char *) &serv_unix, sizeof(serv_unix));
		serv_unix.sun_family = AF_UNIX;
		if (!iface)
			return -1;
		if (sizeof(serv_unix.sun_path) <= strlen(iface)) {
			lwsl_err("\"%s\" too long for UNIX domain socket\n",
				 iface);
			return -1;
		}
		strcpy(serv_unix.sun_path, iface);
		if (serv_unix.sun_path[0] == '@')
			serv_unix.sun_path[0] = '\0';

	} else
#endif
#if defined(LWS_WITH_IPV6) && !defined(LWS_WITH_ESP32)
	if (LWS_IPV6_ENABLED(vhost)) {
		v = (struct sockaddr *)&serv_addr6;
		n = sizeof(struct sockaddr_in6);
		bzero((char *) &serv_addr6, sizeof(serv_addr6));
		if (iface) {
			/* resolve the netif name to an address for bind */
			m = interface_to_sa(vhost, iface,
				    (struct sockaddr_in *)v, n);
			if (m == LWS_ITOSA_NOT_USABLE) {
				lwsl_info("%s: netif %s: Not usable\n",
					 __func__, iface);
				return m;
			}
			if (m == LWS_ITOSA_NOT_EXIST) {
				lwsl_info("%s: netif %s: Does not exist\n",
					 __func__, iface);
				return m;
			}
			serv_addr6.sin6_scope_id = lws_get_addr_scope(iface);
		}

		serv_addr6.sin6_family = AF_INET6;
		serv_addr6.sin6_port = htons(port);
	} else
#endif
	{
		/* IPv4 fallback: any address unless an iface was given */
		v = (struct sockaddr *)&serv_addr4;
		n = sizeof(serv_addr4);
		bzero((char *) &serv_addr4, sizeof(serv_addr4));
		serv_addr4.sin_addr.s_addr = INADDR_ANY;
		serv_addr4.sin_family = AF_INET;
#if !defined(LWS_WITH_ESP32)

		if (iface) {
			m = interface_to_sa(vhost, iface,
				    (struct sockaddr_in *)v, n);
			if (m == LWS_ITOSA_NOT_USABLE) {
				lwsl_info("%s: netif %s: Not usable\n",
					 __func__, iface);
				return m;
			}
			if (m == LWS_ITOSA_NOT_EXIST) {
				lwsl_info("%s: netif %s: Does not exist\n",
					 __func__, iface);
				return m;
			}
		}
#endif
		serv_addr4.sin_port = htons(port);
	} /* ipv4 */

	/* just checking for the interface extant */
	if (sockfd == LWS_SOCK_INVALID)
		return 0;

	n = bind(sockfd, v, n);
#ifdef LWS_WITH_UNIX_SOCK
	if (n < 0 && LWS_UNIX_SOCK_ENABLED(vhost)) {
		lwsl_err("ERROR on binding fd %d to \"%s\" (%d %d)\n",
			 sockfd, iface, n, LWS_ERRNO);
		return -1;
	} else
#endif
	if (n < 0) {
		lwsl_err("ERROR on binding fd %d to port %d (%d %d)\n",
			 sockfd, port, n, LWS_ERRNO);
		return -1;
	}

	/* discover which port the kernel actually gave us */
#ifndef LWS_PLAT_OPTEE
	if (getsockname(sockfd, (struct sockaddr *)&sin, &len) == -1)
		lwsl_warn("getsockname: %s\n", strerror(LWS_ERRNO));
	else
#endif
#if defined(LWS_WITH_IPV6)
		port = (sin.ss_family == AF_INET6) ?
			ntohs(((struct sockaddr_in6 *) &sin)->sin6_port) :
			ntohs(((struct sockaddr_in *) &sin)->sin_port);
#else
	{
		struct sockaddr_in sain;
		memcpy(&sain, &sin, sizeof(sain));
		port = ntohs(sain.sin_port);
	}
#endif

	return port;
}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_get_vhost_listen_port(struct lws_vhost *vhost)
+{
+ return vhost->listen_port;
+}
+
+#if defined(LWS_WITH_IPV6)
+LWS_EXTERN unsigned long
+lws_get_addr_scope(const char *ipaddr)
+{
+ unsigned long scope = 0;
+
+#ifndef WIN32
+ struct ifaddrs *addrs, *addr;
+ char ip[NI_MAXHOST];
+ unsigned int i;
+
+ getifaddrs(&addrs);
+ for (addr = addrs; addr; addr = addr->ifa_next) {
+ if (!addr->ifa_addr ||
+ addr->ifa_addr->sa_family != AF_INET6)
+ continue;
+
+ getnameinfo(addr->ifa_addr,
+ sizeof(struct sockaddr_in6),
+ ip, sizeof(ip),
+ NULL, 0, NI_NUMERICHOST);
+
+ i = 0;
+ while (ip[i])
+ if (ip[i++] == '%') {
+ ip[i - 1] = '\0';
+ break;
+ }
+
+ if (!strcmp(ip, ipaddr)) {
+ scope = if_nametoindex(addr->ifa_name);
+ break;
+ }
+ }
+ freeifaddrs(addrs);
+#else
+ PIP_ADAPTER_ADDRESSES adapter, addrs = NULL;
+ PIP_ADAPTER_UNICAST_ADDRESS addr;
+ ULONG size = 0;
+ DWORD ret;
+ struct sockaddr_in6 *sockaddr;
+ char ip[NI_MAXHOST];
+ unsigned int i;
+ int found = 0;
+
+ for (i = 0; i < 5; i++)
+ {
+ ret = GetAdaptersAddresses(AF_INET6, GAA_FLAG_INCLUDE_PREFIX,
+ NULL, addrs, &size);
+ if ((ret == NO_ERROR) || (ret == ERROR_NO_DATA)) {
+ break;
+ } else if (ret == ERROR_BUFFER_OVERFLOW)
+ {
+ if (addrs)
+ free(addrs);
+ addrs = (IP_ADAPTER_ADDRESSES *)malloc(size);
+ } else
+ {
+ if (addrs)
+ {
+ free(addrs);
+ addrs = NULL;
+ }
+ lwsl_err("Failed to get IPv6 address table (%d)", ret);
+ break;
+ }
+ }
+
+ if ((ret == NO_ERROR) && (addrs)) {
+ adapter = addrs;
+ while (adapter && !found) {
+ addr = adapter->FirstUnicastAddress;
+ while (addr && !found) {
+ if (addr->Address.lpSockaddr->sa_family ==
+ AF_INET6) {
+ sockaddr = (struct sockaddr_in6 *)
+ (addr->Address.lpSockaddr);
+
+ lws_plat_inet_ntop(sockaddr->sin6_family,
+ &sockaddr->sin6_addr,
+ ip, sizeof(ip));
+
+ if (!strcmp(ip, ipaddr)) {
+ scope = sockaddr->sin6_scope_id;
+ found = 1;
+ break;
+ }
+ }
+ addr = addr->Next;
+ }
+ adapter = adapter->Next;
+ }
+ }
+ if (addrs)
+ free(addrs);
+#endif
+
+ return scope;
+}
+#endif
+
+#if !defined(LWS_NO_SERVER)
+
LWS_EXTERN struct lws *
lws_create_adopt_udp(struct lws_vhost *vhost, int port, int flags,
		     const char *protocol_name, struct lws *parent_wsi)
{
	/*
	 * Create a UDP socket for port (bound locally if LWS_CAUDP_BIND is
	 * in flags) and adopt it into the vhost under the named protocol.
	 * Returns the new wsi, or NULL on failure; any socket created on a
	 * failing path is closed here.
	 */
	lws_sock_file_fd_type sock;
	struct addrinfo h, *r, *rp;
	struct lws *wsi = NULL;
	char buf[16];
	int n;

	memset(&h, 0, sizeof(h));
	h.ai_family = AF_UNSPEC;	/* Allow IPv4 or IPv6 */
	h.ai_socktype = SOCK_DGRAM;
	h.ai_protocol = IPPROTO_UDP;
	h.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;

	lws_snprintf(buf, sizeof(buf), "%u", port);
	n = getaddrinfo(NULL, buf, &h, &r);
	if (n) {
		lwsl_info("%s: getaddrinfo error: %s\n", __func__,
			  gai_strerror(n));
		goto bail;
	}

	/* take the first candidate we can actually get a socket for */
	for (rp = r; rp; rp = rp->ai_next) {
		sock.sockfd = socket(rp->ai_family, rp->ai_socktype,
				     rp->ai_protocol);
		if (sock.sockfd >= 0)
			break;
	}
	if (!rp) {
		lwsl_err("%s: unable to create INET socket\n", __func__);
		goto bail1;
	}

	if ((flags & LWS_CAUDP_BIND) && bind(sock.sockfd, rp->ai_addr,
#if defined(_WIN32)
			    (int)rp->ai_addrlen
#else
			    rp->ai_addrlen
#endif
		   ) == -1) {
		lwsl_err("%s: bind failed\n", __func__);
		goto bail2;
	}

	wsi = lws_adopt_descriptor_vhost(vhost, LWS_ADOPT_RAW_SOCKET_UDP, sock,
					 protocol_name, parent_wsi);
	if (!wsi)
		lwsl_err("%s: udp adoption failed\n", __func__);

bail2:
	/* wsi still NULL here means bind or adoption failed: close the fd */
	if (!wsi)
		close((int)sock.sockfd);
bail1:
	freeaddrinfo(r);

bail:
	return wsi;
}
+
+#endif
+
+
+
/* shared nibble-to-ASCII-hex lookup for the escapers below */
static const char *hex = "0123456789ABCDEF";

LWS_VISIBLE LWS_EXTERN const char *
lws_sql_purify(char *escaped, const char *string, int len)
{
	/*
	 * Copy string into escaped, doubling single-quotes so the result
	 * is safe inside a SQL string literal.  len is the output buffer
	 * size; the result is always NUL-terminated and returned.
	 */
	const char *in = string;
	char *out = escaped;

	while (*in && len-- > 2) {
		if (*in != '\'') {
			*out++ = *in++;
			continue;
		}
		*out++ = '\'';
		*out++ = '\'';
		len--;	/* the escape used one extra output byte */
		in++;
	}
	*out = '\0';

	return escaped;
}

LWS_VISIBLE LWS_EXTERN const char *
lws_json_purify(char *escaped, const char *string, int len)
{
	/*
	 * Copy string into escaped, turning double-quote, backslash and
	 * control characters into \u00XX escapes so the result can sit in
	 * a JSON string.  NULL input yields "".  Returns escaped.
	 */
	const char *in = string;
	char *out = escaped;

	if (!in) {
		escaped[0] = '\0';
		return escaped;
	}

	while (*in && len-- > 6) {
		if (*in != '\"' && *in != '\\' && *in >= 0x20) {
			*out++ = *in++;
			continue;
		}
		*out++ = '\\';
		*out++ = 'u';
		*out++ = '0';
		*out++ = '0';
		*out++ = hex[((*in) >> 4) & 15];
		*out++ = hex[(*in) & 15];
		len -= 5;
		in++;
	}
	*out = '\0';

	return escaped;
}

LWS_VISIBLE LWS_EXTERN void
lws_filename_purify_inplace(char *filename)
{
	/*
	 * Defang a filename in place: ".." pairs and the characters
	 * : / \ $ % become '_' so the name cannot traverse directories or
	 * carry drive / variable syntax.
	 */
	char *fp;

	for (fp = filename; *fp; fp++) {
		if (fp[0] == '.' && fp[1] == '.') {
			fp[0] = '_';
			fp[1] = '_';
		}

		switch (*fp) {
		case ':':
		case '/':
		case '\\':
		case '$':
		case '%':
			*fp = '_';
			break;
		default:
			break;
		}
	}
}

LWS_VISIBLE LWS_EXTERN const char *
lws_urlencode(char *escaped, const char *string, int len)
{
	/*
	 * %-encode string into escaped (len = output buffer size):
	 * space becomes '+', alphanumerics pass through, everything else
	 * becomes %XX.  Returns escaped, always NUL-terminated.
	 */
	const char *in = string;
	char *out = escaped;

	while (*in && len-- > 3) {
		char c = *in;

		if (c == ' ') {
			*out++ = '+';
			in++;
			continue;
		}
		if ((c >= '0' && c <= '9') ||
		    (c >= 'A' && c <= 'Z') ||
		    (c >= 'a' && c <= 'z')) {
			*out++ = c;
			in++;
			continue;
		}
		*out++ = '%';
		*out++ = hex[(c >> 4) & 0xf];
		*out++ = hex[c & 0xf];

		len -= 2;
		in++;
	}
	*out = '\0';

	return escaped;
}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_urldecode(char *string, const char *escaped, int len)
+{
+ int state = 0, n;
+ char sum = 0;
+
+ while (*escaped && len) {
+ switch (state) {
+ case 0:
+ if (*escaped == '%') {
+ state++;
+ escaped++;
+ continue;
+ }
+ if (*escaped == '+') {
+ escaped++;
+ *string++ = ' ';
+ len--;
+ continue;
+ }
+ *string++ = *escaped++;
+ len--;
+ break;
+ case 1:
+ n = char_to_hex(*escaped);
+ if (n < 0)
+ return -1;
+ escaped++;
+ sum = n << 4;
+ state++;
+ break;
+
+ case 2:
+ n = char_to_hex(*escaped);
+ if (n < 0)
+ return -1;
+ escaped++;
+ *string++ = sum | n;
+ len--;
+ state = 0;
+ break;
+ }
+
+ }
+ *string = '\0';
+
+ return 0;
+}
+
+LWS_VISIBLE LWS_EXTERN int
+lws_finalize_startup(struct lws_context *context)
+{
+ struct lws_context_creation_info info;
+
+ info.uid = context->uid;
+ info.gid = context->gid;
+
+#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
+ memcpy(info.caps, context->caps, sizeof(info.caps));
+ info.count_caps = context->count_caps;
+#endif
+
+ if (lws_check_opt(context->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
+ lws_plat_drop_app_privileges(&info);
+
+ return 0;
+}
+
int
lws_snprintf(char *str, size_t size, const char *format, ...)
{
	/*
	 * vsnprintf wrapper returning the number of bytes that actually
	 * fit (capped at size) rather than the would-have-written count,
	 * so "p += lws_snprintf(...)" can never advance p past the buffer.
	 */
	va_list args;
	int n;

	if (!size)
		return 0;

	va_start(args, format);
	n = vsnprintf(str, size, format, args);
	va_end(args);

	if (n >= (int)size)
		n = (int)size;

	return n;
}
+
char *
lws_strncpy(char *dest, const char *src, size_t size)
{
	/*
	 * strncpy that guarantees NUL termination: copies at most size - 1
	 * bytes of src and always terminates dest.  Returns dest.
	 */
	if (!size)
		/* fix: the original indexed dest[size - 1] here, which is
		 * out-of-bounds (UB) when size == 0 */
		return dest;

	strncpy(dest, src, size - 1);
	dest[size - 1] = '\0';

	return dest;
}
+
+
LWS_VISIBLE LWS_EXTERN int
lws_is_cgi(struct lws *wsi) {
	/* nonzero when this wsi currently has a CGI child attached */
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	(void)wsi;
	return 0;
#endif
}
+
+const struct lws_protocol_vhost_options *
+lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
+{
+ while (pvo) {
+ if (!strcmp(pvo->name, name))
+ break;
+
+ pvo = pvo->next;
+ }
+
+ return pvo;
+}
+
+void
+lws_sum_stats(const struct lws_context *ctx, struct lws_conn_stats *cs)
+{
+ const struct lws_vhost *vh = ctx->vhost_list;
+
+ while (vh) {
+
+ cs->rx += vh->conn_stats.rx;
+ cs->tx += vh->conn_stats.tx;
+ cs->h1_conn += vh->conn_stats.h1_conn;
+ cs->h1_trans += vh->conn_stats.h1_trans;
+ cs->h2_trans += vh->conn_stats.h2_trans;
+ cs->ws_upg += vh->conn_stats.ws_upg;
+ cs->h2_upg += vh->conn_stats.h2_upg;
+ cs->h2_alpn += vh->conn_stats.h2_alpn;
+ cs->h2_subs += vh->conn_stats.h2_subs;
+ cs->rejected += vh->conn_stats.rejected;
+
+ vh = vh->vhost_next;
+ }
+}
+
const char *
lws_cmdline_option(int argc, const char **argv, const char *val)
{
	/*
	 * Scan argv from the back for an argument beginning with val.
	 * "-x 123" style returns the following argument; "-x123" or
	 * "--opt=123" style returns the tail of the matching argument.
	 * Returns NULL when absent (or the following argv looks bogus).
	 */
	size_t n = strlen(val);
	int c;

	for (c = argc - 1; c > 0; c--) {
		if (strncmp(argv[c], val, n))
			continue;

		if (!argv[c][n] && c < argc - 1) {
			/* coverity treats unchecked argv as "tainted" */
			if (!argv[c + 1] || strlen(argv[c + 1]) > 1024)
				return NULL;
			return argv[c + 1];
		}

		return argv[c] + n;
	}

	return NULL;
}
+
+#ifdef LWS_WITH_SERVER_STATUS
+
LWS_EXTERN int
lws_json_dump_vhost(const struct lws_vhost *vh, char *buf, int len)
{
	/*
	 * Serialize one vhost's configuration, counters, mounts and
	 * protocols as a JSON object into buf (len bytes).  Returns the
	 * number of bytes written, or 0 if len < 100.  Relies on
	 * lws_snprintf() never advancing buf past end.
	 */
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* indexed by the mount's origin_protocol enum value */
	static const char * const prots[] = {
		"http://",
		"https://",
		"file://",
		"cgi://",
		">http://",
		">https://",
		"callback://"
	};
#endif
	char *orig = buf, *end = buf + len - 1, first = 1;
	int n = 0;

	if (len < 100)
		return 0;

	buf += lws_snprintf(buf, end - buf,
			"{\n \"name\":\"%s\",\n"
			" \"port\":\"%d\",\n"
			" \"use_ssl\":\"%d\",\n"
			" \"sts\":\"%d\",\n"
			" \"rx\":\"%llu\",\n"
			" \"tx\":\"%llu\",\n"
			" \"h1_conn\":\"%lu\",\n"
			" \"h1_trans\":\"%lu\",\n"
			" \"h2_trans\":\"%lu\",\n"
			" \"ws_upg\":\"%lu\",\n"
			" \"rejected\":\"%lu\",\n"
			" \"h2_upg\":\"%lu\",\n"
			" \"h2_alpn\":\"%lu\",\n"
			" \"h2_subs\":\"%lu\""
			,
			vh->name, vh->listen_port,
#if defined(LWS_WITH_TLS)
			vh->tls.use_ssl & LCCSCF_USE_SSL,
#else
			0,
#endif
			!!(vh->options & LWS_SERVER_OPTION_STS),
			vh->conn_stats.rx, vh->conn_stats.tx,
			vh->conn_stats.h1_conn,
			vh->conn_stats.h1_trans,
			vh->conn_stats.h2_trans,
			vh->conn_stats.ws_upg,
			vh->conn_stats.rejected,
			vh->conn_stats.h2_upg,
			vh->conn_stats.h2_alpn,
			vh->conn_stats.h2_subs
			);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* optional "mounts" array */
	if (vh->http.mount_list) {
		const struct lws_http_mount *m = vh->http.mount_list;

		buf += lws_snprintf(buf, end - buf, ",\n \"mounts\":[");
		while (m) {
			if (!first)
				buf += lws_snprintf(buf, end - buf, ",");
			buf += lws_snprintf(buf, end - buf,
					"\n  {\n   \"mountpoint\":\"%s\",\n"
					"  \"origin\":\"%s%s\",\n"
					"  \"cache_max_age\":\"%d\",\n"
					"  \"cache_reuse\":\"%d\",\n"
					"  \"cache_revalidate\":\"%d\",\n"
					"  \"cache_intermediaries\":\"%d\"\n"
					,
					m->mountpoint,
					prots[m->origin_protocol],
					m->origin,
					m->cache_max_age,
					m->cache_reusable,
					m->cache_revalidate,
					m->cache_intermediaries);
			if (m->def)
				buf += lws_snprintf(buf, end - buf,
						",\n  \"default\":\"%s\"",
						m->def);
			buf += lws_snprintf(buf, end - buf, "\n  }");
			first = 0;
			m = m->mount_next;
		}
		buf += lws_snprintf(buf, end - buf, "\n ]");
	}
#endif
	/* "ws-protocols" array */
	if (vh->protocols) {
		n = 0;
		first = 1;

		buf += lws_snprintf(buf, end - buf, ",\n \"ws-protocols\":[");
		while (n < vh->count_protocols) {
			if (!first)
				buf += lws_snprintf(buf, end - buf, ",");
			buf += lws_snprintf(buf, end - buf,
					"\n  {\n   \"%s\":{\n"
					"    \"status\":\"ok\"\n   }\n  }"
					,
					vh->protocols[n].name);
			first = 0;
			n++;
		}
		buf += lws_snprintf(buf, end - buf, "\n ]");
	}

	buf += lws_snprintf(buf, end - buf, "\n}");

	return buf - orig;
}
+
+
LWS_EXTERN LWS_VISIBLE int
lws_json_dump_context(const struct lws_context *context, char *buf, int len,
		      int hide_vhosts)
{
	/*
	 * Serialize context-wide status (uptime, loadavg, per-thread fd/ah
	 * counts, summed connection stats, optionally every vhost via
	 * lws_json_dump_vhost()) as JSON into buf (len bytes).  Returns
	 * the number of bytes written.
	 */
	char *orig = buf, *end = buf + len - 1, first = 1;
	const struct lws_vhost *vh = context->vhost_list;
	const struct lws_context_per_thread *pt;
	time_t t = time(NULL);
	int n, listening = 0, cgi_count = 0;
	struct lws_conn_stats cs;
	double d = 0;
#ifdef LWS_WITH_CGI
	struct lws_cgi * const *pcgi;
#endif

#ifdef LWS_WITH_LIBUV
	uv_uptime(&d);
#endif

	buf += lws_snprintf(buf, end - buf, "{ "
			    "\"version\":\"%s\",\n"
			    "\"uptime\":\"%ld\",\n",
			    lws_get_library_version(),
			    (long)d);

#ifdef LWS_HAVE_GETLOADAVG
	{
		double d[3];
		int m;

		m = getloadavg(d, 3);
		for (n = 0; n < m; n++) {
			buf += lws_snprintf(buf, end - buf,
				"\"l%d\":\"%.2f\",\n",
				n + 1, d[n]);
		}
	}
#endif

	buf += lws_snprintf(buf, end - buf, "\"contexts\":[\n");

	buf += lws_snprintf(buf, end - buf, "{ "
				"\"context_uptime\":\"%ld\",\n"
				"\"cgi_spawned\":\"%d\",\n"
				"\"pt_fd_max\":\"%d\",\n"
				"\"ah_pool_max\":\"%d\",\n"
				"\"deprecated\":\"%d\",\n"
				"\"wsi_alive\":\"%d\",\n",
				(unsigned long)(t - context->time_up),
				context->count_cgi_spawned,
				context->fd_limit_per_thread,
				context->max_http_header_pool,
				context->deprecated,
				context->count_wsi_allocated);

	/* per service thread counters */
	buf += lws_snprintf(buf, end - buf, "\"pt\":[\n ");
	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];
		if (n)
			buf += lws_snprintf(buf, end - buf, ",");
		buf += lws_snprintf(buf, end - buf,
				"\n  {\n"
				"   \"fds_count\":\"%d\",\n"
				"   \"ah_pool_inuse\":\"%d\",\n"
				"   \"ah_wait_list\":\"%d\"\n"
				"   }",
				pt->fds_count,
				pt->http.ah_count_in_use,
				pt->http.ah_wait_list_length);
	}

	buf += lws_snprintf(buf, end - buf, "]");

	buf += lws_snprintf(buf, end - buf, ", \"vhosts\":[\n ");

	first = 1;
	vh = context->vhost_list;
	listening = 0;
	cs = context->conn_stats;
	lws_sum_stats(context, &cs);
	/* even with hide_vhosts we still walk the list to count listeners */
	while (vh) {

		if (!hide_vhosts) {
			if (!first)
				if(buf != end)
					*buf++ = ',';
			buf += lws_json_dump_vhost(vh, buf, end - buf);
			first = 0;
		}
		if (vh->lserv_wsi)
			listening++;
		vh = vh->vhost_next;
	}

	buf += lws_snprintf(buf, end - buf,
			"],\n\"listen_wsi\":\"%d\",\n"
			" \"rx\":\"%llu\",\n"
			" \"tx\":\"%llu\",\n"
			" \"h1_conn\":\"%lu\",\n"
			" \"h1_trans\":\"%lu\",\n"
			" \"h2_trans\":\"%lu\",\n"
			" \"ws_upg\":\"%lu\",\n"
			" \"rejected\":\"%lu\",\n"
			" \"h2_alpn\":\"%lu\",\n"
			" \"h2_subs\":\"%lu\",\n"
			" \"h2_upg\":\"%lu\"",
			listening, cs.rx, cs.tx,
			cs.h1_conn,
			cs.h1_trans,
			cs.h2_trans,
			cs.ws_upg,
			cs.rejected,
			cs.h2_alpn,
			cs.h2_subs,
			cs.h2_upg);

#ifdef LWS_WITH_CGI
	/* count live CGI children across all service threads */
	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];
		pcgi = &pt->http.cgi_list;

		while (*pcgi) {
			pcgi = &(*pcgi)->cgi_list;

			cgi_count++;
		}
	}
#endif
	buf += lws_snprintf(buf, end - buf, ",\n \"cgi_alive\":\"%d\"\n ",
			    cgi_count);

	buf += lws_snprintf(buf, end - buf, "}");


	buf += lws_snprintf(buf, end - buf, "]}\n ");

	return buf - orig;
}
+
+#endif
+
+#if defined(LWS_WITH_STATS)
+
+LWS_VISIBLE LWS_EXTERN uint64_t
+lws_stats_get(struct lws_context *context, int index)
+{
+ if (index >= LWSSTATS_SIZE)
+ return 0;
+
+ return context->lws_stats[index];
+}
+
LWS_VISIBLE LWS_EXTERN void
lws_stats_log_dump(struct lws_context *context)
{
	/*
	 * Dump all internal counters, per-vhost listener POLLIN state,
	 * per-thread AH pool usage and (when built in) the peer-limit
	 * table at notice level.  Skipped entirely if nothing changed
	 * since the last dump (context->updated).
	 */
	struct lws_vhost *v = context->vhost_list;
	int n, m;

	(void)m;

	if (!context->updated)
		return;

	context->updated = 0;

	lwsl_notice("\n");
	lwsl_notice("LWS internal statistics dump ----->\n");
	lwsl_notice("LWSSTATS_C_CONNECTIONS:                     %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_CONNECTIONS));
	lwsl_notice("LWSSTATS_C_API_CLOSE:                       %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_API_CLOSE));
	lwsl_notice("LWSSTATS_C_API_READ:                        %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_API_READ));
	lwsl_notice("LWSSTATS_C_API_LWS_WRITE:                   %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_API_LWS_WRITE));
	lwsl_notice("LWSSTATS_C_API_WRITE:                       %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_API_WRITE));
	lwsl_notice("LWSSTATS_C_WRITE_PARTIALS:                  %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_WRITE_PARTIALS));
	lwsl_notice("LWSSTATS_C_WRITEABLE_CB_REQ:                %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_WRITEABLE_CB_REQ));
	lwsl_notice("LWSSTATS_C_WRITEABLE_CB_EFF_REQ:            %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_WRITEABLE_CB_EFF_REQ));
	lwsl_notice("LWSSTATS_C_WRITEABLE_CB:                    %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_WRITEABLE_CB));
	lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_ACCEPT_SPIN:     %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_SSL_CONNECTIONS_ACCEPT_SPIN));
	lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_FAILED:          %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_SSL_CONNECTIONS_FAILED));
	lwsl_notice("LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED:        %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED));
	lwsl_notice("LWSSTATS_C_SSL_CONNS_HAD_RX:                %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_SSL_CONNS_HAD_RX));
	lwsl_notice("LWSSTATS_C_PEER_LIMIT_AH_DENIED:            %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_PEER_LIMIT_AH_DENIED));
	lwsl_notice("LWSSTATS_C_PEER_LIMIT_WSI_DENIED:           %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_PEER_LIMIT_WSI_DENIED));

	lwsl_notice("LWSSTATS_C_TIMEOUTS:                        %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_TIMEOUTS));
	lwsl_notice("LWSSTATS_C_SERVICE_ENTRY:                   %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_C_SERVICE_ENTRY));
	lwsl_notice("LWSSTATS_B_READ:                            %8llu\n",
		    (unsigned long long)lws_stats_get(context, LWSSTATS_B_READ));
	lwsl_notice("LWSSTATS_B_WRITE:                           %8llu\n",
		    (unsigned long long)lws_stats_get(context, LWSSTATS_B_WRITE));
	lwsl_notice("LWSSTATS_B_PARTIALS_ACCEPTED_PARTS:         %8llu\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_B_PARTIALS_ACCEPTED_PARTS));
	lwsl_notice("LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY: %8llums\n",
		    (unsigned long long)lws_stats_get(context,
				LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY) / 1000);
	if (lws_stats_get(context, LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED))
		lwsl_notice("  Avg accept delay:                         %8llums\n",
			(unsigned long long)(lws_stats_get(context,
				LWSSTATS_MS_SSL_CONNECTIONS_ACCEPTED_DELAY) /
			lws_stats_get(context,
				LWSSTATS_C_SSL_CONNECTIONS_ACCEPTED)) / 1000);
	lwsl_notice("LWSSTATS_MS_SSL_RX_DELAY:                   %8llums\n",
			(unsigned long long)lws_stats_get(context,
					LWSSTATS_MS_SSL_RX_DELAY) / 1000);
	if (lws_stats_get(context, LWSSTATS_C_SSL_CONNS_HAD_RX))
		lwsl_notice("  Avg accept-rx delay:                      %8llums\n",
			(unsigned long long)(lws_stats_get(context,
					LWSSTATS_MS_SSL_RX_DELAY) /
			lws_stats_get(context,
					LWSSTATS_C_SSL_CONNS_HAD_RX)) / 1000);

	lwsl_notice("LWSSTATS_MS_WRITABLE_DELAY:                 %8lluus\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_MS_WRITABLE_DELAY));
	lwsl_notice("LWSSTATS_MS_WORST_WRITABLE_DELAY:           %8lluus\n",
		    (unsigned long long)lws_stats_get(context,
					LWSSTATS_MS_WORST_WRITABLE_DELAY));
	if (lws_stats_get(context, LWSSTATS_C_WRITEABLE_CB))
		lwsl_notice("  Avg writable delay:                       %8lluus\n",
			(unsigned long long)(lws_stats_get(context,
					LWSSTATS_MS_WRITABLE_DELAY) /
			lws_stats_get(context, LWSSTATS_C_WRITEABLE_CB)));
	lwsl_notice("Simultaneous SSL restriction:               %8d/%d\n",
			context->simultaneous_ssl,
			context->simultaneous_ssl_restriction);

	lwsl_notice("Live wsi:                                   %8d\n",
			context->count_wsi_allocated);

	/* dumping counts as an update, so the next dump is not skipped */
	context->updated = 1;

	/* report whether each vhost's listen socket still has POLLIN on */
	while (v) {
		if (v->lserv_wsi &&
		    v->lserv_wsi->position_in_fds_table != LWS_NO_FDS_POS) {

			struct lws_context_per_thread *pt =
					&context->pt[(int)v->lserv_wsi->tsi];
			struct lws_pollfd *pfd;

			pfd = &pt->fds[v->lserv_wsi->position_in_fds_table];

			lwsl_notice("  Listen port %d actual POLLIN:   %d\n",
				    v->listen_port,
				    (int)pfd->events & LWS_POLLIN);
		}

		v = v->vhost_next;
	}

	/* per-thread AH pool usage, cross-checked against the wait list */
	for (n = 0; n < context->count_threads; n++) {
		struct lws_context_per_thread *pt = &context->pt[n];
		struct lws *wl;
		int m = 0;

		lwsl_notice("PT %d\n", n + 1);

		lws_pt_lock(pt, __func__);

		lwsl_notice("  AH in use / max:                  %d / %d\n",
			    pt->http.ah_count_in_use,
			    context->max_http_header_pool);

		wl = pt->http.ah_wait_list;
		while (wl) {
			m++;
			wl = wl->http.ah_wait_list;
		}

		lwsl_notice("  AH wait list count / actual:      %d / %d\n",
			    pt->http.ah_wait_list_length, m);

		lws_pt_unlock(pt);
	}

#if defined(LWS_WITH_PEER_LIMITS)
	m = 0;
	for (n = 0; n < (int)context->pl_hash_elements; n++) {
		lws_start_foreach_llp(struct lws_peer **, peer,
				      context->pl_hash_table[n]) {
			m++;
		} lws_end_foreach_llp(peer, next);
	}

	lwsl_notice(" Peers: total active %d\n", m);
	if (m > 10) {
		m = 10;
		lwsl_notice("  (showing 10 peers only)\n");
	}

	if (m) {
		for (n = 0; n < (int)context->pl_hash_elements; n++) {
			char buf[72];

			lws_start_foreach_llp(struct lws_peer **, peer,
					      context->pl_hash_table[n]) {
				struct lws_peer *df = *peer;

				if (!lws_plat_inet_ntop(df->af, df->addr, buf,
							sizeof(buf) - 1))
					strcpy(buf, "unknown");
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
				lwsl_notice("  peer %s: count wsi: %d, count ah: %d\n",
					    buf, df->count_wsi,
					    df->http.count_ah);
#else
				lwsl_notice("  peer %s: count wsi: %d\n",
					    buf, df->count_wsi);
#endif

				if (!--m)
					break;
			} lws_end_foreach_llp(peer, next);
		}
	}
#endif

	lwsl_notice("\n");
}
+
void
lws_stats_atomic_bump(struct lws_context * context,
		struct lws_context_per_thread *pt, int index, uint64_t bump)
{
	/* add bump to a stats counter under the pt stats lock */
	lws_pt_stats_lock(pt);
	context->lws_stats[index] += bump;
	/* service-entry ticks are too noisy to count as "new info" */
	if (index != LWSSTATS_C_SERVICE_ENTRY)
		context->updated = 1;
	lws_pt_stats_unlock(pt);
}
+
void
lws_stats_atomic_max(struct lws_context * context,
		struct lws_context_per_thread *pt, int index, uint64_t val)
{
	/* raise a high-watermark stats counter to val if it exceeds it,
	 * under the pt stats lock */
	lws_pt_stats_lock(pt);
	if (val > context->lws_stats[index]) {
		context->lws_stats[index] = val;
		context->updated = 1;
	}
	lws_pt_stats_unlock(pt);
}
+
+#endif
+
diff --git a/thirdparty/libwebsockets/core/output.c b/thirdparty/libwebsockets/core/output.c
new file mode 100644
index 0000000000..11965a06b9
--- /dev/null
+++ b/thirdparty/libwebsockets/core/output.c
@@ -0,0 +1,320 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010-2017 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "core/private.h"
+
/*
 * Issue len bytes from buf directly onto the wsi's transport (plain or
 * SSL-capable write).
 *
 * Returns the number of bytes "accepted" -- which may include bytes that
 * were only buffered in the wsi's truncated-send ("trunc") buffer for
 * retry later -- or -1 on fatal error / to signal close.  Callers must
 * only write from inside the WRITABLE callback.
 */
int lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	size_t real_len = len;
	/* NOTE(review): n is unsigned but also receives the negative
	 * LWS_SSL_CAPABLE_* sentinels via the (int) casts below; the
	 * switch compares the bit patterns -- confirm before changing type */
	unsigned int n;

	// lwsl_hexdump_err(buf, len);

	/*
	 * Detect if we got called twice without going through the
	 * event loop to handle pending. This would be caused by either
	 * back-to-back writes in one WRITABLE (illegal) or calling lws_write()
	 * from outside the WRITABLE callback (illegal).
	 */
	if (wsi->could_have_pending) {
		lwsl_hexdump_level(LLL_ERR, buf, len);
		lwsl_err("** %p: vh: %s, prot: %s, role %s: "
			 "Illegal back-to-back write of %lu detected...\n",
			 wsi, wsi->vhost->name, wsi->protocol->name,
			 wsi->role_ops->name,
			 (unsigned long)len);
		// assert(0);

		return -1;
	}

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_WRITE, 1);

	if (!len)
		return 0;
	/* just ignore sends after we cleared the truncation buffer */
	if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE && !wsi->trunc_len)
		return (int)len;

	/*
	 * While a truncated send is outstanding, the only legal buf is the
	 * unsent tail of trunc_alloc itself; anything else means the user
	 * code wrote outside the writable callback.
	 */
	if (wsi->trunc_len && (buf < wsi->trunc_alloc ||
	    buf > (wsi->trunc_alloc + wsi->trunc_len + wsi->trunc_offset))) {
		lwsl_hexdump_level(LLL_ERR, buf, len);
		lwsl_err("** %p: vh: %s, prot: %s, Sending new %lu, pending truncated ...\n"
			 " It's illegal to do an lws_write outside of\n"
			 " the writable callback: fix your code\n",
			 wsi, wsi->vhost->name, wsi->protocol->name,
			 (unsigned long)len);
		assert(0);

		return -1;
	}

	if (!wsi->http2_substream && !lws_socket_is_valid(wsi->desc.sockfd))
		lwsl_warn("** error invalid sock but expected to send\n");

	/*
	 * limit sending: clamp this write to the protocol's tx packet size,
	 * else its rx buffer size, else the per-thread service buffer size
	 */
	if (wsi->protocol->tx_packet_size)
		n = (int)wsi->protocol->tx_packet_size;
	else {
		n = (int)wsi->protocol->rx_buffer_size;
		if (!n)
			n = context->pt_serv_buf_size;
	}
	n += LWS_PRE + 4;
	if (n > len)
		n = (int)len;

	/* nope, send it on the socket directly */
	lws_latency_pre(context, wsi);
	n = lws_ssl_capable_write(wsi, buf, n);
	lws_latency(context, wsi, "send lws_issue_raw", n, n == len);

	/* something got written, it can have been truncated now */
	wsi->could_have_pending = 1;

	switch (n) {
	case LWS_SSL_CAPABLE_ERROR:
		/* we're going to close, let close know sends aren't possible */
		wsi->socket_is_permanently_unusable = 1;
		return -1;
	case LWS_SSL_CAPABLE_MORE_SERVICE:
		/*
		 * nothing got sent, not fatal. Retry the whole thing later,
		 * ie, implying treat it was a truncated send so it gets
		 * retried
		 */
		n = 0;
		break;
	}

	/*
	 * we were already handling a truncated send?  Then this write was
	 * draining trunc_alloc: advance the offset by what actually went out.
	 */
	if (wsi->trunc_len) {
		lwsl_info("%p partial adv %d (vs %ld)\n", wsi, n, (long)real_len);
		wsi->trunc_offset += n;
		wsi->trunc_len -= n;

		if (!wsi->trunc_len) {
			lwsl_info("** %p partial send completed\n", wsi);
			/* done with it, but don't free it */
			n = (int)real_len;
			if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
				lwsl_info("** %p signalling to close now\n", wsi);
				return -1; /* retry closing now */
			}

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
#if !defined(LWS_WITHOUT_SERVER)
			/* a transaction-completed that was parked while the
			 * partial drained can now be carried out */
			if (wsi->http.deferred_transaction_completed) {
				lwsl_notice("%s: partial completed, doing "
					    "deferred transaction completed\n",
					    __func__);
				wsi->http.deferred_transaction_completed = 0;
				return lws_http_transaction_completed(wsi);
			}
#endif
#endif
		}
		/* always callback on writeable */
		lws_callback_on_writable(wsi);

		return n;
	}

	if ((unsigned int)n == real_len)
		/* what we just sent went out cleanly */
		return n;

	/*
	 * Newly truncated send. Buffer the remainder (it will get
	 * first priority next time the socket is writable).
	 */
	lwsl_debug("%p new partial sent %d from %lu total\n", wsi, n,
		   (unsigned long)real_len);

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITE_PARTIALS, 1);
	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_PARTIALS_ACCEPTED_PARTS, n);

	/*
	 * - if we still have a suitable malloc lying around, use it
	 * - or, if too small, reallocate it
	 * - or, if no buffer, create it
	 */
	if (!wsi->trunc_alloc || real_len - n > wsi->trunc_alloc_len) {
		lws_free(wsi->trunc_alloc);

		wsi->trunc_alloc_len = (unsigned int)(real_len - n);
		wsi->trunc_alloc = lws_malloc(real_len - n,
					      "truncated send alloc");
		if (!wsi->trunc_alloc) {
			lwsl_err("truncated send: unable to malloc %lu\n",
				 (unsigned long)(real_len - n));
			return -1;
		}
	}
	wsi->trunc_offset = 0;
	wsi->trunc_len = (unsigned int)(real_len - n);
	memcpy(wsi->trunc_alloc, buf + n, real_len - n);

#if !defined(LWS_WITH_ESP32)
	if (lws_wsi_is_udp(wsi)) {
		/* stash original destination for fulfilling UDP partials */
		wsi->udp->sa_pending = wsi->udp->sa;
		wsi->udp->salen_pending = wsi->udp->salen;
	}
#endif

	/* since something buffered, force it to get another chance to send */
	lws_callback_on_writable(wsi);

	return (int)real_len;
}
+
+LWS_VISIBLE int lws_write(struct lws *wsi, unsigned char *buf, size_t len,
+ enum lws_write_protocol wp)
+{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+ if (wsi->parent_carries_io) {
+ struct lws_write_passthru pas;
+
+ pas.buf = buf;
+ pas.len = len;
+ pas.wp = wp;
+ pas.wsi = wsi;
+
+ if (wsi->parent->protocol->callback(wsi->parent,
+ LWS_CALLBACK_CHILD_WRITE_VIA_PARENT,
+ wsi->parent->user_space,
+ (void *)&pas, 0))
+ return 1;
+
+ return (int)len;
+ }
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_LWS_WRITE, 1);
+
+ if ((int)len < 0) {
+ lwsl_err("%s: suspicious len int %d, ulong %lu\n", __func__,
+ (int)len, (unsigned long)len);
+ return -1;
+ }
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_WRITE, len);
+
+#ifdef LWS_WITH_ACCESS_LOG
+ wsi->http.access_log.sent += len;
+#endif
+ if (wsi->vhost)
+ wsi->vhost->conn_stats.tx += len;
+
+ assert(wsi->role_ops);
+ if (!wsi->role_ops->write_role_protocol)
+ return lws_issue_raw(wsi, buf, len);
+
+ return wsi->role_ops->write_role_protocol(wsi, buf, len, &wp);
+}
+
+LWS_VISIBLE int
+lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, int len)
+{
+ struct lws_context *context = wsi->context;
+ struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
+ int n = 0;
+
+ lws_stats_atomic_bump(context, pt, LWSSTATS_C_API_READ, 1);
+
+ if (lws_wsi_is_udp(wsi)) {
+#if !defined(LWS_WITH_ESP32)
+ wsi->udp->salen = sizeof(wsi->udp->sa);
+ n = recvfrom(wsi->desc.sockfd, (char *)buf, len, 0,
+ &wsi->udp->sa, &wsi->udp->salen);
+#endif
+ } else
+ n = recv(wsi->desc.sockfd, (char *)buf, len, 0);
+
+ if (n >= 0) {
+ if (wsi->vhost)
+ wsi->vhost->conn_stats.rx += n;
+ lws_stats_atomic_bump(context, pt, LWSSTATS_B_READ, n);
+
+ return n;
+ }
+
+ if (LWS_ERRNO == LWS_EAGAIN ||
+ LWS_ERRNO == LWS_EWOULDBLOCK ||
+ LWS_ERRNO == LWS_EINTR)
+ return LWS_SSL_CAPABLE_MORE_SERVICE;
+
+ lwsl_notice("error on reading from skt : %d\n", LWS_ERRNO);
+ return LWS_SSL_CAPABLE_ERROR;
+}
+
+LWS_VISIBLE int
+lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len)
+{
+ int n = 0;
+
+ if (lws_wsi_is_udp(wsi)) {
+#if !defined(LWS_WITH_ESP32)
+ if (wsi->trunc_len)
+ n = sendto(wsi->desc.sockfd, buf, len, 0, &wsi->udp->sa_pending, wsi->udp->salen_pending);
+ else
+ n = sendto(wsi->desc.sockfd, buf, len, 0, &wsi->udp->sa, wsi->udp->salen);
+#endif
+ } else
+ n = send(wsi->desc.sockfd, (char *)buf, len, MSG_NOSIGNAL);
+// lwsl_info("%s: sent len %d result %d", __func__, len, n);
+ if (n >= 0)
+ return n;
+
+ if (LWS_ERRNO == LWS_EAGAIN ||
+ LWS_ERRNO == LWS_EWOULDBLOCK ||
+ LWS_ERRNO == LWS_EINTR) {
+ if (LWS_ERRNO == LWS_EWOULDBLOCK) {
+ lws_set_blocking_send(wsi);
+ }
+
+ return LWS_SSL_CAPABLE_MORE_SERVICE;
+ }
+
+ lwsl_debug("ERROR writing len %d to skt fd %d err %d / errno %d\n",
+ len, wsi->desc.sockfd, n, LWS_ERRNO);
+
+ return LWS_SSL_CAPABLE_ERROR;
+}
+
+LWS_VISIBLE int
+lws_ssl_pending_no_ssl(struct lws *wsi)
+{
+ (void)wsi;
+#if defined(LWS_WITH_ESP32)
+ return 100;
+#else
+ return 0;
+#endif
+}
diff --git a/thirdparty/libwebsockets/core/pollfd.c b/thirdparty/libwebsockets/core/pollfd.c
new file mode 100644
index 0000000000..2a632ce8ec
--- /dev/null
+++ b/thirdparty/libwebsockets/core/pollfd.c
@@ -0,0 +1,616 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010-2017 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "core/private.h"
+
/*
 * Apply the event-mask change ((events & ~_and) | _or) to the pollfd
 * backing wsi, coping with the races against the service thread:
 * either by deferring (leave_pollout_active, or a queued foreign-thread
 * change list applied after poll() exits) or by applying directly and
 * kicking the service thread if we are not it.
 *
 * pa is filled with fd / prev_events / events for the external-poll
 * (protocols[0]) notification.  Returns 0 on success, -1 on failure.
 */
int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_LIBUV) && !defined(LWS_WITH_LIBEV) && !defined(LWS_WITH_LIBEVENT)
	volatile struct lws_context_per_thread *vpt;
#endif
	struct lws_context_per_thread *pt;
	struct lws_context *context;
	int ret = 0, pa_events = 1;
	struct lws_pollfd *pfd;
	int sampled_tid, tid;

	if (!wsi)
		return 0;

	/* a wsi either occupies a valid slot or is marked as having none */
	assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
	       wsi->position_in_fds_table >= 0);

	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	if (((volatile struct lws *)wsi)->handling_pollout &&
	    !_and && _or == LWS_POLLOUT) {
		/*
		 * Happening alongside service thread handling POLLOUT.
		 * The danger is when he is finished, he will disable POLLOUT,
		 * countermanding what we changed here.
		 *
		 * Instead of changing the fds, inform the service thread
		 * what happened, and ask it to leave POLLOUT active on exit
		 */
		((volatile struct lws *)wsi)->leave_pollout_active = 1;
		/*
		 * by definition service thread is not in poll wait, so no need
		 * to cancel service
		 */

		lwsl_debug("%s: using leave_pollout_active\n", __func__);

		return 0;
	}

	context = wsi->context;
	pt = &context->pt[(int)wsi->tsi];

	assert(wsi->position_in_fds_table < (int)pt->fds_count);

#if !defined(LWS_WITH_LIBUV) && \
    !defined(LWS_WITH_LIBEV) && \
    !defined(LWS_WITH_LIBEVENT)
	/*
	 * This only applies when we use the default poll() event loop.
	 *
	 * BSD can revert pa->events at any time, when the kernel decides to
	 * exit from poll().  We can't protect against it using locking.
	 *
	 * Therefore we must check first if the service thread is in poll()
	 * wait; if so, we know we must be being called from a foreign thread,
	 * and we must keep a strictly ordered list of changes we made instead
	 * of trying to apply them, since when poll() exits, which may happen
	 * at any time it would revert our changes.
	 *
	 * The plat code will apply them when it leaves the poll() wait
	 * before doing anything else.
	 */

	vpt = (volatile struct lws_context_per_thread *)pt;

	/* NOTE(review): "spinlock" here is a flag + memory barrier protecting
	 * the foreign_pfd_list, not a mutual-exclusion spinlock -- the
	 * consumer side lives in the plat poll code; confirm before reuse */
	vpt->foreign_spinlock = 1;
	lws_memory_barrier();

	if (vpt->inside_poll) {
		struct lws_foreign_thread_pollfd *ftp, **ftp1;
		/*
		 * We are certainly a foreign thread trying to change events
		 * while the service thread is in the poll() wait.
		 *
		 * Create a list of changes to be applied after poll() exit,
		 * instead of trying to apply them now.
		 */
		ftp = lws_malloc(sizeof(*ftp), "ftp");
		if (!ftp) {
			vpt->foreign_spinlock = 0;
			lws_memory_barrier();
			ret = -1;
			goto bail;
		}

		ftp->_and = _and;
		ftp->_or = _or;
		ftp->fd_index = wsi->position_in_fds_table;
		ftp->next = NULL;

		/* place at END of list to maintain order */
		ftp1 = (struct lws_foreign_thread_pollfd **)
					&vpt->foreign_pfd_list;
		while (*ftp1)
			ftp1 = &((*ftp1)->next);

		*ftp1 = ftp;
		vpt->foreign_spinlock = 0;
		lws_memory_barrier();
		/* wake the service thread so it applies the queued change */
		lws_cancel_service_pt(wsi);

		return 0;
	}

	vpt->foreign_spinlock = 0;
	lws_memory_barrier();
#endif

	/* direct path: mutate the pollfd and report old/new via pa */
	pfd = &pt->fds[wsi->position_in_fds_table];
	pa->fd = wsi->desc.sockfd;
	lwsl_debug("%s: wsi %p: fd %d events %d -> %d\n", __func__, wsi,
		   pa->fd, pfd->events, (pfd->events & ~_and) | _or);
	pa->prev_events = pfd->events;
	pa->events = pfd->events = (pfd->events & ~_and) | _or;

	/* h2 substreams have no pollfd of their own; only pa gets updated */
	if (wsi->http2_substream)
		return 0;

	/* external poll support: let protocols[0] see the mode change */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi,
					      LWS_CALLBACK_CHANGE_MODE_POLL_FD,
					      wsi->user_space, (void *)pa, 0)) {
		ret = -1;
		goto bail;
	}

	/* mirror the change into any external event loop (uv/ev/event) */
	if (context->event_loop_ops->io) {
		if (_and & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
						    LWS_EV_STOP | LWS_EV_READ);

		if (_or & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
						    LWS_EV_START | LWS_EV_READ);

		if (_and & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
						    LWS_EV_STOP | LWS_EV_WRITE);

		if (_or & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
						    LWS_EV_START | LWS_EV_WRITE);
	}

	/*
	 * if we changed something in this pollfd...
	 *   ... and we're running in a different thread context
	 *     than the service thread...
	 *       ... and the service thread is waiting ...
	 *         then cancel it to force a restart with our changed events
	 */
	pa_events = pa->prev_events != pa->events;

	if (pa_events) {
		if (lws_plat_change_pollfd(context, wsi, pfd)) {
			lwsl_info("%s failed\n", __func__);
			ret = -1;
			goto bail;
		}
		sampled_tid = context->service_tid;
		if (sampled_tid && wsi->vhost) {
			tid = wsi->vhost->protocols[0].callback(wsi,
				     LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
			if (tid == -1) {
				ret = -1;
				goto bail;
			}
			if (tid != sampled_tid)
				lws_cancel_service_pt(wsi);
		}
	}

bail:
	return ret;
}
+
+#ifndef LWS_NO_SERVER
+/*
+ * Enable or disable listen sockets on this pt globally...
+ * it's modulated according to the pt having space for a new accept.
+ */
+static void
+lws_accept_modulation(struct lws_context *context,
+ struct lws_context_per_thread *pt, int allow)
+{
+ struct lws_vhost *vh = context->vhost_list;
+ struct lws_pollargs pa1;
+
+ while (vh) {
+ if (vh->lserv_wsi) {
+ if (allow)
+ _lws_change_pollfd(vh->lserv_wsi,
+ 0, LWS_POLLIN, &pa1);
+ else
+ _lws_change_pollfd(vh->lserv_wsi,
+ LWS_POLLIN, 0, &pa1);
+ }
+ vh = vh->vhost_next;
+ }
+}
+#endif
+
/*
 * Append wsi's socket to this service thread's pollfd table (initially
 * POLLIN only) and register it in the fd -> wsi lookup.
 *
 * Returns 0 on success, 1 if the pt's fds table is full or the fd is out
 * of range, -1 if an external-poll (protocols[0]) callback failed.
 * Caller must hold whatever lock protects pt->fds (double-underscore
 * naming convention in this file).
 */
int
__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
{
	struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int ret = 0;


	lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n",
		   __func__, wsi, wsi->tsi, wsi->desc.sockfd, pt->fds_count);

	if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
		lwsl_err("Too many fds (%d vs %d)\n", context->max_fds,
			 context->fd_limit_per_thread );
		return 1;
	}

#if !defined(_WIN32)
	/* the fd -> wsi lookup table is sized max_fds; reject beyond it */
	if (wsi->desc.sockfd - lws_plat_socket_offset() >= context->max_fds) {
		lwsl_err("Socket fd %d is too high (%d) offset %d\n",
			 wsi->desc.sockfd, context->max_fds,
			 lws_plat_socket_offset());
		return 1;
	}
#endif

	assert(wsi);
	assert(wsi->event_pipe || wsi->vhost);
	assert(lws_socket_is_valid(wsi->desc.sockfd));

	/* external POLL support: bracket the table change with LOCK/UNLOCK */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *) &pa, 1))
		return -1;

	pt->count_conns++;
	insert_wsi(context, wsi);
	/* take the next free slot at the end of the dense fds array */
	wsi->position_in_fds_table = pt->fds_count;

	pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
	pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
	/* fds_count is not yet incremented here, so this reads the slot we
	 * just filled; the plat call below bumps fds_count */
	pa.events = pt->fds[pt->fds_count].events;

	lws_plat_insert_socket_into_fds(context, wsi);

	/* external POLL support via protocol 0 */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
					      wsi->user_space, (void *) &pa, 0))
		ret = -1;
#ifndef LWS_NO_SERVER
	/* if no more room, defeat accepts on this thread */
	if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 0);
#endif

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *)&pa, 1))
		ret = -1;

	return ret;
}
+
+int
+__remove_wsi_socket_from_fds(struct lws *wsi)
+{
+ struct lws_context *context = wsi->context;
+ struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
+ struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
+ struct lws *end_wsi;
+ int v;
+ int m, ret = 0;
+
+ if (wsi->parent_carries_io) {
+ lws_same_vh_protocol_remove(wsi);
+ return 0;
+ }
+
+#if !defined(_WIN32)
+ if (wsi->desc.sockfd - lws_plat_socket_offset() > context->max_fds) {
+ lwsl_err("fd %d too high (%d)\n", wsi->desc.sockfd,
+ context->max_fds);
+ return 1;
+ }
+#endif
+
+ if (wsi->vhost &&
+ wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
+ wsi->user_space, (void *)&pa, 1))
+ return -1;
+
+ lws_same_vh_protocol_remove(wsi);
+
+ /* the guy who is to be deleted's slot index in pt->fds */
+ m = wsi->position_in_fds_table;
+
+ /* these are the only valid possibilities for position_in_fds_table */
+ assert(m == LWS_NO_FDS_POS || (m >= 0 &&
+ (unsigned int)m < pt->fds_count));
+
+ if (context->event_loop_ops->io)
+ context->event_loop_ops->io(wsi,
+ LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE |
+ LWS_EV_PREPARE_DELETION);
+
+ lwsl_debug("%s: wsi=%p, sock=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
+ __func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
+ pt->fds_count, pt->fds[pt->fds_count].fd);
+
+ if (m != LWS_NO_FDS_POS) {
+
+ /* have the last guy take up the now vacant slot */
+ pt->fds[m] = pt->fds[pt->fds_count - 1];
+ /* this decrements pt->fds_count */
+ lws_plat_delete_socket_from_fds(context, wsi, m);
+ v = (int) pt->fds[m].fd;
+ /* end guy's "position in fds table" is now the deletion guy's old one */
+ end_wsi = wsi_from_fd(context, v);
+ if (!end_wsi) {
+ lwsl_err("no wsi for fd %d at pos %d, pt->fds_count=%d\n",
+ (int)pt->fds[m].fd, m, pt->fds_count);
+ assert(0);
+ } else
+ end_wsi->position_in_fds_table = m;
+
+ /* deletion guy's lws_lookup entry needs nuking */
+ delete_from_fd(context, wsi->desc.sockfd);
+
+ /* removed wsi has no position any more */
+ wsi->position_in_fds_table = LWS_NO_FDS_POS;
+ }
+
+ /* remove also from external POLL support via protocol 0 */
+ if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->vhost &&
+ wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
+ wsi->user_space, (void *) &pa, 0))
+ ret = -1;
+
+#ifndef LWS_NO_SERVER
+ if (!context->being_destroyed &&
+ /* if this made some room, accept connects on this thread */
+ (unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
+ lws_accept_modulation(context, pt, 1);
+#endif
+
+ if (wsi->vhost &&
+ wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
+ wsi->user_space, (void *) &pa, 1))
+ ret = -1;
+
+ return ret;
+}
+
+int
+__lws_change_pollfd(struct lws *wsi, int _and, int _or)
+{
+ struct lws_context *context;
+ struct lws_pollargs pa;
+ int ret = 0;
+
+ if (!wsi || (!wsi->protocol && !wsi->event_pipe) ||
+ wsi->position_in_fds_table == LWS_NO_FDS_POS)
+ return 0;
+
+ context = lws_get_context(wsi);
+ if (!context)
+ return 1;
+
+ if (wsi->vhost &&
+ wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
+ wsi->user_space, (void *) &pa, 0))
+ return -1;
+
+ ret = _lws_change_pollfd(wsi, _and, _or, &pa);
+ if (wsi->vhost &&
+ wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
+ wsi->user_space, (void *) &pa, 0))
+ ret = -1;
+
+ return ret;
+}
+
+int
+lws_change_pollfd(struct lws *wsi, int _and, int _or)
+{
+ struct lws_context_per_thread *pt;
+ int ret = 0;
+
+ pt = &wsi->context->pt[(int)wsi->tsi];
+
+ lws_pt_lock(pt, __func__);
+ ret = __lws_change_pollfd(wsi, _and, _or);
+ lws_pt_unlock(pt);
+
+ return ret;
+}
+
/*
 * Ask for a WRITABLE callback on wsi: either by asking the parent (when
 * the parent carries this wsi's io), via the role's own mechanism (eg,
 * h2 tracks writability on the network wsi), or by enabling POLLOUT on
 * the wsi's pollfd directly.
 *
 * Returns 1 if the request was arranged, 0 if ignored (shutting down or
 * socket unusable), negative on failure.
 */
LWS_VISIBLE int
lws_callback_on_writable(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (lwsi_state(wsi) == LRS_SHUTDOWN)
		return 0;

	if (wsi->socket_is_permanently_unusable)
		return 0;

	pt = &wsi->context->pt[(int)wsi->tsi];

	if (wsi->parent_carries_io) {
#if defined(LWS_WITH_STATS)
		/* start the request-to-writable latency clock once */
		if (!wsi->active_writable_req_us) {
			wsi->active_writable_req_us = time_in_microseconds();
			lws_stats_atomic_bump(wsi->context, pt,
					      LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
		}
#endif
		/* recurse: it's the parent that must become writable */
		n = lws_callback_on_writable(wsi->parent);
		if (n < 0)
			return n;

		/* remember to forward the parent's WRITABLE to us */
		wsi->parent_pending_cb_on_writable = 1;
		return 1;
	}

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB_REQ, 1);
#if defined(LWS_WITH_STATS)
	if (!wsi->active_writable_req_us) {
		wsi->active_writable_req_us = time_in_microseconds();
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
	}
#endif


	/*
	 * roles may handle the request themselves (returning nonzero);
	 * otherwise the POLLOUT goes on the underlying network wsi
	 */
	if (wsi->role_ops->callback_on_writable) {
		if (wsi->role_ops->callback_on_writable(wsi))
			return 1;
		/* note: wsi is rebound to the network wsi from here down */
		wsi = lws_get_network_wsi(wsi);
	}

	if (wsi->position_in_fds_table == LWS_NO_FDS_POS) {
		lwsl_debug("%s: failed to find socket %d\n", __func__,
			   wsi->desc.sockfd);
		return -1;
	}

	if (__lws_change_pollfd(wsi, 0, LWS_POLLOUT))
		return -1;

	return 1;
}
+
+
+/*
+ * stitch protocol choice into the vh protocol linked list
+ * We always insert ourselves at the start of the list
+ *
+ * X <-> B
+ * X <-> pAn <-> pB
+ *
+ * Illegal to attach more than once without detach inbetween
+ */
+void
+lws_same_vh_protocol_insert(struct lws *wsi, int n)
+{
+ if (wsi->same_vh_protocol_prev || wsi->same_vh_protocol_next) {
+ lws_same_vh_protocol_remove(wsi);
+ lwsl_notice("Attempted to attach wsi twice to same vh prot\n");
+ }
+
+ lws_vhost_lock(wsi->vhost);
+
+ wsi->same_vh_protocol_prev = &wsi->vhost->same_vh_protocol_list[n];
+ /* old first guy is our next */
+ wsi->same_vh_protocol_next = wsi->vhost->same_vh_protocol_list[n];
+ /* we become the new first guy */
+ wsi->vhost->same_vh_protocol_list[n] = wsi;
+
+ if (wsi->same_vh_protocol_next)
+ /* old first guy points back to us now */
+ wsi->same_vh_protocol_next->same_vh_protocol_prev =
+ &wsi->same_vh_protocol_next;
+
+ wsi->on_same_vh_list = 1;
+
+ lws_vhost_unlock(wsi->vhost);
+}
+
/*
 * Detach wsi from its vhost's per-protocol wsi list, if it is on one.
 * Safe to call on an already-detached wsi (no-op).
 *
 *   A -> B -> C
 *   A -> C , or, B -> C, or A -> B
 *
 * same_vh_protocol_prev points at the *pointer* that points to us (either
 * the list-head slot or the previous wsi's next field), so the head and
 * interior cases are handled identically.
 */
void
lws_same_vh_protocol_remove(struct lws *wsi)
{
	lwsl_info("%s: removing same prot wsi %p\n", __func__, wsi);

	if (!wsi->vhost || !wsi->on_same_vh_list)
		return;

	lws_vhost_lock(wsi->vhost);

	if (wsi->same_vh_protocol_prev) {
		/* list invariant: the pointer we track must point at us */
		assert (*(wsi->same_vh_protocol_prev) == wsi);
		lwsl_info("have prev %p, setting him to our next %p\n",
			  wsi->same_vh_protocol_prev,
			  wsi->same_vh_protocol_next);

		/* guy who pointed to us should point to our next */
		*(wsi->same_vh_protocol_prev) = wsi->same_vh_protocol_next;
	}

	/* our next should point back to our prev */
	if (wsi->same_vh_protocol_next)
		wsi->same_vh_protocol_next->same_vh_protocol_prev =
				wsi->same_vh_protocol_prev;

	wsi->same_vh_protocol_prev = NULL;
	wsi->same_vh_protocol_next = NULL;
	wsi->on_same_vh_list = 0;

	lws_vhost_unlock(wsi->vhost);
}
+
+
+LWS_VISIBLE int
+lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
+ const struct lws_protocols *protocol)
+{
+ struct lws *wsi;
+
+ if (protocol < vhost->protocols ||
+ protocol >= (vhost->protocols + vhost->count_protocols)) {
+ lwsl_err("%s: protocol %p is not from vhost %p (%p - %p)\n",
+ __func__, protocol, vhost->protocols, vhost,
+ (vhost->protocols + vhost->count_protocols));
+
+ return -1;
+ }
+
+ wsi = vhost->same_vh_protocol_list[protocol - vhost->protocols];
+ while (wsi) {
+ assert(wsi->protocol == protocol);
+ assert(*wsi->same_vh_protocol_prev == wsi);
+ if (wsi->same_vh_protocol_next)
+ assert(wsi->same_vh_protocol_next->
+ same_vh_protocol_prev ==
+ &wsi->same_vh_protocol_next);
+
+ lws_callback_on_writable(wsi);
+ wsi = wsi->same_vh_protocol_next;
+ }
+
+ return 0;
+}
+
+LWS_VISIBLE int
+lws_callback_on_writable_all_protocol(const struct lws_context *context,
+ const struct lws_protocols *protocol)
+{
+ struct lws_vhost *vhost;
+ int n;
+
+ if (!context)
+ return 0;
+
+ vhost = context->vhost_list;
+
+ while (vhost) {
+ for (n = 0; n < vhost->count_protocols; n++)
+ if (protocol->callback ==
+ vhost->protocols[n].callback &&
+ !strcmp(protocol->name, vhost->protocols[n].name))
+ break;
+ if (n != vhost->count_protocols)
+ lws_callback_on_writable_all_protocol_vhost(
+ vhost, &vhost->protocols[n]);
+
+ vhost = vhost->vhost_next;
+ }
+
+ return 0;
+}
diff --git a/thirdparty/libwebsockets/core/private.h b/thirdparty/libwebsockets/core/private.h
new file mode 100644
index 0000000000..73748b0498
--- /dev/null
+++ b/thirdparty/libwebsockets/core/private.h
@@ -0,0 +1,1770 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010 - 2018 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "lws_config.h"
+#include "lws_config_private.h"
+
+#if defined(LWS_WITH_CGI) && defined(LWS_HAVE_VFORK)
+ #define _GNU_SOURCE
+#endif
+
+#if defined(__COVERITY__) && !defined(LWS_COVERITY_WORKAROUND)
+ #define LWS_COVERITY_WORKAROUND
+ typedef float _Float32;
+ typedef float _Float64;
+ typedef float _Float128;
+ typedef float _Float32x;
+ typedef float _Float64x;
+ typedef float _Float128x;
+#endif
+
+#ifdef LWS_HAVE_SYS_TYPES_H
+ #include <sys/types.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <ctype.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#if defined(LWS_WITH_ESP32)
+ #define MSG_NOSIGNAL 0
+ #define SOMAXCONN 3
+#endif
+
+#define STORE_IN_ROM
+#include <assert.h>
+#if LWS_MAX_SMP > 1
+ #include <pthread.h>
+#endif
+
+#ifdef LWS_HAVE_SYS_STAT_H
+ #include <sys/stat.h>
+#endif
+
+#if defined(WIN32) || defined(_WIN32)
+
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+
+ #if (WINVER < 0x0501)
+ #undef WINVER
+ #undef _WIN32_WINNT
+ #define WINVER 0x0501
+ #define _WIN32_WINNT WINVER
+ #endif
+
+ #define LWS_NO_DAEMONIZE
+ #define LWS_ERRNO WSAGetLastError()
+ #define LWS_EAGAIN WSAEWOULDBLOCK
+ #define LWS_EALREADY WSAEALREADY
+ #define LWS_EINPROGRESS WSAEINPROGRESS
+ #define LWS_EINTR WSAEINTR
+ #define LWS_EISCONN WSAEISCONN
+ #define LWS_EWOULDBLOCK WSAEWOULDBLOCK
+ #define MSG_NOSIGNAL 0
+ #define SHUT_RDWR SD_BOTH
+ #define SOL_TCP IPPROTO_TCP
+ #define SHUT_WR SD_SEND
+
+ #define compatible_close(fd) closesocket(fd)
+ #define lws_set_blocking_send(wsi) wsi->sock_send_blocking = 1
+ #define LWS_SOCK_INVALID (INVALID_SOCKET)
+
+ #include <winsock2.h>
+ #include <ws2tcpip.h>
+ #include <windows.h>
+ #include <tchar.h>
+ #ifdef LWS_HAVE_IN6ADDR_H
+ #include <in6addr.h>
+ #endif
+ #include <mstcpip.h>
+ #include <io.h>
+
+ #if !defined(LWS_HAVE_ATOLL)
+ #if defined(LWS_HAVE__ATOI64)
+ #define atoll _atoi64
+ #else
+ #warning No atoll or _atoi64 available, using atoi
+ #define atoll atoi
+ #endif
+ #endif
+
+ #ifndef __func__
+ #define __func__ __FUNCTION__
+ #endif
+
+ #ifdef LWS_HAVE__VSNPRINTF
+ #define vsnprintf _vsnprintf
+ #endif
+
+ /* we don't have an implementation for this on windows... */
+ int kill(int pid, int sig);
+ int fork(void);
+ #ifndef SIGINT
+ #define SIGINT 2
+ #endif
+
+#else /* not windows --> */
+
+ #include <fcntl.h>
+ #include <strings.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+
+ #ifndef __cplusplus
+ #include <errno.h>
+ #endif
+ #include <netdb.h>
+ #include <signal.h>
+ #include <sys/socket.h>
+
+ #if defined(LWS_BUILTIN_GETIFADDRS)
+ #include "./misc/getifaddrs.h"
+ #else
+ #if !defined(LWS_WITH_ESP32)
+ #if defined(__HAIKU__)
+ #define _BSD_SOURCE
+ #endif
+ #include <ifaddrs.h>
+ #endif
+ #endif
+ #if defined (__ANDROID__)
+ #include <syslog.h>
+ #include <sys/resource.h>
+ #elif defined (__sun) || defined(__HAIKU__) || defined(__QNX__)
+ #include <syslog.h>
+ #else
+ #if !defined(LWS_WITH_ESP32)
+ #include <sys/syslog.h>
+ #endif
+ #endif
+ #include <netdb.h>
+ #if !defined(LWS_WITH_ESP32)
+ #include <sys/mman.h>
+ #include <sys/un.h>
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+ #include <arpa/inet.h>
+ #include <poll.h>
+ #endif
+ #ifndef LWS_NO_FORK
+ #ifdef LWS_HAVE_SYS_PRCTL_H
+ #include <sys/prctl.h>
+ #endif
+ #endif
+
+ #include <sys/time.h>
+
+ #define LWS_ERRNO errno
+ #define LWS_EAGAIN EAGAIN
+ #define LWS_EALREADY EALREADY
+ #define LWS_EINPROGRESS EINPROGRESS
+ #define LWS_EINTR EINTR
+ #define LWS_EISCONN EISCONN
+ #define LWS_EWOULDBLOCK EWOULDBLOCK
+
+ #define lws_set_blocking_send(wsi)
+
+ #define LWS_SOCK_INVALID (-1)
+#endif /* not windows */
+
+#ifndef LWS_HAVE_BZERO
+ #ifndef bzero
+ #define bzero(b, len) (memset((b), '\0', (len)), (void) 0)
+ #endif
+#endif
+
+#ifndef LWS_HAVE_STRERROR
+ #define strerror(x) ""
+#endif
+
+
+#define lws_socket_is_valid(x) (x != LWS_SOCK_INVALID)
+
+#include "libwebsockets.h"
+
+#include "tls/private.h"
+
+#if defined(WIN32) || defined(_WIN32)
+ #include <gettimeofday.h>
+
+ #ifndef BIG_ENDIAN
+ #define BIG_ENDIAN 4321 /* to show byte order (taken from gcc) */
+ #endif
+ #ifndef LITTLE_ENDIAN
+ #define LITTLE_ENDIAN 1234
+ #endif
+ #ifndef BYTE_ORDER
+ #define BYTE_ORDER LITTLE_ENDIAN
+ #endif
+
+ #undef __P
+ #ifndef __P
+ #if __STDC__
+ #define __P(protos) protos
+ #else
+ #define __P(protos) ()
+ #endif
+ #endif
+
+#else /* not windows */
+ static LWS_INLINE int compatible_close(int fd) { return close(fd); }
+
+ #include <sys/stat.h>
+ #include <sys/time.h>
+
+ #if defined(__APPLE__)
+ #include <machine/endian.h>
+ #elif defined(__FreeBSD__)
+ #include <sys/endian.h>
+ #elif defined(__linux__)
+ #include <endian.h>
+ #endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__QNX__)
+ #include <gulliver.h>
+ #if defined(__LITTLEENDIAN__)
+ #define BYTE_ORDER __LITTLEENDIAN__
+ #define LITTLE_ENDIAN __LITTLEENDIAN__
+ #define BIG_ENDIAN 4321 /* to show byte order (taken from gcc); for suppres warning that BIG_ENDIAN is not defined. */
+ #endif
+ #if defined(__BIGENDIAN__)
+ #define BYTE_ORDER __BIGENDIAN__
+ #define LITTLE_ENDIAN 1234 /* to show byte order (taken from gcc); for suppres warning that LITTLE_ENDIAN is not defined. */
+ #define BIG_ENDIAN __BIGENDIAN__
+ #endif
+#endif
+
+#if defined(__sun) && defined(__GNUC__)
+
+ #include <arpa/nameser_compat.h>
+
+ #if !defined (BYTE_ORDER)
+ #define BYTE_ORDER __BYTE_ORDER__
+ #endif
+
+ #if !defined(LITTLE_ENDIAN)
+ #define LITTLE_ENDIAN __ORDER_LITTLE_ENDIAN__
+ #endif
+
+ #if !defined(BIG_ENDIAN)
+ #define BIG_ENDIAN __ORDER_BIG_ENDIAN__
+ #endif
+
+#endif /* sun + GNUC */
+
+#if !defined(BYTE_ORDER)
+ #define BYTE_ORDER __BYTE_ORDER
+#endif
+#if !defined(LITTLE_ENDIAN)
+ #define LITTLE_ENDIAN __LITTLE_ENDIAN
+#endif
+#if !defined(BIG_ENDIAN)
+ #define BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+
+/*
+ * Mac OSX as well as iOS do not define the MSG_NOSIGNAL flag,
+ * but happily have something equivalent in the SO_NOSIGPIPE flag.
+ */
+#ifdef __APPLE__
+#define MSG_NOSIGNAL SO_NOSIGPIPE
+#endif
+
+/*
+ * Solaris 11.X only supports POSIX 2001, MSG_NOSIGNAL appears in
+ * POSIX 2008.
+ */
+#ifdef __sun
+ #define MSG_NOSIGNAL 0
+#endif
+
+#ifdef _WIN32
+ #ifndef FD_HASHTABLE_MODULUS
+ #define FD_HASHTABLE_MODULUS 32
+ #endif
+#endif
+
+#ifndef LWS_DEF_HEADER_LEN
+#define LWS_DEF_HEADER_LEN 4096
+#endif
+#ifndef LWS_DEF_HEADER_POOL
+#define LWS_DEF_HEADER_POOL 4
+#endif
+#ifndef LWS_MAX_PROTOCOLS
+#define LWS_MAX_PROTOCOLS 5
+#endif
+#ifndef LWS_MAX_EXTENSIONS_ACTIVE
+#define LWS_MAX_EXTENSIONS_ACTIVE 1
+#endif
+#ifndef LWS_MAX_EXT_OFFERS
+#define LWS_MAX_EXT_OFFERS 8
+#endif
+#ifndef SPEC_LATEST_SUPPORTED
+#define SPEC_LATEST_SUPPORTED 13
+#endif
+#ifndef AWAITING_TIMEOUT
+#define AWAITING_TIMEOUT 20
+#endif
+#ifndef CIPHERS_LIST_STRING
+#define CIPHERS_LIST_STRING "DEFAULT"
+#endif
+#ifndef LWS_SOMAXCONN
+#define LWS_SOMAXCONN SOMAXCONN
+#endif
+
+#define MAX_WEBSOCKET_04_KEY_LEN 128
+
+#ifndef SYSTEM_RANDOM_FILEPATH
+#define SYSTEM_RANDOM_FILEPATH "/dev/urandom"
+#endif
+
+#define LWS_H2_RX_SCRATCH_SIZE 512
+
+#if defined(WIN32) || defined(_WIN32)
+ // Visual studio older than 2015 and WIN_CE has only _stricmp
+ #if (defined(_MSC_VER) && _MSC_VER < 1900) || defined(_WIN32_WCE)
+ #define strcasecmp _stricmp
+ #elif !defined(__MINGW32__)
+ #define strcasecmp stricmp
+ #endif
+ #define getdtablesize() 30000
+#endif
+
+/*
+ * All lws_tls...() functions must return this type, converting the
+ * native backend result and doing the extra work to determine which one
+ * as needed.
+ *
+ * Native TLS backend return codes are NOT ALLOWED outside the backend.
+ *
+ * Non-SSL mode also uses these types.
+ */
/* unified tls-layer result codes (see block comment above): 0 success,
 * -1 hard failure, other negatives mean "retry later" in various ways */
enum lws_ssl_capable_status {
	LWS_SSL_CAPABLE_ERROR = -1, /* it failed */
	LWS_SSL_CAPABLE_DONE = 0, /* it succeeded */
	LWS_SSL_CAPABLE_MORE_SERVICE_READ = -2, /* retry WANT_READ */
	LWS_SSL_CAPABLE_MORE_SERVICE_WRITE = -3, /* retry WANT_WRITE */
	LWS_SSL_CAPABLE_MORE_SERVICE = -4, /* general retry */
};
+
+#if defined(__clang__)
+#define lws_memory_barrier() __sync_synchronize()
+#elif defined(__GNUC__)
+#define lws_memory_barrier() __sync_synchronize()
+#else
+#define lws_memory_barrier()
+#endif
+
+/*
+ *
+ * ------ roles ------
+ *
+ */
+
+#include "roles/private.h"
+
+/* null-terminated array of pointers to roles lws built with */
+extern const struct lws_role_ops *available_roles[];
+
+#define LWS_FOR_EVERY_AVAILABLE_ROLE_START(xx) { \
+ const struct lws_role_ops **ppxx = available_roles; \
+ while (*ppxx) { \
+ const struct lws_role_ops *xx = *ppxx++;
+
+#define LWS_FOR_EVERY_AVAILABLE_ROLE_END }}
+
+/*
+ *
+ * ------ event_loop ops ------
+ *
+ */
+
+#include "event-libs/private.h"
+
/* SOCKS protocol version octet (SOCKS5 is RFC 1928) */
enum socks_version {
	SOCKS_VERSION_4 = 4,
	SOCKS_VERSION_5 = 5
};

/* username/password subnegotiation version octet (RFC 1929) */
enum socks_subnegotiation_version {
	SOCKS_SUBNEGOTIATION_VERSION_1 = 1,
};

/* SOCKS5 request CMD codes (RFC 1928 section 4) */
enum socks_command {
	SOCKS_COMMAND_CONNECT = 1,
	SOCKS_COMMAND_BIND = 2,
	SOCKS_COMMAND_UDP_ASSOCIATE = 3
};

/* SOCKS5 address types (ATYP octet) */
enum socks_atyp {
	SOCKS_ATYP_IPV4 = 1,
	SOCKS_ATYP_DOMAINNAME = 3,
	SOCKS_ATYP_IPV6 = 4
};

/* SOCKS5 authentication method codes offered in the greeting */
enum socks_auth_method {
	SOCKS_AUTH_NO_AUTH = 0,
	SOCKS_AUTH_GSSAPI = 1,
	SOCKS_AUTH_USERNAME_PASSWORD = 2
};

/* subnegotiation status octet: 0 is success, anything else is failure */
enum socks_subnegotiation_status {
	SOCKS_SUBNEGOTIATION_STATUS_SUCCESS = 0,
};

/* REP codes in the SOCKS5 server reply (RFC 1928 section 6) */
enum socks_request_reply {
	SOCKS_REQUEST_REPLY_SUCCESS = 0,
	SOCKS_REQUEST_REPLY_FAILURE_GENERAL = 1,
	SOCKS_REQUEST_REPLY_CONNECTION_NOT_ALLOWED = 2,
	SOCKS_REQUEST_REPLY_NETWORK_UNREACHABLE = 3,
	SOCKS_REQUEST_REPLY_HOST_UNREACHABLE = 4,
	SOCKS_REQUEST_REPLY_CONNECTION_REFUSED = 5,
	SOCKS_REQUEST_REPLY_TTL_EXPIRED = 6,
	SOCKS_REQUEST_REPLY_COMMAND_NOT_SUPPORTED = 7,
	SOCKS_REQUEST_REPLY_ATYP_NOT_SUPPORTED = 8
};

/* kinds of message lws generates when talking to a socks proxy */
enum socks_msg_type {
	/* greeting */
	SOCKS_MSG_GREETING,
	/* credential, user name and password */
	SOCKS_MSG_USERNAME_PASSWORD,
	/* connect command */
	SOCKS_MSG_CONNECT
};
+
/* rx flow-control state bits (presumably stored in the 2-bit
 * wsi->rxflow_change_to field — confirm against usage sites) */
enum {
	LWS_RXFLOW_ALLOW = (1 << 0),
	LWS_RXFLOW_PENDING_CHANGE = (1 << 1),
};
+
/*
 * Backing state for the public lws_ring_* ringbuffer apis: fixed-size
 * elements stored contiguously in buf.  head/oldest_tail look like
 * offsets into buf — confirm units against the lws_ring implementation.
 */
struct lws_ring {
	void *buf;		/* the ring storage */
	void (*destroy_element)(void *element); /* optional element dtor */
	uint32_t buflen;	/* total size of buf */
	uint32_t element_len;	/* size of one element */
	uint32_t head;		/* insertion point */
	uint32_t oldest_tail;	/* oldest unconsumed data */
};
+
+struct lws_protocols;
+struct lws;
+
/* per-fd io watcher: holds the event-library-specific watcher object(s)
 * for whichever event backends were compiled in */
struct lws_io_watcher {
#ifdef LWS_WITH_LIBEV
	struct lws_io_watcher_libev ev;
#endif
#ifdef LWS_WITH_LIBUV
	struct lws_io_watcher_libuv uv;
#endif
#ifdef LWS_WITH_LIBEVENT
	struct lws_io_watcher_libevent event;
#endif
	struct lws_context *context;

	uint8_t actual_events; /* event bits currently armed */
};

/* signal watcher: same pattern as above but for signal delivery */
struct lws_signal_watcher {
#ifdef LWS_WITH_LIBEV
	struct lws_signal_watcher_libev ev;
#endif
#ifdef LWS_WITH_LIBUV
	struct lws_signal_watcher_libuv uv;
#endif
#ifdef LWS_WITH_LIBEVENT
	struct lws_signal_watcher_libevent event;
#endif
	struct lws_context *context;
};
+
#ifdef _WIN32
/*
 * Windows socket handles are not small contiguous ints, so fd -> wsi
 * lookup uses a hashtable of FD_HASHTABLE_MODULUS buckets instead of a
 * flat array.  The macro argument is parenthesized so expression
 * arguments (e.g. LWS_FD_HASH(a + b)) hash correctly.
 */
#define LWS_FD_HASH(fd) (((fd) ^ ((fd) >> 8) ^ ((fd) >> 16)) % FD_HASHTABLE_MODULUS)
struct lws_fd_hashtable {
	struct lws **wsi;	/* bucket: array of wsi in this hash slot */
	int length;		/* number of entries in the bucket */
};
#endif
+
/*
 * Queued pollfd-events change requested by a thread other than the
 * service thread; _and/_or look like masks to be applied to the pollfd
 * events field by the service thread — confirm against the consumer in
 * pollfd.c.
 */
struct lws_foreign_thread_pollfd {
	struct lws_foreign_thread_pollfd *next; /* singly-linked queue */
	int fd_index;	/* index into the pt fds table */
	int _and;	/* events mask to AND in */
	int _or;	/* events mask to OR in */
};
+
+
+#define LWS_HRTIMER_NOWAIT (0x7fffffffffffffffll)
+
+/*
+ * so we can have n connections being serviced simultaneously,
+ * these things need to be isolated per-thread.
+ */
+
/*
 * Per-service-thread state (one of these per tsi, LWS_MAX_SMP total in
 * the context).  See the comment above: anything here may be used by
 * concurrent service threads without cross-thread locking.
 */
struct lws_context_per_thread {
#if LWS_MAX_SMP > 1
	pthread_mutex_t lock;
	pthread_mutex_t lock_stats;
	pthread_t lock_owner;
	/* debug aid: why the pt lock is currently held */
	const char *last_lock_reason;
#endif

	struct lws_context *context; /* back-pointer to owning context */

	/*
	 * usable by anything in the service code, but only if the scope
	 * does not last longer than the service action (since next service
	 * of any socket can likewise use it and overwrite)
	 */
	unsigned char *serv_buf;

	struct lws_dll_lws dll_head_timeout;
	struct lws_dll_lws dll_head_hrtimer;
	struct lws_dll_lws dll_head_buflist; /* guys with pending rxflow */

#if defined(LWS_WITH_TLS)
	struct lws_pt_tls tls;
#endif

	struct lws_pollfd *fds; /* poll table for this thread */
	/* queue of pollfd changes requested by foreign threads */
	volatile struct lws_foreign_thread_pollfd * volatile foreign_pfd_list;
#ifdef _WIN32
	WSAEVENT *events;
#endif
	/* pipe used to wake this thread out of poll() */
	lws_sockfd_type dummy_pipe_fds[2];
	struct lws *pipe_wsi;

	/* --- role based members --- */

#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	struct lws_pt_role_ws ws;
#endif
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	struct lws_pt_role_http http;
#endif

	/* --- event library based members --- */

#if defined(LWS_WITH_LIBEV)
	struct lws_pt_eventlibs_libev ev;
#endif
#if defined(LWS_WITH_LIBUV)
	struct lws_pt_eventlibs_libuv uv;
#endif
#if defined(LWS_WITH_LIBEVENT)
	struct lws_pt_eventlibs_libevent event;
#endif

#if defined(LWS_WITH_LIBEV) || defined(LWS_WITH_LIBUV) || defined(LWS_WITH_LIBEVENT)
	struct lws_signal_watcher w_sigint;
#endif

	/* --- */

	unsigned long count_conns;
	unsigned int fds_count; /* entries in use in fds */

	/* volatile: read by foreign threads deciding how to signal us */
	volatile unsigned char inside_poll;
	volatile unsigned char foreign_spinlock;

	unsigned char tid; /* thread service index of this pt */

	unsigned char lock_depth;
	unsigned char inside_service:1;
	unsigned char event_loop_foreign:1;
	unsigned char event_loop_destroy_processing_done:1;
};
+
/* connection counters kept per-vhost and aggregated per-context */
struct lws_conn_stats {
	unsigned long long rx, tx; /* bytes received / sent */
	/* transaction / upgrade counts by kind; "rejected" is failed conns */
	unsigned long h1_conn, h1_trans, h2_trans, ws_upg, h2_alpn, h2_subs,
		      h2_upg, rejected;
};

/* add ctx-wide plus all vhost counters into *cs */
void
lws_sum_stats(const struct lws_context *ctx, struct lws_conn_stats *cs);

/* node in a vhost's list of scheduled protocol timer callbacks:
 * at "time", issue callback "reason" to "protocol" */
struct lws_timed_vh_protocol {
	struct lws_timed_vh_protocol *next;
	const struct lws_protocols *protocol;
	time_t time;
	int reason;
};
+
+/*
+ * virtual host -related context information
+ * vhostwide SSL context
+ * vhostwide proxy
+ *
+ * hierarchy:
+ *
+ * context -> vhost -> wsi
+ *
+ * incoming connection non-SSL vhost binding:
+ *
+ * listen socket -> wsi -> select vhost after first headers
+ *
+ * incoming connection SSL vhost binding:
+ *
+ * SSL SNI -> wsi -> bind after SSL negotiation
+ */
+
+
/* one virtual host: see hierarchy comment above (context -> vhost -> wsi) */
struct lws_vhost {
#if !defined(LWS_WITHOUT_CLIENT)
	/* pre-encoded Authorization: Basic token for an http proxy */
	char proxy_basic_auth_token[128];
#endif
#if LWS_MAX_SMP > 1
	pthread_mutex_t lock;
#endif

#if defined(LWS_ROLE_H2)
	struct lws_vhost_role_h2 h2;
#endif
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	struct lws_vhost_role_http http;
#endif
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	struct lws_vhost_role_ws ws;
#endif

#if defined(LWS_WITH_SOCKS5)
	char socks_proxy_address[128];
	char socks_user[96];
	char socks_password[96];
#endif
#if defined(LWS_WITH_LIBEV)
	struct lws_io_watcher w_accept;
#endif
	struct lws_conn_stats conn_stats;
	struct lws_context *context;
	struct lws_vhost *vhost_next; /* next in context->vhost_list */

	struct lws *lserv_wsi; /* the listen socket wsi, if we listen */
	const char *name;
	const char *iface;

#if !defined(LWS_WITH_ESP32) && !defined(OPTEE_TA) && !defined(WIN32)
	int bind_iface;
#endif
	const struct lws_protocols *protocols;
	void **protocol_vh_privs; /* per-protocol per-vhost user storage */
	const struct lws_protocol_vhost_options *pvo;
	const struct lws_protocol_vhost_options *headers;
	/* one list head per protocol: live wsi on this vhost + protocol */
	struct lws **same_vh_protocol_list;
	struct lws_vhost *no_listener_vhost_list;
#if !defined(LWS_NO_CLIENT)
	struct lws_dll_lws dll_active_client_conns;
#endif

#if defined(LWS_WITH_TLS)
	struct lws_vhost_tls tls;
#endif

	struct lws_timed_vh_protocol *timed_vh_protocol_list;
	void *user;

	int listen_port;

#if defined(LWS_WITH_SOCKS5)
	unsigned int socks_proxy_port;
#endif
	unsigned int options;
	int count_protocols;
	/* tcp keepalive tuning (seconds / probe count) */
	int ka_time;
	int ka_probes;
	int ka_interval;
	int keepalive_timeout;
	int timeout_secs_ah_idle;

#ifdef LWS_WITH_ACCESS_LOG
	int log_fd;
#endif

	unsigned int created_vhost_protocols:1;
	unsigned int being_destroyed:1;

	unsigned char default_protocol_index;
	unsigned char raw_protocol_index;
};
+
/* entry in context->deferred_free_list: free payload once time passes
 * deadline (allows lingering users of the allocation to drain first) */
struct lws_deferred_free
{
	struct lws_deferred_free *next;
	time_t deadline;
	void *payload;
};

/* sockaddr big enough for v4, and v6 when built with ipv6 support */
typedef union {
#ifdef LWS_WITH_IPV6
	struct sockaddr_in6 sa6;
#endif
	struct sockaddr_in sa4;
} sockaddr46;
+
+
#if defined(LWS_WITH_PEER_LIMITS)
/* per-remote-IP tracking record, hashed by address, used to enforce
 * context->ip_limit_ah / ip_limit_wsi */
struct lws_peer {
	struct lws_peer *next;		/* hash-bucket chain */
	struct lws_peer *peer_wait_list;

	time_t time_created;
	time_t time_closed_all;		/* when count_wsi last hit zero */

	uint8_t addr[32];		/* raw address bytes (v4 or v6) */
	uint32_t hash;
	uint32_t count_wsi;		/* current live connections */
	uint32_t total_wsi;		/* lifetime connections */

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	struct lws_peer_role_http http;
#endif

	uint8_t af;			/* address family of addr */
};
#endif
+
+/*
+ * the rest is managed per-context, that includes
+ *
+ * - processwide single fd -> wsi lookup
+ * - contextwide headers pool
+ */
+
/* the process-wide lws instance: owns vhosts, per-thread state, fd->wsi
 * lookup and global configuration (see comment above) */
struct lws_context {
	/* cached times used to rate-limit periodic checks */
	time_t last_timeout_check_s;
	time_t last_ws_ping_pong_check_s;
	time_t time_up;
	time_t time_discontiguity;
	time_t time_fixup;
	const struct lws_plat_file_ops *fops; /* active fops chain head */
	struct lws_plat_file_ops fops_platform;
	struct lws_context **pcontext_finalize;

	const struct lws_tls_ops *tls_ops;

#if defined(LWS_WITH_HTTP2)
	struct http2_settings set;
#endif
#if defined(LWS_WITH_ZIP_FOPS)
	struct lws_plat_file_ops fops_zip;
#endif
	struct lws_context_per_thread pt[LWS_MAX_SMP];
	struct lws_conn_stats conn_stats;
#if LWS_MAX_SMP > 1
	pthread_mutex_t lock;
	int lock_depth;
#endif
#ifdef _WIN32
/* different implementation between unix and windows */
	struct lws_fd_hashtable fd_hashtable[FD_HASHTABLE_MODULUS];
#else
	struct lws **lws_lookup;  /* fd to wsi */
#endif
	struct lws_vhost *vhost_list;
	struct lws_vhost *no_listener_vhost_list;
	struct lws_vhost *vhost_pending_destruction_list;
	struct lws_plugin *plugin_list;
	struct lws_deferred_free *deferred_free_list;
#if defined(LWS_WITH_PEER_LIMITS)
	struct lws_peer **pl_hash_table;
	struct lws_peer *peer_wait_list;
	time_t next_cull;
#endif

	void *external_baggage_free_on_destroy;
	const struct lws_token_limits *token_limits;
	void *user_space;
	const struct lws_protocol_vhost_options *reject_service_keywords;
	lws_reload_func deprecation_cb;
	void (*eventlib_signal_cb)(void *event_lib_handle, int signum);

#if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
	cap_value_t caps[4]; /* capabilities to retain after priv drop */
	char count_caps;
#endif

#if defined(LWS_WITH_LIBEV)
	struct lws_context_eventlibs_libev ev;
#endif
#if defined(LWS_WITH_LIBUV)
	struct lws_context_eventlibs_libuv uv;
#endif
#if defined(LWS_WITH_LIBEVENT)
	struct lws_context_eventlibs_libevent event;
#endif
	struct lws_event_loop_ops *event_loop_ops;


#if defined(LWS_WITH_TLS)
	struct lws_context_tls tls;
#endif

	char canonical_hostname[128];
	const char *server_string; /* Server: header value */

#ifdef LWS_LATENCY
	unsigned long worst_latency;
	char worst_latency_info[256];
#endif

#if defined(LWS_WITH_STATS)
	uint64_t lws_stats[LWSSTATS_SIZE];
	uint64_t last_dump;
	int updated;
#endif
#if defined(LWS_WITH_ESP32)
	unsigned long time_last_state_dump;
	uint32_t last_free_heap;
#endif

	int max_fds;
	int count_event_loop_static_asset_handles;
	int started_with_parent;
	int uid, gid; /* identity to drop to after listen sockets bound */

	int fd_random; /* fd on SYSTEM_RANDOM_FILEPATH */

	int count_wsi_allocated;
	int count_cgi_spawned;
	unsigned int options;
	unsigned int fd_limit_per_thread;
	unsigned int timeout_secs;
	unsigned int pt_serv_buf_size; /* size of each pt serv_buf */
	int max_http_header_data;
	int max_http_header_pool;
	int simultaneous_ssl_restriction;
	int simultaneous_ssl;
#if defined(LWS_WITH_PEER_LIMITS)
	uint32_t pl_hash_elements;	/* protected by context->lock */
	uint32_t count_peers;		/* protected by context->lock */
	unsigned short ip_limit_ah;
	unsigned short ip_limit_wsi;
#endif
	unsigned int deprecated:1;
	unsigned int being_destroyed:1;
	unsigned int being_destroyed1:1;
	unsigned int being_destroyed2:1;
	unsigned int requested_kill:1;
	unsigned int protocol_init_done:1;
	unsigned int doing_protocol_init:1;
	unsigned int done_protocol_destroy_cb:1;
	unsigned int finalize_destroy_after_internal_loops_stopped:1;
	/*
	 * set to the Thread ID that's doing the service loop just before entry
	 * to poll indicates service thread likely idling in poll()
	 * volatile because other threads may check it as part of processing
	 * for pollfd event change.
	 */
	volatile int service_tid;
	int service_tid_detected;

	short count_threads; /* how many service threads */
	short plugin_protocol_count;
	short plugin_extension_count;
	short server_string_len;
	unsigned short ws_ping_pong_interval;
	unsigned short deprecation_pending_listen_close_count;

	uint8_t max_fi;
};
+
+int
+lws_check_deferred_free(struct lws_context *context, int force);
+
+#define lws_get_context_protocol(ctx, x) ctx->vhost_list->protocols[x]
+#define lws_get_vh_protocol(vh, x) vh->protocols[x]
+
+LWS_EXTERN void
+__lws_close_free_wsi_final(struct lws *wsi);
+LWS_EXTERN void
+lws_libuv_closehandle(struct lws *wsi);
+LWS_EXTERN int
+lws_libuv_check_watcher_active(struct lws *wsi);
+
+LWS_VISIBLE LWS_EXTERN int
+lws_plat_plugins_init(struct lws_context * context, const char * const *d);
+
+LWS_VISIBLE LWS_EXTERN int
+lws_plat_plugins_destroy(struct lws_context * context);
+
+LWS_EXTERN void
+lws_restart_ws_ping_pong_timer(struct lws *wsi);
+
+struct lws *
+lws_adopt_socket_vhost(struct lws_vhost *vh, lws_sockfd_type accept_fd);
+
+int
+lws_jws_base64_enc(const char *in, size_t in_len, char *out, size_t out_max);
+
+void
+lws_vhost_destroy1(struct lws_vhost *vh);
+
/* event flags passed to event-lib io ops (readable/writable interest,
 * start/stop watching, prepare for watcher deletion) */
enum {
	LWS_EV_READ = (1 << 0),
	LWS_EV_WRITE = (1 << 1),
	LWS_EV_START = (1 << 2),
	LWS_EV_STOP = (1 << 3),

	/* NOTE(review): (1 << 31) left-shifts into the sign bit of int,
	 * which is undefined behavior in standard C; compilers accept it
	 * but (1u << 31) would be the conforming spelling — changing it
	 * alters the constant's type, so flagging rather than fixing */
	LWS_EV_PREPARE_DELETION = (1 << 31),
};
+
+
+#if defined(LWS_WITH_ESP32)
+LWS_EXTERN int
+lws_find_string_in_file(const char *filename, const char *string, int stringlen);
+#endif
+
+#ifdef LWS_WITH_IPV6
+#define LWS_IPV6_ENABLED(vh) \
+ (!lws_check_opt(vh->context->options, LWS_SERVER_OPTION_DISABLE_IPV6) && \
+ !lws_check_opt(vh->options, LWS_SERVER_OPTION_DISABLE_IPV6))
+#else
+#define LWS_IPV6_ENABLED(context) (0)
+#endif
+
+#ifdef LWS_WITH_UNIX_SOCK
+#define LWS_UNIX_SOCK_ENABLED(vhost) \
+ (vhost->options & LWS_SERVER_OPTION_UNIX_SOCK)
+#else
+#define LWS_UNIX_SOCK_ENABLED(vhost) (0)
+#endif
+
/* state machine for collapsing "/./" and "/../" while parsing uri paths */
enum uri_path_states {
	URIPS_IDLE,
	URIPS_SEEN_SLASH,
	URIPS_SEEN_SLASH_DOT,
	URIPS_SEEN_SLASH_DOT_DOT,
};

/* state machine for %xx escape decoding in uris */
enum uri_esc_states {
	URIES_IDLE,
	URIES_SEEN_PERCENT,
	URIES_SEEN_PERCENT_H1,
};
+
+
#ifndef LWS_NO_CLIENT
/* heap copies of client connect-info strings, kept on the wsi until the
 * client handshake no longer needs them (each field individually
 * allocated; all freed by lws_client_stash_destroy) */
struct client_info_stash {
	char *address;
	char *path;
	char *host;
	char *origin;
	char *protocol;
	char *method;
	char *iface;
	char *alpn;
};
#endif
+
+
+signed char char_to_hex(const char c);
+
+
/* node in a chain of heap-buffered pending data (eg, rxflow buffering):
 * each node is a single oversize allocation carrying its own payload */
struct lws_buflist {
	struct lws_buflist *next;

	size_t len;	/* bytes of payload in buf */
	size_t pos;	/* how much of buf has been consumed */

	uint8_t buf[1]; /* true length of this is set by the oversize malloc */
};
+
+#define lws_wsi_is_udp(___wsi) (!!___wsi->udp)
+
+#define LWS_H2_FRAME_HEADER_LENGTH 9
+
+
/*
 * One connection (or listen socket, or event pipe): the central per-wsi
 * state.  Members are grouped by size/kind to minimize padding; do not
 * reorder casually.
 */
struct lws {
	/* structs */

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	struct _lws_http_mode_related http;
#endif
#if defined(LWS_ROLE_H2)
	struct _lws_h2_related h2;
#endif
#if defined(LWS_ROLE_WS)
	struct _lws_websocket_related *ws; /* allocated if we upgrade to ws */
#endif

	const struct lws_role_ops *role_ops; /* vtable for the active role */
	lws_wsi_state_t wsistate;
	lws_wsi_state_t wsistate_pre_close;

	/* lifetime members */

#if defined(LWS_WITH_LIBEV) || defined(LWS_WITH_LIBUV) || defined(LWS_WITH_LIBEVENT)
	struct lws_io_watcher w_read;
#endif
#if defined(LWS_WITH_LIBEV) || defined(LWS_WITH_LIBEVENT)
	struct lws_io_watcher w_write;
#endif

	/* pointers */

	struct lws_context *context;
	struct lws_vhost *vhost;
	struct lws *parent; /* points to parent, if any */
	struct lws *child_list; /* points to first child */
	struct lws *sibling_list; /* subsequent children at same level */

	const struct lws_protocols *protocol;
	/* links in the vhost's per-protocol connection list */
	struct lws **same_vh_protocol_prev, *same_vh_protocol_next;

	struct lws_dll_lws dll_timeout;
	struct lws_dll_lws dll_hrtimer;
	struct lws_dll_lws dll_buflist; /* guys with pending rxflow */

#if defined(LWS_WITH_PEER_LIMITS)
	struct lws_peer *peer;
#endif

	struct lws_udp *udp; /* non-NULL for udp wsi (see lws_wsi_is_udp) */
#ifndef LWS_NO_CLIENT
	struct client_info_stash *stash;
	char *client_hostname_copy;
	struct lws_dll_lws dll_active_client_conns;
	struct lws_dll_lws dll_client_transaction_queue_head;
	struct lws_dll_lws dll_client_transaction_queue;
#endif
	void *user_space;
	void *opaque_parent_data;

	struct lws_buflist *buflist; /* pending rx the user hasn't seen */

	/* truncated send handling */
	unsigned char *trunc_alloc; /* non-NULL means buffering in progress */

#if defined(LWS_WITH_TLS)
	struct lws_lws_tls tls;
#endif

	lws_sock_file_fd_type desc; /* .filefd / .sockfd */
#if defined(LWS_WITH_STATS)
	uint64_t active_writable_req_us;
#if defined(LWS_WITH_TLS)
	uint64_t accept_start_us;
#endif
#endif

	lws_usec_t pending_timer; /* hrtimer fires */
	time_t pending_timeout_set; /* second-resolution timeout start */

#ifdef LWS_LATENCY
	unsigned long action_start;
	unsigned long latency_start;
#endif

	/* ints */
#define LWS_NO_FDS_POS (-1)
	int position_in_fds_table;
	unsigned int trunc_alloc_len; /* size of malloc */
	unsigned int trunc_offset; /* where we are in terms of spilling */
	unsigned int trunc_len; /* how much is buffered */
#ifndef LWS_NO_CLIENT
	int chunk_remaining;
#endif
	unsigned int cache_secs;

	/* 1-bit flags; see uses for exact semantics */
	unsigned int hdr_parsing_completed:1;
	unsigned int http2_substream:1;
	unsigned int upgraded_to_http2:1;
	unsigned int h2_stream_carries_ws:1;
	unsigned int seen_nonpseudoheader:1;
	unsigned int listener:1;
	unsigned int user_space_externally_allocated:1;
	unsigned int socket_is_permanently_unusable:1;
	unsigned int rxflow_change_to:2;
	unsigned int conn_stat_done:1;
	unsigned int cache_reuse:1;
	unsigned int cache_revalidate:1;
	unsigned int cache_intermediaries:1;
	unsigned int favoured_pollin:1;
	unsigned int sending_chunked:1;
	unsigned int interpreting:1;
	unsigned int already_did_cce:1;
	unsigned int told_user_closed:1;
	unsigned int told_event_loop_closed:1;
	unsigned int waiting_to_send_close_frame:1;
	unsigned int close_needs_ack:1;
	unsigned int ipv6:1;
	unsigned int parent_carries_io:1;
	unsigned int parent_pending_cb_on_writable:1;
	unsigned int cgi_stdout_zero_length:1;
	unsigned int seen_zero_length_recv:1;
	unsigned int rxflow_will_be_applied:1;
	unsigned int event_pipe:1;
	unsigned int on_same_vh_list:1;
	unsigned int handling_404:1;
	unsigned int protocol_bind_balance:1;

	unsigned int could_have_pending:1; /* detect back-to-back writes */
	unsigned int outer_will_close:1;

#ifdef LWS_WITH_ACCESS_LOG
	unsigned int access_log_pending:1;
#endif
#ifndef LWS_NO_CLIENT
	unsigned int do_ws:1; /* whether we are doing http or ws flow */
	unsigned int chunked:1; /* if the clientside connection is chunked */
	unsigned int client_rx_avail:1;
	unsigned int client_http_body_pending:1;
	unsigned int transaction_from_pipeline_queue:1;
	unsigned int keepalive_active:1;
	unsigned int keepalive_rejected:1;
	unsigned int client_pipeline:1;
	unsigned int client_h2_alpn:1;
	unsigned int client_h2_substream:1;
#endif

#ifdef _WIN32
	unsigned int sock_send_blocking:1;
#endif

#ifndef LWS_NO_CLIENT
	unsigned short c_port;
#endif
	unsigned short pending_timeout_limit;

	/* chars */

	char lws_rx_parse_state; /* enum lws_rx_parse_state */
	char rx_frame_type; /* enum lws_write_protocol */
	char pending_timeout; /* enum pending_timeout */
	char tsi; /* thread service index we belong to */
	char protocol_interpret_idx;
	char redirects;
	uint8_t rxflow_bitmap; /* nonzero = rx flow-controlled */
#ifdef LWS_WITH_CGI
	char cgi_channel; /* which of stdin/out/err */
	char hdr_state;
#endif
#ifndef LWS_NO_CLIENT
	char chunk_parser; /* enum lws_chunk_parser */
#endif
#if defined(LWS_WITH_CGI) || !defined(LWS_NO_CLIENT)
	char reason_bf; /* internal writeable callback reason bitfield */
#endif
#if defined(LWS_WITH_STATS) && defined(LWS_WITH_TLS)
	char seen_rx;
#endif
	uint8_t ws_over_h2_count;
	/* volatile to make sure code is aware other thread can change */
	volatile char handling_pollout;
	volatile char leave_pollout_active;
};
+
+#define lws_is_flowcontrolled(w) (!!(wsi->rxflow_bitmap))
+
+void
+lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt);
+
+LWS_EXTERN int log_level;
+
+LWS_EXTERN int
+lws_socket_bind(struct lws_vhost *vhost, lws_sockfd_type sockfd, int port,
+ const char *iface);
+
+#if defined(LWS_WITH_IPV6)
+LWS_EXTERN unsigned long
+lws_get_addr_scope(const char *ipaddr);
+#endif
+
+LWS_EXTERN void
+lws_close_free_wsi(struct lws *wsi, enum lws_close_status, const char *caller);
+LWS_EXTERN void
+__lws_close_free_wsi(struct lws *wsi, enum lws_close_status, const char *caller);
+
+LWS_EXTERN void
+__lws_free_wsi(struct lws *wsi);
+
+LWS_EXTERN int
+__remove_wsi_socket_from_fds(struct lws *wsi);
+LWS_EXTERN int
+lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len);
+
#ifndef LWS_LATENCY
/* latency instrumentation compiled out: inline no-op stubs so call sites
 * need no #ifdefs; the (void) casts silence unused-argument warnings */
static LWS_INLINE void
lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
	    int ret, int completion) {
	do {
		(void)context; (void)wsi; (void)action; (void)ret;
		(void)completion;
	} while (0);
}
static LWS_INLINE void
lws_latency_pre(struct lws_context *context, struct lws *wsi) {
	do { (void)context; (void)wsi; } while (0);
}
#else
/* real implementation lives in the library; _pre marks the start point */
#define lws_latency_pre(_context, _wsi) lws_latency(_context, _wsi, NULL, 0, 0)
extern void
lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
	    int ret, int completion);
#endif
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ws_client_rx_sm(struct lws *wsi, unsigned char c);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_parse(struct lws *wsi, unsigned char *buf, int *len);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_parse_urldecode(struct lws *wsi, uint8_t *_c);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_http_action(struct lws *wsi);
+
+LWS_EXTERN int
+lws_b64_selftest(void);
+
+LWS_EXTERN int
+lws_service_flag_pending(struct lws_context *context, int tsi);
+
+LWS_EXTERN int
+lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p);
+
#if defined(_WIN32)
/* on windows, fd -> wsi lookups go through the fd hashtable */
LWS_EXTERN struct lws *
wsi_from_fd(const struct lws_context *context, lws_sockfd_type fd);

LWS_EXTERN int
insert_wsi(struct lws_context *context, struct lws *wsi);

LWS_EXTERN int
delete_from_fd(struct lws_context *context, lws_sockfd_type fd);
#else
/*
 * elsewhere, fds are small contiguous ints and a flat lookup array is
 * used.  insert_wsi is wrapped in do { } while (0) so it acts as a
 * single statement (the previous bare "assert(...); assignment" form
 * breaks inside an unbraced if/else); all args are parenthesized
 * against expression arguments.
 */
#define wsi_from_fd(A,B) (A)->lws_lookup[(B) - lws_plat_socket_offset()]
#define insert_wsi(A,B) do { \
		assert((A)->lws_lookup[(B)->desc.sockfd - \
				       lws_plat_socket_offset()] == 0); \
		(A)->lws_lookup[(B)->desc.sockfd - \
				lws_plat_socket_offset()] = (B); \
	} while (0)
#define delete_from_fd(A,B) (A)->lws_lookup[(B) - lws_plat_socket_offset()] = 0
#endif
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len);
+
+LWS_EXTERN void
+lws_remove_from_timeout_list(struct lws *wsi);
+
+LWS_EXTERN struct lws * LWS_WARN_UNUSED_RESULT
+lws_client_connect_2(struct lws *wsi);
+
+LWS_VISIBLE struct lws * LWS_WARN_UNUSED_RESULT
+lws_client_reset(struct lws **wsi, int ssl, const char *address, int port,
+ const char *path, const char *host);
+
+LWS_EXTERN struct lws * LWS_WARN_UNUSED_RESULT
+lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi);
+
+LWS_EXTERN char * LWS_WARN_UNUSED_RESULT
+lws_generate_client_handshake(struct lws *wsi, char *pkt);
+
+LWS_EXTERN int
+lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd);
+
+LWS_EXTERN struct lws *
+lws_client_connect_via_info2(struct lws *wsi);
+
+
+
+LWS_EXTERN void
+lws_client_stash_destroy(struct lws *wsi);
+
+/*
+ * EXTENSIONS
+ */
+
+#if defined(LWS_WITHOUT_EXTENSIONS)
+#define lws_any_extension_handled(_a, _b, _c, _d) (0)
+#define lws_ext_cb_active(_a, _b, _c, _d) (0)
+#define lws_ext_cb_all_exts(_a, _b, _c, _d, _e) (0)
+#define lws_issue_raw_ext_access lws_issue_raw
+#define lws_context_init_extensions(_a, _b)
+#endif
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_client_interpret_server_handshake(struct lws *wsi);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ws_rx_sm(struct lws *wsi, char already_processed, unsigned char c);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_issue_raw_ext_access(struct lws *wsi, unsigned char *buf, size_t len);
+
+LWS_EXTERN void
+lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
+ struct lws_role_ops *ops);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+user_callback_handle_rxflow(lws_callback_function, struct lws *wsi,
+ enum lws_callback_reasons reason, void *user,
+ void *in, size_t len);
+
+LWS_EXTERN int
+lws_plat_socket_offset(void);
+
+LWS_EXTERN int
+lws_plat_set_socket_options(struct lws_vhost *vhost, lws_sockfd_type fd);
+
+LWS_EXTERN int
+lws_plat_check_connection_error(struct lws *wsi);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_header_table_attach(struct lws *wsi, int autoservice);
+
+LWS_EXTERN int
+lws_header_table_detach(struct lws *wsi, int autoservice);
+LWS_EXTERN int
+__lws_header_table_detach(struct lws *wsi, int autoservice);
+
+LWS_EXTERN void
+lws_header_table_reset(struct lws *wsi, int autoservice);
+
+void
+__lws_header_table_reset(struct lws *wsi, int autoservice);
+
+LWS_EXTERN char * LWS_WARN_UNUSED_RESULT
+lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_hdr_simple_create(struct lws *wsi, enum lws_token_indexes h, const char *s);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ensure_user_space(struct lws *wsi);
+
+LWS_EXTERN int
+lws_change_pollfd(struct lws *wsi, int _and, int _or);
+
+#ifndef LWS_NO_SERVER
+ int _lws_vhost_init_server(const struct lws_context_creation_info *info,
+ struct lws_vhost *vhost);
+ LWS_EXTERN struct lws_vhost *
+ lws_select_vhost(struct lws_context *context, int port, const char *servername);
+ LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+ lws_parse_ws(struct lws *wsi, unsigned char **buf, size_t len);
+ LWS_EXTERN void
+ lws_server_get_canonical_hostname(struct lws_context *context,
+ const struct lws_context_creation_info *info);
+#else
+ #define _lws_vhost_init_server(_a, _b) (0)
+ #define lws_parse_ws(_a, _b, _c) (0)
+ #define lws_server_get_canonical_hostname(_a, _b)
+#endif
+
+#ifndef LWS_NO_DAEMONIZE
+ LWS_EXTERN int get_daemonize_pid();
+#else
+ #define get_daemonize_pid() (0)
+#endif
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+interface_to_sa(struct lws_vhost *vh, const char *ifname,
+ struct sockaddr_in *addr, size_t addrlen);
+LWS_EXTERN void lwsl_emit_stderr(int level, const char *line);
+
+#if !defined(LWS_WITH_TLS)
+ #define LWS_SSL_ENABLED(context) (0)
+ #define lws_context_init_server_ssl(_a, _b) (0)
+ #define lws_ssl_destroy(_a)
+ #define lws_context_init_alpn(_a)
+ #define lws_ssl_capable_read lws_ssl_capable_read_no_ssl
+ #define lws_ssl_capable_write lws_ssl_capable_write_no_ssl
+ #define lws_ssl_pending lws_ssl_pending_no_ssl
+ #define lws_server_socket_service_ssl(_b, _c) (0)
+ #define lws_ssl_close(_a) (0)
+ #define lws_ssl_context_destroy(_a)
+ #define lws_ssl_SSL_CTX_destroy(_a)
+ #define lws_ssl_remove_wsi_from_buffered_list(_a)
+ #define __lws_ssl_remove_wsi_from_buffered_list(_a)
+ #define lws_context_init_ssl_library(_a)
+ #define lws_tls_check_all_cert_lifetimes(_a)
+ #define lws_tls_acme_sni_cert_destroy(_a)
+#endif
+
+
+#if LWS_MAX_SMP > 1
+
+static LWS_INLINE void
+lws_pt_mutex_init(struct lws_context_per_thread *pt)
+{
+ pthread_mutex_init(&pt->lock, NULL);
+ pthread_mutex_init(&pt->lock_stats, NULL);
+}
+
+static LWS_INLINE void
+lws_pt_mutex_destroy(struct lws_context_per_thread *pt)
+{
+ pthread_mutex_destroy(&pt->lock_stats);
+ pthread_mutex_destroy(&pt->lock);
+}
+
/*
 * Take the per-thread service lock, with hand-rolled recursion support:
 * if the calling thread already owns the lock, just bump lock_depth and
 * return without touching the mutex (lws_pt_unlock() pops one level per
 * call).  "reason" is recorded for debugging via last_lock_reason.
 *
 * NOTE(review): lock_owner is read here without holding the mutex; this
 * presumably relies on the comparison only ever matching for the thread
 * that last wrote it — confirm this is safe on all target platforms.
 */
static LWS_INLINE void
lws_pt_lock(struct lws_context_per_thread *pt, const char *reason)
{
	if (pt->lock_owner == pthread_self()) {
		pt->lock_depth++;
		return;
	}
	pthread_mutex_lock(&pt->lock);
	/* record owner + reason only after we actually hold the mutex */
	pt->last_lock_reason = reason;
	pt->lock_owner = pthread_self();
	//lwsl_notice("tid %d: lock %s\n", pt->tid, reason);
}
+
/*
 * Release the per-thread service lock taken by lws_pt_lock().  A nested
 * acquisition by the owner only decrements lock_depth; the mutex itself
 * is unlocked when the outermost level is released.
 */
static LWS_INLINE void
lws_pt_unlock(struct lws_context_per_thread *pt)
{
	if (pt->lock_depth) {
		pt->lock_depth--;
		return;
	}
	/* clear ownership bookkeeping before dropping the mutex */
	pt->last_lock_reason = "free";
	pt->lock_owner = 0;
	//lwsl_notice("tid %d: unlock %s\n", pt->tid, pt->last_lock_reason);
	pthread_mutex_unlock(&pt->lock);
}
+
/* serialize updates to the per-thread stats counters */
static LWS_INLINE void
lws_pt_stats_lock(struct lws_context_per_thread *pt)
{
	pthread_mutex_lock(&pt->lock_stats);
}
+
/* release the per-thread stats lock taken by lws_pt_stats_lock() */
static LWS_INLINE void
lws_pt_stats_unlock(struct lws_context_per_thread *pt)
{
	pthread_mutex_unlock(&pt->lock_stats);
}
+
/* take the context-wide lock (no recursion support, unlike lws_pt_lock) */
static LWS_INLINE void
lws_context_lock(struct lws_context *context)
{
	pthread_mutex_lock(&context->lock);
}
+
/* release the context-wide lock taken by lws_context_lock() */
static LWS_INLINE void
lws_context_unlock(struct lws_context *context)
{
	pthread_mutex_unlock(&context->lock);
}
+
/* take the per-vhost lock */
static LWS_INLINE void
lws_vhost_lock(struct lws_vhost *vhost)
{
	pthread_mutex_lock(&vhost->lock);
}
+
/* release the per-vhost lock taken by lws_vhost_lock() */
static LWS_INLINE void
lws_vhost_unlock(struct lws_vhost *vhost)
{
	pthread_mutex_unlock(&vhost->lock);
}
+
+
+#else
+#define lws_pt_mutex_init(_a) (void)(_a)
+#define lws_pt_mutex_destroy(_a) (void)(_a)
+#define lws_pt_lock(_a, b) (void)(_a)
+#define lws_pt_unlock(_a) (void)(_a)
+#define lws_context_lock(_a) (void)(_a)
+#define lws_context_unlock(_a) (void)(_a)
+#define lws_vhost_lock(_a) (void)(_a)
+#define lws_vhost_unlock(_a) (void)(_a)
+#define lws_pt_stats_lock(_a) (void)(_a)
+#define lws_pt_stats_unlock(_a) (void)(_a)
+#endif
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, int len);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_ssl_pending_no_ssl(struct lws *wsi);
+
+int
+lws_tls_check_cert_lifetime(struct lws_vhost *vhost);
+
+int lws_jws_selftest(void);
+
+
+#ifndef LWS_NO_CLIENT
+LWS_EXTERN int lws_client_socket_service(struct lws *wsi,
+ struct lws_pollfd *pollfd,
+ struct lws *wsi_conn);
+LWS_EXTERN struct lws *
+lws_client_wsi_effective(struct lws *wsi);
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_http_transaction_completed_client(struct lws *wsi);
+#if !defined(LWS_WITH_TLS)
+ #define lws_context_init_client_ssl(_a, _b) (0)
+#endif
+LWS_EXTERN void
+lws_decode_ssl_error(void);
+#else
+#define lws_context_init_client_ssl(_a, _b) (0)
+#endif
+
+LWS_EXTERN int
+__lws_rx_flow_control(struct lws *wsi);
+
+LWS_EXTERN int
+_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa);
+
+#ifndef LWS_NO_SERVER
+LWS_EXTERN int
+lws_handshake_server(struct lws *wsi, unsigned char **buf, size_t len);
+#else
+#define lws_server_socket_service(_b, _c) (0)
+#define lws_handshake_server(_a, _b, _c) (0)
+#endif
+
+#ifdef LWS_WITH_ACCESS_LOG
+LWS_EXTERN int
+lws_access_log(struct lws *wsi);
+LWS_EXTERN void
+lws_prepare_access_log_info(struct lws *wsi, char *uri_ptr, int meth);
+#else
+#define lws_access_log(_a)
+#endif
+
+LWS_EXTERN int
+lws_cgi_kill_terminated(struct lws_context_per_thread *pt);
+
+LWS_EXTERN void
+lws_cgi_remove_and_kill(struct lws *wsi);
+
+int
+lws_protocol_init(struct lws_context *context);
+
+int
+lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p);
+
+const struct lws_http_mount *
+lws_find_mount(struct lws *wsi, const char *uri_ptr, int uri_len);
+
+/*
+ * custom allocator
+ */
+LWS_EXTERN void *
+lws_realloc(void *ptr, size_t size, const char *reason);
+
+LWS_EXTERN void * LWS_WARN_UNUSED_RESULT
+lws_zalloc(size_t size, const char *reason);
+
+#ifdef LWS_PLAT_OPTEE
+void *lws_malloc(size_t size, const char *reason);
+void lws_free(void *p);
+#define lws_free_set_NULL(P) do { lws_free(P); (P) = NULL; } while(0)
+#else
+#define lws_malloc(S, R) lws_realloc(NULL, S, R)
+#define lws_free(P) lws_realloc(P, 0, "lws_free")
+#define lws_free_set_NULL(P) do { lws_realloc(P, 0, "free"); (P) = NULL; } while(0)
+#endif
+
+char *
+lws_strdup(const char *s);
+
+int
+lws_plat_pipe_create(struct lws *wsi);
+int
+lws_plat_pipe_signal(struct lws *wsi);
+void
+lws_plat_pipe_close(struct lws *wsi);
+int
+lws_create_event_pipes(struct lws_context *context);
+
+int lws_open(const char *__file, int __oflag, ...);
+void lws_plat_apply_FD_CLOEXEC(int n);
+
+const struct lws_plat_file_ops *
+lws_vfs_select_fops(const struct lws_plat_file_ops *fops, const char *vfs_path,
+ const char **vpath);
+
+/* lws_plat_ */
+LWS_EXTERN void
+lws_plat_delete_socket_from_fds(struct lws_context *context,
+ struct lws *wsi, int m);
+LWS_EXTERN void
+lws_plat_insert_socket_into_fds(struct lws_context *context,
+ struct lws *wsi);
+LWS_EXTERN void
+lws_plat_service_periodic(struct lws_context *context);
+
+LWS_EXTERN int
+lws_plat_change_pollfd(struct lws_context *context, struct lws *wsi,
+ struct lws_pollfd *pfd);
+LWS_EXTERN void
+lws_add_wsi_to_draining_ext_list(struct lws *wsi);
+LWS_EXTERN void
+lws_remove_wsi_from_draining_ext_list(struct lws *wsi);
+LWS_EXTERN int
+lws_plat_context_early_init(void);
+LWS_EXTERN void
+lws_plat_context_early_destroy(struct lws_context *context);
+LWS_EXTERN void
+lws_plat_context_late_destroy(struct lws_context *context);
+LWS_EXTERN int
+lws_poll_listen_fd(struct lws_pollfd *fd);
+LWS_EXTERN int
+lws_plat_service(struct lws_context *context, int timeout_ms);
+LWS_EXTERN LWS_VISIBLE int
+_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi);
+LWS_EXTERN int
+lws_plat_init(struct lws_context *context,
+ const struct lws_context_creation_info *info);
+LWS_EXTERN void
+lws_plat_drop_app_privileges(const struct lws_context_creation_info *info);
+LWS_EXTERN unsigned long long
+time_in_microseconds(void);
+LWS_EXTERN const char * LWS_WARN_UNUSED_RESULT
+lws_plat_inet_ntop(int af, const void *src, char *dst, int cnt);
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_plat_inet_pton(int af, const char *src, void *dst);
+
+LWS_EXTERN int LWS_WARN_UNUSED_RESULT
+lws_check_utf8(unsigned char *state, unsigned char *buf, size_t len);
+LWS_EXTERN int alloc_file(struct lws_context *context, const char *filename, uint8_t **buf,
+ lws_filepos_t *amount);
+
+
+LWS_EXTERN void
+lws_same_vh_protocol_remove(struct lws *wsi);
+LWS_EXTERN void
+lws_same_vh_protocol_insert(struct lws *wsi, int n);
+
+LWS_EXTERN int
+lws_broadcast(struct lws_context *context, int reason, void *in, size_t len);
+
+#if defined(LWS_WITH_STATS)
+ void
+ lws_stats_atomic_bump(struct lws_context * context,
+ struct lws_context_per_thread *pt, int index, uint64_t bump);
+ void
+ lws_stats_atomic_max(struct lws_context * context,
+ struct lws_context_per_thread *pt, int index, uint64_t val);
+#else
	/*
	 * No-op stat stubs for builds without LWS_WITH_STATS.
	 * NOTE(review): these return uint64_t while the LWS_WITH_STATS
	 * declarations above are void; the return value is presumably
	 * ignored by all callers — confirm.
	 */
	static LWS_INLINE uint64_t lws_stats_atomic_bump(struct lws_context * context,
			struct lws_context_per_thread *pt, int index, uint64_t bump) {
		(void)context; (void)pt; (void)index; (void)bump; return 0; }
	static LWS_INLINE uint64_t lws_stats_atomic_max(struct lws_context * context,
			struct lws_context_per_thread *pt, int index, uint64_t val) {
		(void)context; (void)pt; (void)index; (void)val; return 0; }
+#endif
+
+/* socks */
+void socks_generate_msg(struct lws *wsi, enum socks_msg_type type,
+ ssize_t *msg_len);
+
+#if defined(LWS_WITH_PEER_LIMITS)
+void
+lws_peer_track_wsi_close(struct lws_context *context, struct lws_peer *peer);
+int
+lws_peer_confirm_ah_attach_ok(struct lws_context *context, struct lws_peer *peer);
+void
+lws_peer_track_ah_detach(struct lws_context *context, struct lws_peer *peer);
+void
+lws_peer_cull_peer_wait_list(struct lws_context *context);
+struct lws_peer *
+lws_get_or_create_peer(struct lws_vhost *vhost, lws_sockfd_type sockfd);
+void
+lws_peer_add_wsi(struct lws_context *context, struct lws_peer *peer,
+ struct lws *wsi);
+void
+lws_peer_dump_from_wsi(struct lws *wsi);
+#endif
+
+#ifdef LWS_WITH_HTTP_PROXY
+hubbub_error
+html_parser_cb(const hubbub_token *token, void *pw);
+#endif
+
+
+void
+__lws_remove_from_timeout_list(struct lws *wsi);
+
+lws_usec_t
+__lws_hrtimer_service(struct lws_context_per_thread *pt);
+
+void
+__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs);
+int
+__lws_change_pollfd(struct lws *wsi, int _and, int _or);
+
+
+int
+lws_callback_as_writeable(struct lws *wsi);
+int
+lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
+ struct lws_tokens *ebuf);
+int
+lws_buflist_aware_consume(struct lws *wsi, struct lws_tokens *ebuf, int used,
+ int buffered);
+
+
+char *
+lws_generate_client_ws_handshake(struct lws *wsi, char *p);
+int
+lws_client_ws_upgrade(struct lws *wsi, const char **cce);
+int
+lws_create_client_ws_object(struct lws_client_connect_info *i, struct lws *wsi);
+int
+lws_alpn_comma_to_openssl(const char *comma, uint8_t *os, int len);
+int
+lws_role_call_alpn_negotiated(struct lws *wsi, const char *alpn);
+int
+lws_tls_server_conn_alpn(struct lws *wsi);
+
+int
+lws_ws_client_rx_sm_block(struct lws *wsi, unsigned char **buf, size_t len);
+void
+lws_destroy_event_pipe(struct lws *wsi);
+void
+lws_context_destroy2(struct lws_context *context);
+
+#ifdef __cplusplus
+};
+#endif
diff --git a/thirdparty/libwebsockets/core/service.c b/thirdparty/libwebsockets/core/service.c
new file mode 100644
index 0000000000..6523058814
--- /dev/null
+++ b/thirdparty/libwebsockets/core/service.c
@@ -0,0 +1,987 @@
+/*
+ * libwebsockets - small server side websockets and web server implementation
+ *
+ * Copyright (C) 2010-2018 Andy Green <andy@warmcat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation:
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include "core/private.h"
+
+int
+lws_callback_as_writeable(struct lws *wsi)
+{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+ int n, m;
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB, 1);
+#if defined(LWS_WITH_STATS)
+ if (wsi->active_writable_req_us) {
+ uint64_t ul = time_in_microseconds() -
+ wsi->active_writable_req_us;
+
+ lws_stats_atomic_bump(wsi->context, pt,
+ LWSSTATS_MS_WRITABLE_DELAY, ul);
+ lws_stats_atomic_max(wsi->context, pt,
+ LWSSTATS_MS_WORST_WRITABLE_DELAY, ul);
+ wsi->active_writable_req_us = 0;
+ }
+#endif
+
+ n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
+
+ m = user_callback_handle_rxflow(wsi->protocol->callback,
+ wsi, (enum lws_callback_reasons) n,
+ wsi->user_space, NULL, 0);
+
+ return m;
+}
+
/*
 * Service a POLLOUT event on one wsi.
 *
 * Priority order: (1) flush any pending truncated (partial) send,
 * (2) let LRS_FLUSHING_BEFORE_CLOSE connections die, (3) CGI master
 * forwarding, (4) the role's handle_POLLOUT, and only then (5) the
 * user WRITEABLE callback.
 *
 * The handling_pollout / leave_pollout_active flags are accessed via a
 * volatile alias so other service threads can request POLLOUT while we
 * are mid-handling; the exact write ordering below is load-bearing.
 *
 * Returns 0 on success, nonzero to tell the caller to close the wsi.
 */
LWS_VISIBLE int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	volatile struct lws *vwsi = (volatile struct lws *)wsi;
	int n;

	//lwsl_notice("%s: %p\n", __func__, wsi);

	vwsi->leave_pollout_active = 0;
	vwsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
	wsi->could_have_pending = 0; /* clear back-to-back write detection */

	/*
	 * user callback is lowest priority to get these notifications
	 * actually, since other pending things cannot be disordered
	 *
	 * Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else sent first the protocol would be
	 *	       corrupted.
	 */

	if (wsi->trunc_len) {
		//lwsl_notice("%s: completing partial\n", __func__);
		if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
				  wsi->trunc_len) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		/* leave POLLOUT active either way */
		goto bail_ok;
	} else
	if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
		wsi->socket_is_permanently_unusable = 1;
		goto bail_die; /* retry closing now */
	}

#ifdef LWS_WITH_CGI
	/*
	 * A cgi master's wire protocol remains h1 or h2. He is just getting
	 * his data from his child cgis.
	 */
	if (wsi->http.cgi) {
		/* also one shot */
		if (pollfd)
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				return 1;
			}
		goto user_service_go_again;
	}
#endif

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	if (!wsi->role_ops->handle_POLLOUT)
		goto bail_ok;

	switch ((wsi->role_ops->handle_POLLOUT)(wsi)) {
	case LWS_HP_RET_BAIL_OK:
		goto bail_ok;
	case LWS_HP_RET_BAIL_DIE:
		goto bail_die;
	case LWS_HP_RET_USER_SERVICE:
		break;
	default:
		assert(0);
	}

	/* one shot */

	if (wsi->parent_carries_io) {
		/* let the parent wsi do the writing; callback directly */
		vwsi->handling_pollout = 0;
		vwsi->leave_pollout_active = 0;

		return lws_callback_as_writeable(wsi);
	}

	if (pollfd) {
		int eff = vwsi->leave_pollout_active;

		if (!eff) {
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active) {
			/*
			 * got set inbetween sampling eff and clearing
			 * handling_pollout, force POLLOUT on
			 */
			lwsl_debug("leave_pollout_active\n");
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->leave_pollout_active = 0;
	}

	/*
	 * clients mid-handshake don't get user WRITEABLE callbacks yet
	 * (except in the two header/body-issuing states below)
	 */
	if (lwsi_role_client(wsi) &&
	    !wsi->hdr_parsing_completed &&
	     lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
	     lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY
	    )
		goto bail_ok;


#ifdef LWS_WITH_CGI
user_service_go_again:
#endif

	if (wsi->role_ops->perform_user_POLLOUT) {
		if (wsi->role_ops->perform_user_POLLOUT(wsi) == -1)
			goto bail_die;
		else
			goto bail_ok;
	}

	lwsl_debug("%s: %p: non mux: wsistate 0x%x, ops %s\n", __func__, wsi,
		   wsi->wsistate, wsi->role_ops->name);

	vwsi = (volatile struct lws *)wsi;
	vwsi->leave_pollout_active = 0;

	n = lws_callback_as_writeable(wsi);
	vwsi->handling_pollout = 0;

	if (vwsi->leave_pollout_active)
		lws_change_pollfd(wsi, 0, LWS_POLLOUT);

	return n;

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

bail_ok:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return 0;

bail_die:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return -1;
}
+
/*
 * Check one wsi against its pending timeout and close it violently if
 * it has expired.  Called with the pt lock held (see the lws_pt_lock()
 * around the caller's timeout-list walk).
 *
 * Returns 1 if the wsi timed out and was closed/freed, else 0.
 */
static int
__lws_service_timeout_check(struct lws *wsi, time_t sec)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int n = 0;

	(void)n;

	/*
	 * if we went beyond the allowed time, kill the
	 * connection
	 */
	if (wsi->dll_timeout.prev &&
	    lws_compare_time_t(wsi->context, sec, wsi->pending_timeout_set) >
			       wsi->pending_timeout_limit) {

		/* capture his pollfd events for the diagnostic log below */
		if (wsi->desc.sockfd != LWS_SOCK_INVALID &&
		    wsi->position_in_fds_table >= 0)
			n = pt->fds[wsi->position_in_fds_table].events;

		lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_TIMEOUTS, 1);

		/*
		 * no need to log normal idle keepalive timeout
		 *
		 * NOTE(review): this unbraced if guards only the first
		 * lwsl_info() below; the LWS_WITH_CGI notice is emitted
		 * regardless of the keepalive check — confirm intended.
		 */
		if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
			lwsl_info("wsi %p: TIMEDOUT WAITING on %d "
				  "(did hdr %d, ah %p, wl %d, pfd "
				  "events %d) %llu vs %llu\n",
				  (void *)wsi, wsi->pending_timeout,
				  wsi->hdr_parsing_completed, wsi->http.ah,
				  pt->http.ah_wait_list_length, n,
				  (unsigned long long)sec,
				  (unsigned long long)wsi->pending_timeout_limit);
#if defined(LWS_WITH_CGI)
		if (wsi->http.cgi)
			lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d ", (void *)wsi,
			  wsi->pending_timeout);
#endif

		/*
		 * Since he failed a timeout, he already had a chance to do
		 * something and was unable to... that includes situations like
		 * half closed connections. So process this "failed timeout"
		 * close as a violent death and don't try to do protocol
		 * cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
		if (lwsi_state(wsi) == LRS_WAITING_SSL && wsi->protocol)
			wsi->protocol->callback(wsi,
				LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
				wsi->user_space,
				(void *)"Timed out waiting SSL", 21);

		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");

		return 1;
	}

	return 0;
}
+
+int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
+{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+ uint8_t *buffered;
+ size_t blen;
+ int ret = 0, m;
+
+ /* his RX is flowcontrolled, don't send remaining now */
+ blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
+ if (blen) {
+ if (buf >= buffered && buf + len <= buffered + blen) {
+ /* rxflow while we were spilling prev rxflow */
+ lwsl_info("%s: staying in rxflow buf\n", __func__);
+
+ return 1;
+ }
+ ret = 1;
+ }
+
+ /* a new rxflow, buffer it and warn caller */
+
+ m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);
+
+ if (m < 0)
+ return -1;
+ if (m) {
+ lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
+ lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);
+ }
+
+ return ret;
+}
+
+/* this is used by the platform service code to stop us waiting for network
+ * activity in poll() when we have something that already needs service
+ */
+
+LWS_VISIBLE LWS_EXTERN int
+lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+
+ /* Figure out if we really want to wait in poll()
+ * We only need to wait if really nothing already to do and we have
+ * to wait for something from network
+ */
+#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
+ /* 1) if we know we are draining rx ext, do not wait in poll */
+ if (pt->ws.rx_draining_ext_list)
+ return 0;
+#endif
+
+ /* 2) if we know we have non-network pending data, do not wait in poll */
+
+ if (pt->context->tls_ops &&
+ pt->context->tls_ops->fake_POLLIN_for_buffered)
+ if (pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
+ return 0;
+
+ /* 3) If there is any wsi with rxflow buffered and in a state to process
+ * it, we should not wait in poll
+ */
+
+ lws_start_foreach_dll(struct lws_dll_lws *, d, pt->dll_head_buflist.next) {
+ struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
+
+ if (lwsi_state(wsi) != LRS_DEFERRING_ACTION)
+ return 0;
+
+ } lws_end_foreach_dll(d);
+
+ return timeout_ms;
+}
+
+/*
+ * POLLIN said there is something... we must read it, and either use it; or
+ * if other material already in the buflist append it and return the buflist
+ * head material.
+ */
/*
 * POLLIN fired: read from the socket into pt->serv_buf.  If the buflist
 * already holds earlier undelivered data, append the fresh read to it
 * and hand back the OLDEST buflist segment instead, so byte ordering is
 * preserved.
 *
 * Returns 1 if ebuf now points into the buflist, 0 if it is a direct
 * read (or the read returned nothing), -1 on allocation failure.
 */
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
		       struct lws_tokens *ebuf)
{
	/* prior != 0 means older data is already queued on the buflist */
	int n, prior = (int)lws_buflist_next_segment_len(&wsi->buflist, NULL);

	ebuf->token = (char *)pt->serv_buf;
	ebuf->len = lws_ssl_capable_read(wsi, pt->serv_buf,
					 wsi->context->pt_serv_buf_size);

	/* nothing new on the wire but old data queued: serve the old data */
	if (ebuf->len == LWS_SSL_CAPABLE_MORE_SERVICE && prior)
		goto get_from_buflist;

	if (ebuf->len <= 0)
		return 0;

	/* nothing in buflist already? Then just use what we read */

	if (!prior)
		return 0;

	/* stash what we read */

	n = lws_buflist_append_segment(&wsi->buflist, (uint8_t *)ebuf->token,
				       ebuf->len);
	if (n < 0)
		return -1;
	if (n) {
		/* buflist just became non-empty: track wsi on the pt list */
		lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
		lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);
	}

	/* get the first buflist guy in line */

get_from_buflist:

	ebuf->len = (int)lws_buflist_next_segment_len(&wsi->buflist,
						      (uint8_t **)&ebuf->token);

	return 1; /* came from buflist */
}
+
+int
+lws_buflist_aware_consume(struct lws *wsi, struct lws_tokens *ebuf, int used,
+ int buffered)
+{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+ int m;
+
+ /* it's in the buflist; we didn't use any */
+
+ if (!used && buffered)
+ return 0;
+
+ if (used && buffered) {
+ m = lws_buflist_use_segment(&wsi->buflist, used);
+ lwsl_info("%s: draining rxflow: used %d, next %d\n",
+ __func__, used, m);
+ if (m)
+ return 0;
+
+ lwsl_info("%s: removed %p from dll_buflist\n", __func__, wsi);
+ lws_dll_lws_remove(&wsi->dll_buflist);
+
+ return 0;
+ }
+
+ /* any remainder goes on the buflist */
+
+ if (used != ebuf->len) {
+ m = lws_buflist_append_segment(&wsi->buflist,
+ (uint8_t *)ebuf->token + used,
+ ebuf->len - used);
+ if (m < 0)
+ return 1; /* OOM */
+ if (m) {
+ lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
+ lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist);
+ }
+ }
+
+ return 0;
+}
+
/*
 * Walk every wsi on this pt's buflist (pending-rx) list and, for each
 * one that is no longer flowcontrolled or deferring, feed its buffered
 * data into the role's POLLIN handler via a synthesized pollfd (fd -1,
 * POLLIN set).  Uses the _safe iterator because a wsi may be closed and
 * unlinked mid-walk.  Holds the pt lock for the whole pass.
 */
void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
	struct lws_pollfd pfd;

	if (!pt->dll_head_buflist.next)
		return;

	/*
	 * service all guys with pending rxflow that reached a state they can
	 * accept the pending data
	 */

	lws_pt_lock(pt, __func__);

	lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
				   pt->dll_head_buflist.next) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		/* fake POLLIN; fd -1 marks it as not a real socket event */
		pfd.events = LWS_POLLIN;
		pfd.revents = LWS_POLLIN;
		pfd.fd = -1;

		lwsl_debug("%s: rxflow processing: %p 0x%x\n", __func__, wsi,
			   wsi->wsistate);

		if (!lws_is_flowcontrolled(wsi) &&
		    lwsi_state(wsi) != LRS_DEFERRING_ACTION &&
		    (wsi->role_ops->handle_POLLIN)(pt, wsi, &pfd) ==
						   LWS_HPI_RET_PLEASE_CLOSE_ME)
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					   "close_and_handled");

	} lws_end_foreach_dll_safe(d, d1);

	lws_pt_unlock(pt);
}
+
+/*
+ * guys that need POLLIN service again without waiting for network action
+ * can force POLLIN here if not flowcontrolled, so they will get service.
+ *
+ * Return nonzero if anybody got their POLLIN faked
+ */
/*
 * Fake POLLIN status for connections that have pending local work
 * (ripe buflist data, ws role pendings, or buffered SSL reads) so the
 * next service pass handles them without waiting for the network.
 *
 * Returns nonzero if anybody got their POLLIN faked.  Takes the pt lock
 * around all the list inspection/manipulation.
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

#if defined(LWS_WITH_TLS)
	struct lws *wsi, *wsi_next;
#endif
	int forced = 0;

	lws_pt_lock(pt, __func__);

	/*
	 * 1) If there is any wsi with a buflist and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll_lws *, d, pt->dll_head_buflist.next) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			forced = 1;
			break;
		}
	} lws_end_foreach_dll(d);

#if defined(LWS_ROLE_WS)
	/* the ws role has its own pending sources (e.g. draining exts) */
	forced |= role_ops_ws.service_flag_pending(context, tsi);
#endif

#if defined(LWS_WITH_TLS)
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	wsi = pt->tls.pending_read_list;
	while (wsi) {
		/* capture next first: the wsi may leave the list below */
		wsi_next = wsi->tls.pending_read_list_next;
		pt->fds[wsi->position_in_fds_table].revents |=
			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
			forced = 1;
			/*
			 * he's going to get serviced now, take him off the
			 * list of guys with buffered SSL.  If he still has some
			 * at the end of the service, he'll get put back on the
			 * list then.
			 */
			__lws_ssl_remove_wsi_from_buffered_list(wsi);
		}

		wsi = wsi_next;
	}
#endif

	lws_pt_unlock(pt);

	return forced;
}
+
+static int
+lws_service_periodic_checks(struct lws_context *context,
+ struct lws_pollfd *pollfd, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+ lws_sockfd_type our_fd = 0, tmp_fd;
+ struct lws *wsi;
+ int timed_out = 0;
+ time_t now;
+#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
+ struct allocated_headers *ah;
+ int m;
+#endif
+
+ if (!context->protocol_init_done)
+ if (lws_protocol_init(context))
+ return -1;
+
+ time(&now);
+
+ /*
+ * handle case that system time was uninitialized when lws started
+ * at boot, and got initialized a little later
+ */
+ if (context->time_up < 1464083026 && now > 1464083026)
+ context->time_up = now;
+
+ if (context->last_timeout_check_s &&
+ now - context->last_timeout_check_s > 100) {
+ /*
+ * There has been a discontiguity. Any stored time that is
+ * less than context->time_discontiguity should have context->
+ * time_fixup added to it.
+ *
+ * Some platforms with no RTC will experience this as a normal
+ * event when ntp sets their clock, but we can have started
+ * long before that with a 0-based unix time.
+ */
+
+ context->time_discontiguity = now;
+ context->time_fixup = now - context->last_timeout_check_s;
+
+ lwsl_notice("time discontiguity: at old time %llus, "
+ "new time %llus: +%llus\n",
+ (unsigned long long)context->last_timeout_check_s,
+ (unsigned long long)context->time_discontiguity,
+ (unsigned long long)context->time_fixup);
+
+ context->last_timeout_check_s = now - 1;
+ }
+
+ if (!lws_compare_time_t(context, context->last_timeout_check_s, now))
+ return 0;
+
+ context->last_timeout_check_s = now;
+
+#if defined(LWS_WITH_STATS)
+ if (!tsi && now - context->last_dump > 10) {
+ lws_stats_log_dump(context);
+ context->last_dump = now;
+ }
+#endif
+
+ lws_plat_service_periodic(context);
+ lws_check_deferred_free(context, 0);
+
+#if defined(LWS_WITH_PEER_LIMITS)
+ lws_peer_cull_peer_wait_list(context);
+#endif
+
+ /* retire unused deprecated context */
+#if !defined(LWS_PLAT_OPTEE) && !defined(LWS_WITH_ESP32)
+#if !defined(_WIN32)
+ if (context->deprecated && !context->count_wsi_allocated) {
+ lwsl_notice("%s: ending deprecated context\n", __func__);
+ kill(getpid(), SIGINT);
+ return 0;
+ }
+#endif
+#endif
+ /* global timeout check once per second */
+
+ if (pollfd)
+ our_fd = pollfd->fd;
+
+ /*
+ * Phase 1: check every wsi on the timeout check list
+ */
+
+ lws_pt_lock(pt, __func__);
+
+ lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
+ context->pt[tsi].dll_head_timeout.next) {
+ wsi = lws_container_of(d, struct lws, dll_timeout);
+ tmp_fd = wsi->desc.sockfd;
+ if (__lws_service_timeout_check(wsi, now)) {
+ /* he did time out... */
+ if (tmp_fd == our_fd)
+ /* it was the guy we came to service! */
+ timed_out = 1;
+ /* he's gone, no need to mark as handled */
+ }
+ } lws_end_foreach_dll_safe(d, d1);
+
+#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
+ /*
+ * Phase 2: double-check active ah timeouts independent of wsi
+ * timeout status
+ */
+
+ ah = pt->http.ah_list;
+ while (ah) {
+ int len;
+ char buf[256];
+ const unsigned char *c;
+
+ if (!ah->in_use || !ah->wsi || !ah->assigned ||
+ (ah->wsi->vhost &&
+ lws_compare_time_t(context, now, ah->assigned) <
+ ah->wsi->vhost->timeout_secs_ah_idle + 360)) {
+ ah = ah->next;
+ continue;
+ }
+
+ /*
+ * a single ah session somehow got held for
+ * an unreasonable amount of time.
+ *
+ * Dump info on the connection...
+ */
+ wsi = ah->wsi;
+ buf[0] = '\0';
+#if !defined(LWS_PLAT_OPTEE)
+ lws_get_peer_simple(wsi, buf, sizeof(buf));
+#else
+ buf[0] = '\0';
+#endif
+ lwsl_notice("ah excessive hold: wsi %p\n"
+ " peer address: %s\n"
+ " ah pos %u\n",
+ wsi, buf, ah->pos);
+ buf[0] = '\0';
+ m = 0;
+ do {
+ c = lws_token_to_string(m);
+ if (!c)
+ break;
+ if (!(*c))
+ break;
+
+ len = lws_hdr_total_length(wsi, m);
+ if (!len || len > (int)sizeof(buf) - 1) {
+ m++;
+ continue;
+ }
+
+ if (lws_hdr_copy(wsi, buf,
+ sizeof buf, m) > 0) {
+ buf[sizeof(buf) - 1] = '\0';
+
+ lwsl_notice(" %s = %s\n",
+ (const char *)c, buf);
+ }
+ m++;
+ } while (1);
+
+ /* explicitly detach the ah */
+ lws_header_table_detach(wsi, 0);
+
+ /* ... and then drop the connection */
+
+ m = 0;
+ if (wsi->desc.sockfd == our_fd) {
+ m = timed_out;
+
+ /* it was the guy we came to service! */
+ timed_out = 1;
+ }
+
+ if (!m) /* if he didn't already timeout */
+ __lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
+ "excessive ah");
+
+ ah = pt->http.ah_list;
+ }
+#endif
+ lws_pt_unlock(pt);
+
+#if 0
+ {
+ char s[300], *p = s;
+
+ for (n = 0; n < context->count_threads; n++)
+ p += sprintf(p, " %7lu (%5d), ",
+ context->pt[n].count_conns,
+ context->pt[n].fds_count);
+
+ lwsl_notice("load: %s\n", s);
+ }
+#endif
+ /*
+ * Phase 3: vhost / protocol timer callbacks
+ */
+
+ wsi = NULL;
+ lws_start_foreach_ll(struct lws_vhost *, v, context->vhost_list) {
+ struct lws_timed_vh_protocol *nx;
+ if (v->timed_vh_protocol_list) {
+ lws_start_foreach_ll(struct lws_timed_vh_protocol *,
+ q, v->timed_vh_protocol_list) {
+ if (now >= q->time) {
+ if (!wsi)
+ wsi = lws_zalloc(sizeof(*wsi), "cbwsi");
+ wsi->context = context;
+ wsi->vhost = v;
+ wsi->protocol = q->protocol;
+ lwsl_debug("timed cb: vh %s, protocol %s, reason %d\n", v->name, q->protocol->name, q->reason);
+ q->protocol->callback(wsi, q->reason, NULL, NULL, 0);
+ nx = q->next;
+ lws_timed_callback_remove(v, q);
+ q = nx;
+ continue; /* we pointed ourselves to the next from the now-deleted guy */
+ }
+ } lws_end_foreach_ll(q, next);
+ }
+ } lws_end_foreach_ll(v, vhost_next);
+ if (wsi)
+ lws_free(wsi);
+
+ /*
+ * Phase 4: check for unconfigured vhosts due to required
+ * interface missing before
+ */
+
+ lws_context_lock(context);
+ lws_start_foreach_llp(struct lws_vhost **, pv,
+ context->no_listener_vhost_list) {
+ struct lws_vhost *v = *pv;
+ lwsl_debug("deferred iface: checking if on vh %s\n", (*pv)->name);
+ if (_lws_vhost_init_server(NULL, *pv) == 0) {
+ /* became happy */
+ lwsl_notice("vh %s: became connected\n", v->name);
+ *pv = v->no_listener_vhost_list;
+ v->no_listener_vhost_list = NULL;
+ break;
+ }
+ } lws_end_foreach_llp(pv, no_listener_vhost_list);
+ lws_context_unlock(context);
+
+ /*
+ * Phase 5: role periodic checks
+ */
+#if defined(LWS_ROLE_WS)
+ role_ops_ws.periodic_checks(context, tsi, now);
+#endif
+#if defined(LWS_ROLE_CGI)
+ role_ops_cgi.periodic_checks(context, tsi, now);
+#endif
+
+ /*
+ * Phase 6: check the remaining cert lifetime daily
+ */
+
+ if (context->tls_ops &&
+ context->tls_ops->periodic_housekeeping)
+ context->tls_ops->periodic_housekeeping(context, now);
+
+ return timed_out;
+}
+
+LWS_VISIBLE int
+lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
+ int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+ struct lws *wsi;
+
+ if (!context || context->being_destroyed1)
+ return -1;
+
+ /* the socket we came to service timed out, nothing to do */
+ if (lws_service_periodic_checks(context, pollfd, tsi) || !pollfd)
+ return 0;
+
+ /* no, here to service a socket descriptor */
+ wsi = wsi_from_fd(context, pollfd->fd);
+ if (!wsi)
+ /* not lws connection ... leave revents alone and return */
+ return 0;
+
+ /*
+ * so that caller can tell we handled, past here we need to
+ * zero down pollfd->revents after handling
+ */
+
+ /* handle session socket closed */
+
+ if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
+ (pollfd->revents & LWS_POLLHUP)) {
+ wsi->socket_is_permanently_unusable = 1;
+ lwsl_debug("Session Socket %p (fd=%d) dead\n",
+ (void *)wsi, pollfd->fd);
+
+ goto close_and_handled;
+ }
+
+#ifdef _WIN32
+ if (pollfd->revents & LWS_POLLOUT)
+ wsi->sock_send_blocking = FALSE;
+#endif
+
+ if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
+ (pollfd->revents & LWS_POLLHUP)) {
+ lwsl_debug("pollhup\n");
+ wsi->socket_is_permanently_unusable = 1;
+ goto close_and_handled;
+ }
+
+#if defined(LWS_WITH_TLS)
+ if (lwsi_state(wsi) == LRS_SHUTDOWN &&
+ lws_is_ssl(wsi) && wsi->tls.ssl) {
+ switch (__lws_tls_shutdown(wsi)) {
+ case LWS_SSL_CAPABLE_DONE:
+ case LWS_SSL_CAPABLE_ERROR:
+ goto close_and_handled;
+
+ case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
+ case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
+ case LWS_SSL_CAPABLE_MORE_SERVICE:
+ goto handled;
+ }
+ }
+#endif
+ wsi->could_have_pending = 0; /* clear back-to-back write detection */
+
+ /* okay, what we came here to do... */
+
+ /* if we got here, we should have wire protocol ops set on the wsi */
+ assert(wsi->role_ops);
+
+ // lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
+ // wsi->wsistate);
+
+ switch ((wsi->role_ops->handle_POLLIN)(pt, wsi, pollfd)) {
+ case LWS_HPI_RET_WSI_ALREADY_DIED:
+ return 1;
+ case LWS_HPI_RET_HANDLED:
+ break;
+ case LWS_HPI_RET_PLEASE_CLOSE_ME:
+close_and_handled:
+ lwsl_debug("%p: Close and handled\n", wsi);
+ lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
+ "close_and_handled");
+#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
+ /*
+ * confirm close has no problem being called again while
+ * it waits for libuv service to complete the first async
+ * close
+ */
+ if (context->event_loop_ops == &event_loop_ops_uv)
+ lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
+ "close_and_handled uv repeat test");
+#endif
+ /*
+ * pollfd may point to something else after the close
+ * due to pollfd swapping scheme on delete on some platforms
+ * we can't clear revents now because it'd be the wrong guy's
+ * revents
+ */
+ return 1;
+ default:
+ assert(0);
+ }
+#if defined(LWS_WITH_TLS)
+handled:
+#endif
+ pollfd->revents = 0;
+
+ lws_pt_lock(pt, __func__);
+ __lws_hrtimer_service(pt);
+ lws_pt_unlock(pt);
+
+ return 0;
+}
+
+LWS_VISIBLE int
+lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
+{
+ return lws_service_fd_tsi(context, pollfd, 0);
+}
+
+LWS_VISIBLE int
+lws_service(struct lws_context *context, int timeout_ms)
+{
+ struct lws_context_per_thread *pt = &context->pt[0];
+ int n;
+
+ if (!context)
+ return 1;
+
+ pt->inside_service = 1;
+
+ if (context->event_loop_ops->run_pt) {
+ /* we are configured for an event loop */
+ context->event_loop_ops->run_pt(context, 0);
+
+ pt->inside_service = 0;
+
+ return 1;
+ }
+ n = lws_plat_service(context, timeout_ms);
+
+ pt->inside_service = 0;
+
+ return n;
+}
+
+LWS_VISIBLE int
+lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+ int n;
+
+ pt->inside_service = 1;
+
+ if (context->event_loop_ops->run_pt) {
+ /* we are configured for an event loop */
+ context->event_loop_ops->run_pt(context, tsi);
+
+ pt->inside_service = 0;
+
+ return 1;
+ }
+
+ n = _lws_plat_service_tsi(context, timeout_ms, tsi);
+
+ pt->inside_service = 0;
+
+ return n;
+}