deps: upgrade libuv to 5b9c451
bnoordhuis committed May 23, 2012
1 parent dff467d commit 1358bac
Showing 12 changed files with 448 additions and 210 deletions.
23 changes: 16 additions & 7 deletions deps/uv/include/uv-private/uv-unix.h
@@ -70,6 +70,16 @@ typedef struct {
char* errmsg;
} uv_lib_t;

struct uv__io_s;
struct uv_loop_s;

typedef struct uv__io_s uv__io_t;
typedef void (*uv__io_cb)(struct uv_loop_s* loop, uv__io_t* handle, int events);

struct uv__io_s {
ev_io io_watcher;
};

#define UV_REQ_TYPE_PRIVATE /* empty */

#if __linux__
@@ -78,7 +88,7 @@ typedef struct {
struct uv__inotify_watchers { \
struct uv_fs_event_s* rbh_root; \
} inotify_watchers; \
ev_io inotify_read_watcher; \
uv__io_t inotify_read_watcher; \
int inotify_fd;
#elif defined(PORT_SOURCE_FILE)
# define UV_LOOP_PRIVATE_PLATFORM_FIELDS \
@@ -142,8 +152,8 @@ typedef struct {
#define UV_STREAM_PRIVATE_FIELDS \
uv_connect_t *connect_req; \
uv_shutdown_t *shutdown_req; \
ev_io read_watcher; \
ev_io write_watcher; \
uv__io_t read_watcher; \
uv__io_t write_watcher; \
ngx_queue_t write_queue; \
ngx_queue_t write_completed_queue; \
int delayed_error; \
@@ -160,8 +170,8 @@ typedef struct {
#define UV_UDP_PRIVATE_FIELDS \
uv_alloc_cb alloc_cb; \
uv_udp_recv_cb recv_cb; \
ev_io read_watcher; \
ev_io write_watcher; \
uv__io_t read_watcher; \
uv__io_t write_watcher; \
ngx_queue_t write_queue; \
ngx_queue_t write_completed_queue; \

@@ -173,7 +183,7 @@ typedef struct {

/* UV_POLL */
#define UV_POLL_PRIVATE_FIELDS \
ev_io io_watcher;
uv__io_t io_watcher;


/* UV_PREPARE */ \
@@ -238,7 +248,6 @@ typedef struct {
struct uv_fs_event_s* rbe_parent; \
int rbe_color; \
} node; \
ev_io read_watcher; \
uv_fs_event_cb cb;

#elif defined(__APPLE__) \
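
Note: the hunks above introduce uv__io_t, a thin wrapper around libev's ev_io, together with the uv__io_cb callback type, and switch the stream, UDP, poll and inotify private fields from raw ev_io watchers to the new type. Below is a minimal standalone sketch of the pattern these embedded watchers rely on; it is not libuv source, and every my_* name in it is an illustrative assumption. The callback receives a pointer to the embedded watcher and recovers the enclosing handle with container_of(), the same idiom libuv uses elsewhere in this commit.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))

typedef struct my_io_s my_io_t;                    /* stand-in for uv__io_t */
typedef void (*my_io_cb)(my_io_t* w, int events);  /* stand-in for uv__io_cb */

struct my_io_s {
  my_io_cb cb;
  int fd;
};

typedef struct {
  int delayed_error;
  my_io_t read_watcher;  /* embedded, like the UV_STREAM_PRIVATE_FIELDS above */
} my_stream_t;

static void on_readable(my_io_t* w, int events) {
  /* Recover the handle that embeds the watcher. */
  my_stream_t* stream = container_of(w, my_stream_t, read_watcher);
  printf("fd %d readable, delayed_error=%d, events=%d\n",
         w->fd, stream->delayed_error, events);
}

int main(void) {
  my_stream_t s = { 0, { on_readable, 7 } };
  s.read_watcher.cb(&s.read_watcher, 1);  /* simulate the loop firing the watcher */
  return 0;
}
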
225 changes: 225 additions & 0 deletions deps/uv/src/cares.c
@@ -0,0 +1,225 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "uv.h"
#include "tree.h"
#include "uv-common.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>


struct uv_ares_task_s {
UV_HANDLE_FIELDS
ares_socket_t sock;
uv_poll_t poll_watcher;
RB_ENTRY(uv_ares_task_s) node;
};


static int cmp_ares_tasks(const uv_ares_task_t* a, const uv_ares_task_t* b) {
if (a->sock < b->sock) return -1;
if (a->sock > b->sock) return 1;
return 0;
}


RB_GENERATE_STATIC(uv__ares_tasks, uv_ares_task_s, node, cmp_ares_tasks)


/* Add ares handle to list. */
static void uv_add_ares_handle(uv_loop_t* loop, uv_ares_task_t* handle) {
assert(loop == handle->loop);
RB_INSERT(uv__ares_tasks, &loop->ares_handles, handle);
}


/* Find matching ares handle in list. */
static uv_ares_task_t* uv_find_ares_handle(uv_loop_t* loop, ares_socket_t sock) {
uv_ares_task_t handle;
handle.sock = sock;
return RB_FIND(uv__ares_tasks, &loop->ares_handles, &handle);
}


/* Remove ares handle from list. */
static void uv_remove_ares_handle(uv_ares_task_t* handle) {
RB_REMOVE(uv__ares_tasks, &handle->loop->ares_handles, handle);
}


/* Returns 1 if the ares_handles list is empty, 0 otherwise. */
static int uv_ares_handles_empty(uv_loop_t* loop) {
return RB_EMPTY(&loop->ares_handles);
}


/* This is called once per second by loop->ares_timer. It is used to */
/* constantly call back into c-ares so it can process possible timeouts. */
static void uv__ares_timeout(uv_timer_t* handle, int status) {
assert(!uv_ares_handles_empty(handle->loop));
ares_process_fd(handle->loop->channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}


static void uv__ares_poll_cb(uv_poll_t* watcher, int status, int events) {
uv_loop_t* loop = watcher->loop;
uv_ares_task_t* task = container_of(watcher, uv_ares_task_t, poll_watcher);

/* Reset the idle timer */
uv_timer_again(&loop->ares_timer);

if (status < 0) {
/* An error happened. Just pretend that the socket is both readable and */
/* writable. */
ares_process_fd(loop->channel, task->sock, task->sock);
return;
}

/* Process DNS responses */
ares_process_fd(loop->channel,
events & UV_READABLE ? task->sock : ARES_SOCKET_BAD,
events & UV_WRITABLE ? task->sock : ARES_SOCKET_BAD);
}


static void uv__ares_poll_close_cb(uv_handle_t* watcher) {
uv_ares_task_t* task = container_of(watcher, uv_ares_task_t, poll_watcher);
free(task);
}


/* Allocates and returns a new uv_ares_task_t */
static uv_ares_task_t* uv__ares_task_create(uv_loop_t* loop, ares_socket_t sock) {
uv_ares_task_t* task = (uv_ares_task_t*) malloc(sizeof *task);

if (task == NULL) {
/* Out of memory. */
return NULL;
}

task->loop = loop;
task->sock = sock;

if (uv_poll_init_socket(loop, &task->poll_watcher, sock) < 0) {
/* This should never happen. */
free(task);
return NULL;
}

return task;
}


/* Callback from c-ares when a socket changes state */
static void uv__ares_sockstate_cb(void* data, ares_socket_t sock,
int read, int write) {
uv_loop_t* loop = (uv_loop_t*) data;
uv_ares_task_t* task;

task = uv_find_ares_handle(loop, sock);

if (read || write) {
if (!task) {
/* New socket */

/* If this is the first socket then start the timer. */
if (!uv_is_active((uv_handle_t*) &loop->ares_timer)) {
assert(uv_ares_handles_empty(loop));
uv_timer_start(&loop->ares_timer, uv__ares_timeout, 1000, 1000);
}

task = uv__ares_task_create(loop, sock);
if (task == NULL) {
/* This should never happen unless we're out of memory or something */
/* is seriously wrong. The socket won't be polled, but the query */
/* will eventually time out. */
return;
}

uv_add_ares_handle(loop, task);
}

/* This should never fail. If it fails anyway, the query will eventually */
/* time out. */
uv_poll_start(&task->poll_watcher,
(read ? UV_READABLE : 0) | (write ? UV_WRITABLE : 0),
uv__ares_poll_cb);

} else {
/* read == 0 and write == 0: this is c-ares's way of notifying us that */
/* the socket is now closed. We must free the data associated with the */
/* socket. */
assert(task &&
"When an ares socket is closed we should have a handle for it");

uv_remove_ares_handle(task);
uv_close((uv_handle_t*) &task->poll_watcher, uv__ares_poll_close_cb);

if (uv_ares_handles_empty(loop)) {
uv_timer_stop(&loop->ares_timer);
}
}
}


/* c-ares integration: initialization and termination */
int uv_ares_init_options(uv_loop_t* loop, ares_channel *channelptr,
struct ares_options *options, int optmask) {
int rc;

/* Only allow a single init at a time. */
if (loop->channel != NULL) {
uv__set_artificial_error(loop, UV_EALREADY);
return -1;
}

/* set our callback as an option */
options->sock_state_cb = uv__ares_sockstate_cb;
options->sock_state_cb_data = loop;
optmask |= ARES_OPT_SOCK_STATE_CB;

/* We make the call to ares_init_options() for the caller. */
rc = ares_init_options(channelptr, options, optmask);

/* If successful, save the channel. */
if (rc == ARES_SUCCESS) {
loop->channel = *channelptr;
}

/* Initialize the timeout timer. The timer won't be started until the */
/* first socket is opened. */
uv_timer_init(loop, &loop->ares_timer);

return rc;
}


void uv_ares_destroy(uv_loop_t* loop, ares_channel channel) {
/* Only allow destroy if init was done first. */
if (loop->channel) {
uv_timer_stop(&loop->ares_timer);
ares_destroy(channel);
loop->channel = NULL;
}
}
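
Note: cares.c wires c-ares socket activity into the loop with uv_poll handles plus a one-second repeating timer (loop->ares_timer). The usage sketch below shows how an embedder might hook c-ares up through this API; it assumes this era of the tree, where uv_ares_init_options()/uv_ares_destroy() are still exported and uv_run() takes only the loop, and it abbreviates error handling. The hostname and callback are purely illustrative.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netdb.h>

#include "uv.h"
#include "ares.h"

static void on_resolved(void* arg, int status, int timeouts, struct hostent* host) {
  (void) arg;
  (void) timeouts;
  if (status == ARES_SUCCESS)
    printf("resolved %s\n", host->h_name);
  else
    printf("lookup failed: %s\n", ares_strerror(status));
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  ares_channel channel;
  struct ares_options options;

  memset(&options, 0, sizeof options);  /* sock_state_cb is filled in by libuv */

  if (uv_ares_init_options(loop, &channel, &options, 0) != ARES_SUCCESS)
    return 1;

  ares_gethostbyname(channel, "nodejs.org", AF_INET, on_resolved, NULL);

  uv_run(loop);                   /* drives the uv_poll watchers created above */
  uv_ares_destroy(loop, channel);
  return 0;
}
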
48 changes: 48 additions & 0 deletions deps/uv/src/unix/core.c
@@ -592,3 +592,51 @@ uv_err_t uv_chdir(const char* dir) {
return uv__new_sys_error(errno);
}
}


static void uv__io_set_cb(uv__io_t* handle, uv__io_cb cb) {
union { void* data; uv__io_cb cb; } u;
u.cb = cb;
handle->io_watcher.data = u.data;
}


static void uv__io_rw(struct ev_loop* ev, ev_io* w, int events) {
union { void* data; uv__io_cb cb; } u;
uv_loop_t* loop = ev_userdata(ev);
uv__io_t* handle = container_of(w, uv__io_t, io_watcher);
u.data = handle->io_watcher.data;
u.cb(loop, handle, events & (EV_READ|EV_WRITE|EV_ERROR));
}


void uv__io_init(uv__io_t* handle, uv__io_cb cb, int fd, int events) {
ev_io_init(&handle->io_watcher, uv__io_rw, fd, events & (EV_READ|EV_WRITE));
uv__io_set_cb(handle, cb);
}


void uv__io_set(uv__io_t* handle, uv__io_cb cb, int fd, int events) {
ev_io_set(&handle->io_watcher, fd, events);
uv__io_set_cb(handle, cb);
}


void uv__io_start(uv_loop_t* loop, uv__io_t* handle) {
ev_io_start(loop->ev, &handle->io_watcher);
}


void uv__io_stop(uv_loop_t* loop, uv__io_t* handle) {
ev_io_stop(loop->ev, &handle->io_watcher);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* handle, int event) {
ev_feed_event(loop->ev, &handle->io_watcher, event);
}


int uv__io_active(uv__io_t* handle) {
return ev_is_active(&handle->io_watcher);
}
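
Note: the new uv__io_* helpers above are a thin shim over libev; the one subtle detail is that the uv__io_cb is stashed in ev_io's void* data slot by copying it through a union rather than casting, since ISO C does not define a conversion between function pointers and object pointers. Below is a standalone illustration of that store/load round trip; it is not libuv source, and the watcher/io_cb names are assumptions.

#include <stdio.h>

typedef void (*io_cb)(int events);

struct watcher {
  void* data;  /* stand-in for ev_io's data member: only a void* slot is available */
};

static void store_cb(struct watcher* w, io_cb cb) {
  union { void* data; io_cb cb; } u;
  u.cb = cb;          /* write the function pointer into the union... */
  w->data = u.data;   /* ...and read it back out as a void* */
}

static io_cb load_cb(const struct watcher* w) {
  union { void* data; io_cb cb; } u;
  u.data = w->data;
  return u.cb;
}

static void on_events(int events) {
  printf("events=%d\n", events);
}

int main(void) {
  struct watcher w;
  store_cb(&w, on_events);
  load_cb(&w)(3);  /* invoke the callback recovered from the void* slot */
  return 0;
}
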
16 changes: 12 additions & 4 deletions deps/uv/src/unix/internal.h
@@ -78,6 +78,10 @@
} \
while (0)

#define UV__IO_READ EV_READ
#define UV__IO_WRITE EV_WRITE
#define UV__IO_ERROR EV_ERROR

/* flags */
enum {
UV_CLOSING = 0x01, /* uv_close() called but not finished. */
@@ -127,6 +131,13 @@ int uv__socket(int domain, int type, int protocol);
int uv__dup(int fd);
int uv_async_stop(uv_async_t* handle);

void uv__io_init(uv__io_t* handle, uv__io_cb cb, int fd, int events);
void uv__io_set(uv__io_t* handle, uv__io_cb cb, int fd, int events);
void uv__io_start(uv_loop_t* loop, uv__io_t* handle);
void uv__io_stop(uv_loop_t* loop, uv__io_t* handle);
void uv__io_feed(uv_loop_t* loop, uv__io_t* handle, int event);
int uv__io_active(uv__io_t* handle);

/* loop */
int uv__loop_init(uv_loop_t* loop, int default_loop);
void uv__loop_delete(uv_loop_t* loop);
@@ -143,8 +154,7 @@ void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
uv_handle_type type);
int uv__stream_open(uv_stream_t*, int fd, int flags);
void uv__stream_destroy(uv_stream_t* stream);
void uv__stream_io(EV_P_ ev_io* watcher, int revents);
void uv__server_io(EV_P_ ev_io* watcher, int revents);
void uv__server_io(uv_loop_t* loop, uv__io_t* watcher, int events);
int uv__accept(int sockfd, struct sockaddr* saddr, socklen_t len);
int uv__connect(uv_connect_t* req, uv_stream_t* stream, struct sockaddr* addr,
socklen_t addrlen, uv_connect_cb cb);
@@ -156,11 +166,9 @@ int uv__tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay);

/* pipe */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
void uv__pipe_accept(EV_P_ ev_io* watcher, int revents);

/* poll */
void uv__poll_close(uv_poll_t* handle);
int uv__poll_active(const uv_poll_t* handle);

/* various */
void uv__async_close(uv_async_t* handle);
