Commit

improves Mojo::Server::Prefork to keep sending heartbeat messages when stopping gracefully
kraih committed Jun 28, 2014
1 parent 8c3985d commit b81aff9
Showing 4 changed files with 31 additions and 29 deletions.
4 changes: 3 additions & 1 deletion Changes
@@ -1,5 +1,7 @@

-5.10 2014-06-27
+5.10 2014-06-28
+  - Improved Mojo::Server::Prefork to keep sending heartbeat messages when
+    stopping gracefully.
   - Fixed small bug where Mojo::Server::Daemon was too eager to reconfigure
     Mojo::IOLoop.

3 changes: 1 addition & 2 deletions lib/Mojo/Server/Hypnotoad.pm
@@ -255,8 +255,7 @@ Maximum number of connections a worker is allowed to accept before stopping
 gracefully, defaults to the value of L<Mojo::Server::Prefork/"accepts">.
 Setting the value to C<0> will allow workers to accept new connections
 indefinitely. Note that up to half of this value can be subtracted randomly to
-improve load balancing, and that worker processes will stop sending heartbeat
-messages once the limit has been reached.
+improve load balancing.
 
 =head2 backlog
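To show the "accepts" setting described above in use, here is a hedged sketch of a Mojolicious::Lite application that tunes it through the usual hypnotoad config section; the script, listen address and the value 200 are invented for the example and not part of this commit.

  # Hypothetical myapp.pl, only to illustrate the "accepts" setting
  use Mojolicious::Lite;

  # Each worker accepts up to 200 connections before stopping gracefully;
  # a value of 0 would let workers accept new connections indefinitely
  app->config(hypnotoad => {listen => ['http://*:8080'], accepts => 200});

  get '/' => {text => 'Hello World!'};

  app->start;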
50 changes: 26 additions & 24 deletions lib/Mojo/Server/Prefork.pm
@@ -105,10 +105,13 @@ sub _heartbeat {
   return unless $poll->handles(POLLIN | POLLPRI);
   return unless $self->{reader}->sysread(my $chunk, 4194304);
 
-  # Update heartbeats
+  # Update heartbeats (and stop gracefully if necessary)
   my $time = steady_time;
-  $self->{pool}{$1} and $self->emit(heartbeat => $1)->{pool}{$1}{time} = $time
-    while $chunk =~ /(\d+)\n/g;
+  while ($chunk =~ /(\d+):(\d)\n/g) {
+    next unless my $w = $self->{pool}{$1};
+    $self->emit(heartbeat => $1) and $w->{time} = $time;
+    $w->{graceful} ||= $time if $2;
+  }
 }
 
 sub _manage {
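For context, the "heartbeat" event emitted in the parser above is part of the public Mojo::Server::Prefork API. A hedged sketch of subscribing to it; the listen address and log message are invented for the example.

  # Hypothetical standalone script, only to show the heartbeat event in use
  use Mojo::Server::Prefork;

  my $prefork = Mojo::Server::Prefork->new(listen => ['http://*:8080']);

  # Log every heartbeat message the manager receives from a worker
  $prefork->on(heartbeat => sub {
    my ($prefork, $pid) = @_;
    $prefork->app->log->debug("Worker $pid sent a heartbeat");
  });

  # Would start a preforking server for the default Mojo::HelloWorld app
  $prefork->run;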
@@ -123,34 +126,33 @@ sub _manage {
   # Shutdown
   elsif (!keys %{$self->{pool}}) { return delete $self->{running} }
 
-  # Manage workers
+  # Wait for heartbeats
   $self->emit('wait')->_heartbeat;
-  my $log = $self->app->log;
+
+  my $interval = $self->heartbeat_interval;
+  my $ht       = $self->heartbeat_timeout;
+  my $gt       = $self->graceful_timeout;
+  my $time     = steady_time;
+  my $log      = $self->app->log;
+
   for my $pid (keys %{$self->{pool}}) {
     next unless my $w = $self->{pool}{$pid};
 
     # No heartbeat (graceful stop)
-    my $interval = $self->heartbeat_interval;
-    my $timeout  = $self->heartbeat_timeout;
-    my $time     = steady_time;
-    if (!$w->{graceful} && ($w->{time} + $interval + $timeout <= $time)) {
-      $log->info("Worker $pid has no heartbeat, restarting.");
-      $w->{graceful} = $time;
-    }
+    $log->error("Worker $pid has no heartbeat, restarting.")
+      and $w->{graceful} = $time
+      if !$w->{graceful} && ($w->{time} + $interval + $ht <= $time);
 
     # Graceful stop with timeout
-    $w->{graceful} ||= $time if $self->{graceful};
-    if ($w->{graceful}) {
-      $log->debug("Trying to stop worker $pid gracefully.");
-      kill 'QUIT', $pid;
-      $w->{force} = 1 if $w->{graceful} + $self->graceful_timeout <= $time;
-    }
+    my $graceful = $w->{graceful} ||= $self->{graceful} ? $time : undef;
+    $log->debug("Trying to stop worker $pid gracefully.")
+      and kill 'QUIT', $pid
+      if $graceful && !$w->{quit}++;
+    $w->{force} = 1 if $graceful && $graceful + $gt <= $time;
 
     # Normal stop
-    if (($self->{finished} && !$self->{graceful}) || $w->{force}) {
-      $log->debug("Stopping worker $pid.");
-      kill 'KILL', $pid;
-    }
+    $log->debug("Stopping worker $pid.") and kill 'KILL', $pid
+      if $w->{force} || ($self->{finished} && !$graceful);
   }
 }

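To make the timing in the loop above concrete, here is a small worked sketch of the two deadlines. It assumes the defaults of this era (heartbeat_interval 5, heartbeat_timeout 20, graceful_timeout 20 seconds); those numbers and the 30-second gap are assumptions for illustration, not taken from the diff.

  # Hypothetical numbers, only to illustrate the two checks above
  use Mojo::Util 'steady_time';

  my ($interval, $ht, $gt) = (5, 20, 20);
  my $time = steady_time;
  my $last = $time - 30;    # pretend the last heartbeat arrived 30 seconds ago

  # 30 > 5 + 20, so this worker would be asked to stop gracefully (QUIT)
  my $graceful = $last + $interval + $ht <= $time ? $time : undef;

  # Only once it has been stopping gracefully for more than 20 seconds
  # would it be stopped forcefully (KILL)
  my $force = $graceful && $graceful + $gt <= $time;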
@@ -198,8 +200,8 @@ sub _spawn {
   weaken $self;
   $loop->recurring(
     $self->heartbeat_interval => sub {
-      return unless shift->max_connections;
-      $self->{writer}->syswrite("$$\n") or exit 0;
+      my $graceful = shift->max_connections ? 0 : 1;
+      $self->{writer}->syswrite("$$:$graceful\n") or exit 0;
     }
   );

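Putting the two sides of the change together: a worker now always writes a line of the form "$pid:$graceful" to the pipe, even after it has stopped accepting connections, and the manager parses it as shown in _heartbeat above. A minimal standalone sketch of the format; the sample PID and the direct string round-trip are invented for illustration.

  # Hypothetical demonstration of the new heartbeat line format only;
  # Mojo::Server::Prefork actually passes these lines over a pipe
  my $pid      = 31842;
  my $graceful = 1;    # 1 once the worker has stopped accepting connections

  # What a worker writes every heartbeat_interval seconds
  my $message = "$pid:$graceful\n";

  # What the manager extracts from the accumulated chunk
  while ($message =~ /(\d+):(\d)\n/g) {
    print "worker $1 is alive", $2 ? ' and stopping gracefully' : '', "\n";
  }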
3 changes: 1 addition & 2 deletions lib/Mojolicious/Guides/FAQ.pod
@@ -209,8 +209,7 @@ be closed immediately.
 As long as they are accepting new connections, worker processes of all
 built-in preforking web servers send heartbeat messages to the manager process
 at regular intervals, to signal that they are still responsive. A blocking
-operation such as an infinite loop in your application (or active connections
-after a worker has stopped accepting new connections) can prevent this, and
+operation such as an infinite loop in your application can prevent this, and
 will force the affected worker to be restarted after a timeout. This timeout
 defaults to C<20> seconds and can be extended with the attribute
 L<Mojo::Server::Prefork/"heartbeat_timeout"> if your application requires it.
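If an application genuinely needs more time between heartbeats, the timeout can be raised in the usual places. A hedged sketch; the value 60 is an arbitrary example.

  # Hypothetical examples only, the value 60 is arbitrary

  # Standalone Mojo::Server::Prefork
  use Mojo::Server::Prefork;
  my $prefork = Mojo::Server::Prefork->new(heartbeat_timeout => 60);

  # Or through the hypnotoad settings of a Mojolicious application:
  # app->config(hypnotoad => {heartbeat_timeout => 60});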
