Skip to content

Commit 3939c78

Browse files
committed
Attempt to improve congestion control.
This fixes two problems that were not immediately evident from porting the NaCl code:

* Last-sent times should only be updated when a real block is sent, not for messages that are just acknowledgments.
* 1ms should not be arbitrarily added to every timeout to prevent spinning; it should only be added when there are no messages to be sent.
1 parent c2337dc commit 3939c78

File tree

2 files changed

+19
-10
lines changed

2 files changed

+19
-10
lines changed

libcurvecpr/lib/chicago.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -143,10 +143,9 @@ void curvecpr_chicago_new (struct curvecpr_chicago *chicago)
143143

144144
chicago->rtt_phase = 0;
145145

146-
/* FIXME: This should be 1 second? */
147-
chicago->wr_rate = 0;
146+
chicago->wr_rate = 1000000000;
148147

149-
chicago->ns_last_update = 0;
148+
chicago->ns_last_update = chicago->clock;
150149
chicago->ns_last_edge = 0;
151150
chicago->ns_last_doubling = 0;
152151
chicago->ns_last_panic = 0;

libcurvecpr/lib/messager.c

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -400,6 +400,9 @@ static int _send_block (struct curvecpr_messager *messager, struct curvecpr_bloc
400400
sent because it could fail for any other arbitrary reason as well and need
401401
to be reinvoked. */
402402
}
403+
404+
/* Update the last sent time for timeout calculations. */
405+
messager->my_sent_clock = messager->chicago.clock;
403406
}
404407

405408
/* Remove all the acknowledged ranges from the pending queue. */
@@ -418,9 +421,6 @@ static int _send_block (struct curvecpr_messager *messager, struct curvecpr_bloc
418421
if (messager->their_eof && messager->their_contiguous_sent_bytes >= messager->their_total_bytes)
419422
messager->their_final = 1;
420423

421-
/* Update the last sent time for timeout calculations. */
422-
messager->my_sent_clock = messager->chicago.clock;
423-
424424
/* Reset last received ID so we don't acknowledge an old message. */
425425
messager->their_sent_id = 0;
426426

@@ -490,13 +490,19 @@ long long curvecpr_messager_next_timeout (struct curvecpr_messager *messager)
490490

491491
long long at, timeout;
492492

493+
/* If we have anything to be written, we wouldn't spin at all, so don't include an
494+
adjustment in the timeout in that case. */
495+
int would_spin = 1;
496+
493497
curvecpr_chicago_refresh_clock(chicago);
494498

495499
at = chicago->clock + 60000000000LL; /* 60 seconds. */
496500

497501
if (!cf->ops.sendmarkq_is_full(messager)) {
498502
/* If we have pending data, we might write it. */
499503
if (!cf->ops.sendq_is_empty(messager)) {
504+
would_spin = 0;
505+
500506
/* Write at the write rate. */
501507
if (at > messager->my_sent_clock + chicago->wr_rate)
502508
at = messager->my_sent_clock + chicago->wr_rate;
@@ -507,16 +513,20 @@ long long curvecpr_messager_next_timeout (struct curvecpr_messager *messager)
507513
if (cf->ops.sendmarkq_head(messager, &block)) {
508514
/* No earliest block. */
509515
} else {
516+
would_spin = 0;
517+
510518
if (at > block->clock + chicago->rtt_timeout)
511519
at = block->clock + chicago->rtt_timeout;
512520
}
513521

514-
/* If the current time is after the next action time, the timeout is 0. However, we
515-
always have at least a 1 millisecond timeout to prevent the CPU from spinning. */
516522
if (chicago->clock > at)
517-
timeout = 1000000;
523+
timeout = 0;
518524
else
519-
timeout = at - chicago->clock + 1000000;
525+
timeout = at - chicago->clock;
526+
527+
/* Apply spinning adjustment if necessary. */
528+
if (would_spin)
529+
timeout += 1000000;
520530

521531
if (cf->ops.put_next_timeout)
522532
cf->ops.put_next_timeout(messager, timeout);

0 commit comments

Comments
 (0)