

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) Nginx, Inc.
 */


#if (NGX_SMP)
#define NGX_SMP_LOCK  "lock;"
#else
#define NGX_SMP_LOCK
#endif


/*
 * "cmpxchgl  r, [m]":
 *
 *     if (eax == [m]) {
 *         zf = 1;
 *         [m] = r;
 *     } else {
 *         zf = 0;
 *         eax = [m];
 *     }
 *
 *
 * The "r" means the general register.
 * The "=a" and "a" are the %eax register.
 * Although we can return result in any register, we use "a" because it is
 * used in cmpxchgl anyway.  The result is actually in %al but not in %eax,
 * however, as the code is inlined gcc can test %al as well as %eax,
 * and icc adds "movzbl %al, %eax" by itself.
 *
 * The "cc" means that flags were changed.
 */

static ngx_inline ngx_atomic_uint_t
ngx_atomic_cmp_set(ngx_atomic_t *lock, ngx_atomic_uint_t old,
    ngx_atomic_uint_t set)
{
    u_char  res;

    __asm__ volatile (

         NGX_SMP_LOCK
    "    cmpxchgl  %3, %1;   "
    "    sete      %0;       "

    : "=a" (res) : "m" (*lock), "a" (old), "r" (set) : "cc", "memory");

    return res;
}
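

/*
 * Usage sketch, not part of the original header: ngx_atomic_cmp_set()
 * returns a nonzero value when the compare-and-swap succeeded, so a
 * trivial test-and-set lock can be built on top of it.  The names
 * "example_lock" and "example_trylock" below are hypothetical and serve
 * only as an illustration.
 *
 *     static ngx_atomic_t  example_lock = 0;
 *
 *     static ngx_inline ngx_atomic_uint_t
 *     example_trylock(void)
 *     {
 *         return ngx_atomic_cmp_set(&example_lock, 0, 1);
 *     }
 *
 * To release the lock, ngx_atomic_cmp_set(&example_lock, 1, 0) can be used.
 */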


/*
 * "xaddl  r, [m]":
 *
 *     temp = [m];
 *     [m] += r;
 *     r = temp;
 *
 *
 * The "+r" means the general register.
 * The "cc" means that flags were changed.
 */


#if !(( __GNUC__ == 2 && __GNUC_MINOR__ <= 7 ) || ( __INTEL_COMPILER >= 800 ))

/*
 * icc 8.1 and 9.0 compile broken code with the -march=pentium4 option:
 * ngx_atomic_fetch_add() always returns the input "add" value,
 * so we use the gcc 2.7 version.
 *
 * icc 8.1 and 9.0 with the -march=pentiumpro option, as well as icc 7.1,
 * compile correct code.
 */

static ngx_inline ngx_atomic_int_t
ngx_atomic_fetch_add(ngx_atomic_t *value, ngx_atomic_int_t add)
{
    __asm__ volatile (

         NGX_SMP_LOCK
    "    xaddl  %0, %1;   "

    : "+r" (add) : "m" (*value) : "cc", "memory");

    return add;
}


#else

/*
 * gcc 2.7 does not support "+r", so we have to use the fixed
 * %eax ("=a" and "a"), and this adds two superfluous instructions at the
 * end of the code, something like "mov %eax, %edx / mov %edx, %eax".
 */

static ngx_inline ngx_atomic_int_t
ngx_atomic_fetch_add(ngx_atomic_t *value, ngx_atomic_int_t add)
{
    ngx_atomic_uint_t  old;

    __asm__ volatile (

         NGX_SMP_LOCK
    "    xaddl  %2, %1;   "

    : "=a" (old) : "m" (*value), "a" (add) : "cc", "memory");

    return old;
}

#endif
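

/*
 * Usage sketch, not part of the original header: ngx_atomic_fetch_add()
 * returns the value stored at "value" before the addition, so a shared
 * counter can be incremented and its previous value observed in a single
 * atomic step.  The "example_counter" name is hypothetical.
 *
 *     static ngx_atomic_t  example_counter = 0;
 *
 *     ngx_atomic_int_t  prev;
 *
 *     prev = ngx_atomic_fetch_add(&example_counter, 1);
 */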


/*
 * on x86 write operations are performed in program order, so we only need
 * to disable gcc reordering optimizations
 */

#define ngx_memory_barrier()    __asm__ volatile ("" ::: "memory")
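
/*
 * Illustration, not part of the original header: the barrier above only
 * restrains the compiler.  A typical pattern is publishing data before
 * setting a flag; the "example_data" and "example_ready" variables are
 * hypothetical.
 *
 *     example_data = v;
 *     ngx_memory_barrier();
 *     example_ready = 1;
 *
 * Without the barrier gcc could move the "example_ready" store before the
 * "example_data" store; the x86 hardware itself already performs the
 * stores in program order.
 */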

/* old "as" does not support "pause" opcode */
#define ngx_cpu_pause()         __asm__ (".byte 0xf3, 0x90")
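

/*
 * Spin-wait sketch, not part of the original header: ngx_cpu_pause() is
 * meant to be issued inside a busy-wait loop; it hints to the processor
 * that the code is spinning, which reduces power consumption and helps
 * hyper-threaded CPUs.  The "example_lock" variable is hypothetical (see
 * the sketch after ngx_atomic_cmp_set() above).
 *
 *     while (!ngx_atomic_cmp_set(&example_lock, 0, 1)) {
 *         ngx_cpu_pause();
 *     }
 */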