• Running applications via tsocks

    • Install tsocks:
      sudo apt-get install tsocks

    • Set up a SOCKS tunnel with OpenSSH
      sudo ssh -p 443 -D4567 my.server
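
      If the tunnel should survive network drops, autossh can supervise the same command (a sketch; it assumes the autossh package is installed, and the keep-alive values are only examples):
      sudo autossh -M 0 -f -N -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -p 443 -D4567 my.server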

    • Configure tsocks
      cat /etc/tsocks.conf

      local = 192.168.0.0/255.255.0.0
      local = 10.0.0.0/255.0.0.0
      local = 172.24.0.0/255.255.0.0
      server = 127.0.0.1
      server_type = 5
      server_port = 4567

    • Launch the application under tsocks
      tsocks skype
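
    • Optional: sanity-check the SOCKS tunnel
      A sketch, reusing the port chosen above; curl speaks SOCKS natively, so this tests the tunnel independently of tsocks (the URL is only an example):

      # confirm the SSH SOCKS listener is up
      ss -lntp | grep 4567
      # fetch a page through the SOCKS proxy
      curl --socks5-hostname 127.0.0.1:4567 http://example.com/ -o /dev/null -w '%{http_code}\n'
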
  • Set up a SOCKS server

    Install Dante on the server (CentOS):

  • cd /tmp
    wget http://www.inet.no/dante/files/dante-1.3.2.tar.gz
    tar xvfz dante-*.tar.gz
    cd dante-*
    ./configure
    make
    make install
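
    The build needs a basic toolchain; on a minimal CentOS install something like the following is usually required first (a sketch):

    yum install -y gcc make wget tar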

  • Configure Dante on the server:
    cat /etc/sockd.conf
    #logoutput: syslog stdout /var/log/sockd.log
    #logoutput: stderr
    #logoutput: syslog
    logoutput: /var/log/sockd.log

    internal: 127.0.0.1 port = 1080
    external: venet0

    method: none
    clientmethod: none

    user.privileged: sockd
    user.unprivileged: nobody
    #user.libwrap: nobody

    #compatibility: sameport
    #compatibility: reuseaddr

    extension: bind
    timeout.negotiate: 30
    timeout.io: 86400

    #srchost: nounknown nomismatch
    #client pass {
    # from: 10.0.0.0/8 port 1-65535 to: 0.0.0.0/0
    # method: rfc931 # match all idented users that also are in passwordfile
    #}

    client pass {
    from: 127.0.0.0/8 to: 0.0.0.0/0
    log: connect error
    method: none
    }

    client block {
    from: 0.0.0.0/0 to: 0.0.0.0/0
    method: none
    }

    pass {
    from: 127.0.0.0/8 to: 0.0.0.0/0
    command: bind connect udpassociate
    log: connect error
    method: none
    }

    pass {
    from: 0.0.0.0/0 to: 127.0.0.0/8
    command: bindreply udpreply
    log: connect error
    method: none
    }

    block {
    from: 0.0.0.0/0 to: 0.0.0.0/0
    log: connect error
    }

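    Before wiring sockd into an init script, it can be run by hand to confirm that the configuration parses and that the proxy answers. A sketch: -V (verify config), -N (number of processes) and -f are Dante options, but check them against the installed version.

    # the config above references a privileged 'sockd' user; create it if missing
    id sockd >/dev/null 2>&1 || useradd -r -s /sbin/nologin sockd

    # verify the configuration file and exit
    /usr/local/sbin/sockd -V -f /etc/sockd.conf

    # run once in the foreground, then test the listener from another shell
    /usr/local/sbin/sockd -f /etc/sockd.conf -N 1
    curl --socks5 127.0.0.1:1080 http://example.com/ -o /dev/null -w '%{http_code}\n'
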
  • Add init script:
    cat /etc/init.d/sockd
    #!/bin/sh
    #
    # chkconfig: - 91 35
    # description: Starts and stops the sockd (Dante) daemon \
    #              used to provide SOCKS services.
    #

    PID="/var/run/sockd/sockd.pid"
    CONFIG="/etc/sockd.conf"

    # Source function library.
    if [ -f /etc/init.d/functions ] ; then
        . /etc/init.d/functions
    elif [ -f /etc/rc.d/init.d/functions ] ; then
        . /etc/rc.d/init.d/functions
    else
        exit 1
    fi

    # Avoid using root's TMPDIR
    unset TMPDIR

    # Source networking configuration.
    . /etc/sysconfig/network

    # Check that networking is up.
    [ "${NETWORKING}" = "no" ] && exit 1

    # Check that sockd.conf exists.
    [ -f ${CONFIG} ] || exit 6

    RETVAL=0
    OPTIONS="-D -p ${PID} -f ${CONFIG}"

    start() {
        KIND="SOCKD"
        echo -n $"Starting $KIND services: "
        /usr/local/sbin/sockd ${OPTIONS}
        RETVAL=$?
        echo
        [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sockd || \
            RETVAL=1
        return $RETVAL
    }

    stop() {
        KIND="SOCKD"
        echo -n $"Shutting down $KIND services: "
        killproc sockd
        RETVAL=$?
        echo
        [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sockd
        return $RETVAL
    }

    restart() {
        stop
        start
    }

    rhstatus() {
        status -l sockd sockd
        return $?
    }

    # Allow status as non-root.
    if [ "$1" = status ]; then
        rhstatus
        exit $?
    fi

    case "$1" in
        start)
            start
            ;;
        stop)
            stop
            ;;
        restart)
            restart
            ;;
        status)
            rhstatus
            ;;
        condrestart)
            [ -f /var/lock/subsys/sockd ] && restart || :
            ;;
        *)
            echo $"Usage: $0 {start|stop|restart|status|condrestart}"
            exit 2
    esac

    exit $?

  • Enable automatic launch at boot

    chkconfig --add sockd
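
    With the script registered, the service can be managed with the usual CentOS tools (a sketch; creating the pidfile directory is an assumption based on the PID path used in the script):

    mkdir -p /var/run/sockd
    chkconfig sockd on
    service sockd start
    service sockd status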

  • SOCKS server implementations

    Dante
    http://www.inet.no/dante/

    SS5
    http://ss5.sourceforge.net/

    DeleGate
    http://www.delegate.org/delegate/

    Srelay
    http://socks-relay.sourceforge.net/

  • Managing EFI Boot Loaders for Linux

  • Mirror Debian with rsync (ftpsync)

    sudo useradd -d /home/ftpsync -m ftpsync
    sudo passwd ftpsync

    sudo su ftpsync
    bash
    export http_proxy=…
    wget http://ftp-master.debian.org/ftpsync.tar.gz
    tar xvfz ftpsync.tar.gz

    mkdir ~/bin ~/etc ~/log
    cp distrib/bin/ftpsync ~/bin/
    cp distrib/etc/ftpsync.conf.sample ~/etc/ftpsync.conf
    cp distrib/etc/common ~/etc/

    cat /home/ftpsync/etc/ftpsync.conf

    MIRRORNAME=`hostname -f`

    RSYNC_DIR="/home/ftpsync"

    TO="${RSYNC_DIR}/mirrors/debian/"

    RSYNC_PATH="debian"

    # amd64 i386 armel armhf
    RSYNC_HOST=debian.ustc.edu.cn
    #RSYNC_HOST=ftp.cn.debian.org
    #RSYNC_HOST=debian.bjtu.edu.cn

    # amd64 i386 armhf
    #RSYNC_HOST=www.anheng.com.cn

    # amd64 i386
    #RSYNC_HOST=mirrors.163.com
    #RSYNC_HOST=mirrors.sohu.com

    LOGDIR="${RSYNC_DIR}/log"

    LOG="${LOGDIR}/ftpsync.log"

    EXCLUDE=""

    ARCH_EXCLUDE="\
    alpha \
    hppa \
    hurd-i386 \
    ia64 \
    kfreebsd-amd64 \
    kfreebsd-i386 \
    m68k \
    mipsel \
    mips \
    powerpc \
    s390 \
    s390x \
    sh \
    sparc"

    LOGROTATE=14

    UIPRETRIES=3

    RSYNC_PROXY=172.24.61.252:8080

    RSYNC=rsync

    #RSYNC_BW=""
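
    To populate the mirror and keep it current, ftpsync is run by hand once and then from cron (a sketch; invocation details can vary between ftpsync releases, and the schedule is only an example):

    # initial sync, run as the ftpsync user
    ~/bin/ftpsync

    # crontab -e (as ftpsync): refresh every six hours
    # 0 */6 * * * /home/ftpsync/bin/ftpsync >/dev/null 2>&1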

  • DNS/Firefox tunnel over the SSH SOCKS proxy

    tunnel-dns-through-ssh-d-socks-proxy

    Open about:config in Firefox.

    Set network.proxy.socks_remote_dns to true.
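
    The same preference can also be appended to the profile's user.js from a shell (a sketch; the profile directory name varies per installation):

    for p in ~/.mozilla/firefox/*.default*; do
        echo 'user_pref("network.proxy.socks_remote_dns", true);' >> "$p/user.js"
    done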

  • Download the Android source code over SSH

    1) Install an HTTP proxy with SOCKS support:
    sudo apt-get install privoxy

    2) Install a caching DNS server:
    sudo apt-get install dnsmasq

    3) Install a DNS-over-SOCKS proxy:
    git clone https://github.com/jtRIPper/dns-tcp-socks-proxy.git
    cd dns-tcp-socks-proxy
    make

    4) Set up an SSH connection to the external server, with a SOCKS listener on 127.0.0.1:4567 (any free port):

    ssh -p port -D 4567 user@ssh_server

    5) Configure privoxy to listen on 127.0.0.1:8080 and forward to the SSH SOCKS proxy at 127.0.0.1:4567:

    cat /etc/privoxy/config
    listen-address 127.0.0.1:8080
    forward-socks5 / 127.0.0.1:4567 .

    forward 192.168.*.*/ .
    forward 10.*.*.*/ .
    forward 127.*.*.*/ .
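
    After editing the config, restart privoxy and confirm the privoxy -> SSH SOCKS chain works (a sketch; the service name and ports follow the configuration above):

    sudo service privoxy restart
    # the request should be relayed through the SOCKS tunnel on port 4567
    curl -x http://127.0.0.1:8080 http://example.com/ -o /dev/null -w '%{http_code}\n'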

    6) Configure dnsmasq so that its first upstream server points to dns-tcp-socks-proxy:

    cat /etc/dnsmasq.conf

    resolv-file=/etc/resolv-dnsmasq.conf
    strict-order
    server=/221.24.172.in-addr.arpa/internal_dns_ip
    server=/61.24.172.in-addr.arpa/internal_dns_ip
    server=/62.24.172.in-addr.arpa/internal_dns_ip
    server=/63.24.172.in-addr.arpa/internal_dns_ip
    interface=eth0
    bind-interfaces
    log-queries
    log-dhcp

    With this configuration, dnsmasq listens on the eth0 interface and on 127.0.0.1, and forwards queries to the DNS resolvers defined in /etc/resolv-dnsmasq.conf.
    The strict-order directive forces dnsmasq to try those servers in the order in which they are listed in /etc/resolv-dnsmasq.conf.

    cat /etc/resolv-dnsmasq.conf
    nameserver 127.0.0.2
    nameserver 172.24.63.211
    nameserver 172.24.63.212

    7) Configure dns-tcp-socks-proxy to accept queries from dnsmasq and forward them to the SSH SOCKS proxy, here 127.0.0.1:4567:

    cat dns_proxy.conf
    socks_port = 4567
    socks_addr = 127.0.0.1

    listen_addr = 127.0.0.2
    listen_port = 53

    set_user = nobody
    set_group = nobody

    resolv_conf = /etc/dns_proxy/resolv.txt
    log_file = /dev/null
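
    A sketch of starting the proxy and checking the resolver chain end to end; the assumption that the binary takes the config file as its argument follows the project's README:

    # needs root to bind port 53 on 127.0.0.2
    sudo ./dns_proxy dns_proxy.conf

    # restart dnsmasq so it re-reads /etc/resolv-dnsmasq.conf
    sudo service dnsmasq restart

    # answered by dnsmasq on 127.0.0.1, which forwards external names to
    # 127.0.0.2 and on through the SSH SOCKS tunnel
    dig @127.0.0.1 android.googlesource.com +short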

    8) Point the local DNS resolver at 127.0.0.1:
    cat /etc/resolv.conf
    nameserver 127.0.0.1

    9) Sync the Android source code over SSH
    repo sync

    10) Note: with this configuration we have
    a local HTTP/HTTPS proxy at 127.0.0.1:8080, forwarding to the external server via SOCKS
    a local caching DNS proxy at 127.0.0.1:53, forwarding to the external server via SOCKS
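
    repo and git honour the standard proxy environment variables, so they can be pointed at the local privoxy instance from step 5 (a sketch; the manifest URL is the usual AOSP one, and the port follows the configuration above):

    export http_proxy=http://127.0.0.1:8080
    export https_proxy=http://127.0.0.1:8080

    repo init -u https://android.googlesource.com/platform/manifest
    repo sync -j4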

  • FWD: Weak vs. Strong Memory Models

    Sep 30, 2012

    There are many types of memory reordering, and not all types of reordering occur equally often. It all depends on the processor you’re targeting and/or the toolchain you’re using for development.

    A memory model tells you, for a given processor or toolchain, exactly what types of memory reordering to expect at runtime relative to a given source code listing. Keep in mind that the effects of memory reordering can only be observed when lock-free programming techniques are used.

    After studying memory models for a while – mostly by reading various online sources and verifying through experimentation – I’ve gone ahead and organized them into the following four categories. Below, each memory model makes all the guarantees of the ones to the left, plus some additional ones. I’ve drawn a clear line between weak memory models and strong ones, to capture the way most people appear to use these terms. Read on for my justification for doing so.

    Each physical device pictured above represents a hardware memory model. A hardware memory model tells you what kind of memory ordering to expect at runtime relative to an assembly (or machine) code listing.

    Every processor family has different habits when it comes to memory reordering, and those habits can only be observed in multicore or multiprocessor configurations. Given that multicore is now mainstream, it’s worth having some familiarity with them.

    There are software memory models as well. Technically, once you’ve written (and debugged) portable lock-free code in C11, C++11 or Java, only the software memory model is supposed to matter. Nonetheless, a general understanding of hardware memory models may come in handy. It can help you explain unexpected behavior while debugging, and — perhaps just as importantly — appreciate how incorrect code may function correctly on a specific processor and toolchain out of luck.
    Weak Memory Models

    In the weakest memory model, it’s possible to experience all four types of memory reordering I described using a source control analogy in a previous post. Any load or store operation can effectively be reordered with any other load or store operation, as long as it would never modify the behavior of a single, isolated thread. In reality, the reordering may be due to either compiler reordering of instructions, or memory reordering on the processor itself.

    When a processor has a weak hardware memory model, we tend to say it’s weakly-ordered or that it has weak ordering. We may also say it has a relaxed memory model. The venerable DEC Alpha is everybody’s favorite example of a weakly-ordered processor. There’s really no mainstream processor with weaker ordering.

    The C11 and C++11 programming languages expose a weak software memory model which was in many ways influenced by the Alpha. When using low-level atomic operations in these languages, it doesn’t matter if you’re actually targeting a strong processor family such as x86/64. As I demonstrated previously, you must still specify the correct memory ordering constraints, if only to prevent compiler reordering.
    Weak With Data Dependency Ordering

    Though the Alpha has become less relevant with time, we still have several modern CPU families which carry on in the same tradition of weak hardware ordering:

    ARM, which is currently found in hundreds of millions of smartphones and tablets, and is increasingly popular in multicore configurations.
    PowerPC, which the Xbox 360 in particular has already delivered to 70 million living rooms in a multicore configuration.
    Itanium, which Microsoft no longer supports in Windows, but which is still supported in Linux and found in HP servers.

    These families have memory models which are, in various ways, almost as weak as the Alpha’s, except for one common detail of particular interest to programmers: they maintain data dependency ordering. What does that mean? It means that if you write A->B in C/C++, you are always guaranteed to load a value of B which is at least as new as the value of A. The Alpha doesn’t guarantee that. I won’t dwell on data dependency ordering too much here, except to mention that the Linux RCU mechanism relies on it heavily.
    Strong Memory Models

    Let’s look at hardware memory models first. What, exactly, is the difference between a strong one and a weak one? There is actually a little disagreement over this question, but my feeling is that in 80% of the cases, most people mean the same thing. Therefore, I’d like to propose the following definition:

    A strong hardware memory model is one in which every machine instruction comes implicitly with acquire and release semantics. As a result, when one CPU core performs a sequence of writes, every other CPU core sees those values change in the same order that they were written.

    It’s not too hard to visualize. Just imagine a refinement of the source control analogy where all modifications are committed to shared memory in-order (no StoreStore reordering), pulled from shared memory in-order (no LoadLoad reordering), and instructions are always executed in-order (no LoadStore reordering). StoreLoad reordering, however, still remains possible.

    Under the above definition, the x86/64 family of processors is usually strongly-ordered. There are certain cases in which some of x86/64’s strong ordering guarantees are lost, but for the most part, as application programmers, we can ignore those cases. It’s true that an x86/64 processor can execute instructions out-of-order, but that’s a hardware implementation detail – what matters is that it still keeps its memory interactions in-order, so in a multicore environment, we can still consider it strongly-ordered. Historically, there has also been a little confusion due to evolving specs.

    Apparently SPARC processors, when running in TSO mode, are another example of a strong hardware ordering. TSO stands for “total store order”, which in a subtle way, is different from the definition I gave above. It means that there is always a single, global order of writes to shared memory from all cores. The x86/64 has this property too: See Volume 3, sections 8.2.3.6-8 of Intel’s x86/64 Architecture Specification for some examples. From what I can tell, the TSO property isn’t usually of direct interest to low-level lock-free programmers, but it is a step towards sequential consistency.
    Sequential Consistency

    In a sequentially consistent memory model, there is no memory reordering. It’s as if the entire program execution is reduced to a sequential interleaving of instructions from each thread. In particular, the result r1 = r2 = 0 from Memory Reordering Caught in the Act becomes impossible.

    These days, you won’t easily find a modern multicore device which guarantees sequential consistency at the hardware level. However, it seems at least one sequentially consistent, dual-processor machine existed back in 1989: The 386-based Compaq SystemPro. According to Intel’s docs, the 386 wasn’t advanced enough to perform any memory reordering at runtime.

    In any case, sequential consistency only really becomes interesting as a software memory model, when working in higher-level programming languages. In Java 5 and higher, you can declare shared variables as volatile. In C++11, you can use the default ordering constraint, memory_order_seq_cst, when performing operations on atomic library types. If you do those things, the toolchain will restrict compiler reordering and emit CPU-specific instructions which act as the appropriate memory barrier types. In this way, a sequentially consistent memory model can be “emulated” even on weakly-ordered multicore devices. If you read Herlihy & Shavit’s The Art of Multiprocessor Programming, be aware that most of their examples assume a sequentially consistent software memory model.
    Further Details

    There are many other subtle details filling out the spectrum of memory models, but in my experience, they haven’t proved quite as interesting when writing lock-free code at the application level. There are things like control dependencies, causal consistency, and different memory types. Still, most discussions come back to the four main categories I’ve outlined here.

    If you really want to nitpick the fine details of processor memory models, and you enjoy eating formal logic for breakfast, you can check out the admirably detailed work done at the University of Cambridge. Paul McKenney has written an accessible overview of some of their work and its associated tools.

  • Memory consistency models

    Memory consistency model (wiki)

    http://preshing.com/20120913/acquire-and-release-semantics/

    Weak vs. Strong Memory Models

    So why should programmers care about SC (sequential consistency)?

    The essence of locks.

    On a modern multi-core processor with multiple cache levels, when one CPU modifies a data unit in its cache, how does main memory learn of the change, and how do the other CPUs learn of it?

    The key point: CPU memory models fall into two broad kinds,

    strong memory models and weak memory models. The main differences between them are whether a CPU's writes to main memory become visible to others immediately, and whether the compiler and CPU are allowed to reorder memory-access instructions. Most current CPU architectures use a weak model: writes are not immediately visible and memory-access reordering is allowed.

    As shown in the figure (not reproduced here):

    A thread does not access shared variables in shared memory directly; it accesses a copy of the shared memory, which we call working memory. Working memory is a conceptual region: it may live in registers, in CPU caches, or in main memory, depending on the specific VM implementation and its runtime optimizations.

    Working memory:

    Threads access instance variables, static variables, and object/array elements (everything except local variables and parameters) through their own working-memory copies.

    One thread's working memory is not visible to other threads.

    Variables are passed between threads through main memory.

    Since the memory model under multithreading resembles the three-level CPU -> CPU cache -> main memory hierarchy, the same memory-sharing problems described above arise here as well. In other words, the following issues exist:

    While one thread operates on a shared variable, it must not be interfered with by other threads (atomicity).

    When one thread changes a shared variable, how do the other threads see the change promptly (visibility)?

    How do we guarantee that multiple threads operate on shared variables in an orderly fashion (ordering)?

  • Link: Add items to Settings.apk

    http://forum.xda-developers.com/showthread.php?t=2184207

  • Set up a git repository mirror

    ## Create empty repository on server

    cd /repo
    GIT_DIR=avxsynth.git git init
    cd avxsynth.git
    git --bare update-server-info
    cp hooks/post-update.sample hooks/post-update

    ## Clone origin repo on workstation
    git clone --mirror https://github.com/avxsynth/avxsynth.git

    ## Setup push URL to the new repo
    git remote set-url --push origin ssh://git.zhenglei.net/repo/avxsynth.git

    ## Push back to the new repo
    git fetch origin
    git push --mirror
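
    To keep the mirror in sync afterwards, the fetch/push pair can be wrapped in a cron job on the workstation (a sketch; the path to the local mirror clone is hypothetical):

    # crontab -e: refresh the mirror hourly
    # 0 * * * * cd /path/to/avxsynth.git && git fetch origin && git push --mirror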