Sunday, February 27, 2011

Linux Modules(14.1)- Read Copy Update


RCU (Read-Copy Update) is one of the kernel's synchronization mechanisms. It lets multiple readers access data while a writer is updating it; a reader may see the data from before or after the update, but what it sees is always self-consistent (entirely old or entirely new, because RCU publishes updates through a single pointer dereference and assignment). RCU also guarantees that data still in use on the read side will not be freed out from under the readers (how RCU achieves this is left for the next article).


This figure is the classic illustration of RCU. Each blue reader starts at rcu_read_lock() and ends at rcu_read_unlock(); removal, grace period, and reclamation below represent the writer's phases. All that must be guaranteed is that every reader that read the old data (that is, every reader whose critical section began during removal) leaves the read side before the grace period ends. As the sharp-eyed will notice, any reader that starts after the grace period begins reads the new data, so RCU does not care how long such readers keep running.
    The three phases of RCU
  1. removal: update the pointer.
  2. grace period: wait until every reader holding the old data has left the RCU read side.
  3. reclamation: reclaim the old data.

RCU fills the same role as a reader-writer lock, so let's look at the typical form of an RCU reader and writer.
struct foo {
    int x;
};

static struct foo *foo = NULL;

// Typical reader
static int reader(void)
{
    int ret;

    rcu_read_lock();
    ret = rcu_dereference(foo)->x;
    rcu_read_unlock();

    return ret;
}

// Typical writer
static void writer(int x)
{
    struct foo *new_foo, *old_foo = foo;

    // allocate the new object new_foo
    new_foo = kmalloc(sizeof(struct foo), GFP_KERNEL);
    if (!new_foo)
        return;

    // copy the original contents
    *new_foo = *old_foo;

    // modify the contents
    new_foo->x = x;

    // removal
    rcu_assign_pointer(foo, new_foo);

    // grace period
    synchronize_rcu();

    // reclamation
    kfree(old_foo);

}

synchronize_rcu() is what waits out the grace period: it returns only after every reader still holding the old data has left its RCU read-side critical section, and only then does kfree(old_foo) run. Note that RCU only coordinates readers with the writer; concurrent writers must still serialize among themselves (e.g., with a spinlock).
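If the writer cannot afford to block, call_rcu() is the asynchronous alternative to synchronize_rcu(): it queues a callback to run after the grace period and returns immediately. A minimal sketch, assuming struct foo is extended with a struct rcu_head member:
struct foo {
    int x;
    struct rcu_head rcu;
};

// runs after the grace period ends; reclaims the old object
static void foo_reclaim(struct rcu_head *head)
{
    struct foo *old = container_of(head, struct foo, rcu);
    kfree(old);
}

// in writer(), instead of synchronize_rcu(); kfree(old_foo); use:
//     call_rcu(&old_foo->rcu, foo_reclaim);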




Saturday, February 26, 2011

Linux Kernel(8.1)- Notifier機制剖析


Linux Kernel(8)- Notification covered how to use notifiers; this article outlines how they are implemented. At bottom, every publish-and-subscribe pattern amounts to registering callback functions on a list and, when an event occurs, walking the list and invoking each callback in turn.


include/linux/notifier.h
#define BLOCKING_NOTIFIER_HEAD(name)                \
        struct blocking_notifier_head name =            \
        BLOCKING_NOTIFIER_INIT(name)

struct blocking_notifier_head {
    // the rwsem that makes this the "blocking" variant;
    // kernel/notifier.c carries the following comment:
    /*
     * Blocking notifier chain routines.  All access to the chain is
     * synchronized by an rwsem.
     */
    struct rw_semaphore rwsem;
    // head of the linked list of callback functions
    struct notifier_block __rcu *head;
};

// node structure of the linked list
struct notifier_block {
    // callback function
    int (*notifier_call)(struct notifier_block *, unsigned long, void *);
    struct notifier_block __rcu *next;
    // priority used when inserting into the list; a larger number means higher priority
    int priority;
};
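Before walking through the implementation, here is a minimal sketch of how a subsystem typically uses this API (the chain, callback, and event names below are hypothetical):
static BLOCKING_NOTIFIER_HEAD(my_chain);

// invoked for every event published on my_chain
static int my_callback(struct notifier_block *nb,
                       unsigned long event, void *data)
{
    pr_info("my_callback: event=%lu\n", event);
    return NOTIFY_OK;
}

static struct notifier_block my_nb = {
    .notifier_call = my_callback,
    .priority = 0,
};

// subscriber side:
//     blocking_notifier_chain_register(&my_chain, &my_nb);
// publisher side, when the event occurs:
//     blocking_notifier_call_chain(&my_chain, MY_EVENT, NULL);
// cleanup:
//     blocking_notifier_chain_unregister(&my_chain, &my_nb);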


kernel/notifier.c
/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 * // because a semaphore can only be used in process context
 *
 * Currently always returns zero.
 */
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
    int ret;

    /*
     * This code gets used during boot-up, when task switching is
     * not yet working and interrupts must remain disabled.  At
     * such times we must not call down_write().
     */
    if (unlikely(system_state == SYSTEM_BOOTING))
        return notifier_chain_register(&nh->head, n);

    // take the write side of the rwsem to keep the chain consistent
    down_write(&nh->rwsem);

    // the function that actually links the callback into the list
    ret = notifier_chain_register(&nh->head, n);

    up_write(&nh->rwsem);
    return ret;
}


/*
 *  Notifier chain core routines.  The exported routines below
 *  are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
                struct notifier_block *n)
{
    // nl points at the link to the first node of the list
    while ((*nl) != NULL) {
        // compare against the priority of each node in the list;
        // if the new one is higher, break and insert before (*nl)
        if (n->priority > (*nl)->priority)
            break;
        nl = &((*nl)->next);
    }
    // chain (*nl) behind the new node
    n->next = *nl;
    // publish n at this position (replacing *nl)
    rcu_assign_pointer(*nl, n);
    return 0;
}
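For example, registering callbacks with priorities 1, 3, and 2, in that order, leaves the chain ordered 3, 2, 1. Because the comparison is strictly greater-than, an entry whose priority equals an existing one is inserted after it, so equal-priority callbacks run in registration order.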

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
    // this function is essentially blocking_notifier_chain_register() in reverse

    int ret;

    /*
     * This code gets used during boot-up, when task switching is
     * not yet working and interrupts must remain disabled.  At
     * such times we must not call down_write().
     */
    if (unlikely(system_state == SYSTEM_BOOTING))
        return notifier_chain_unregister(&nh->head, n);

    // take the write side of the rwsem to keep the chain consistent
    down_write(&nh->rwsem);

    // the function that actually removes the callback
    ret = notifier_chain_unregister(&nh->head, n);

    up_write(&nh->rwsem);
    return ret;
}

static int notifier_chain_unregister(struct notifier_block **nl,
                struct notifier_block *n)
{
    while ((*nl) != NULL) {
        if ((*nl) == n) {
            // found n's position in the list; unlink it
            rcu_assign_pointer(*nl, n->next);
            return 0;
        }
        // advance nl to the next link
        nl = &((*nl)->next);
    }
    return -ENOENT;
}


int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                unsigned long val, void *v)
{
    return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}


/**
 *  __blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *  @nh: Pointer to head of the blocking notifier chain
 *  @val: Value passed unmodified to notifier function
 *  @v: Pointer passed unmodified to notifier function
 *  @nr_to_call: See comment for notifier_call_chain.
 *  @nr_calls: See comment for notifier_call_chain.
 *
 *  Calls each function in a notifier chain in turn.  The functions
 *  run in a process context, so they are allowed to block.
 *
 *  If the return value of the notifier can be and'ed
 *  with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *  will return immediately, with the return value of
 *  the notifier function which halted execution.
 *  Otherwise the return value is the return value
 *  of the last notifier function called.
 */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
            unsigned long val, void *v, int nr_to_call, int *nr_calls)
{
    int ret = NOTIFY_DONE;

    /*
     * We check the head outside the lock, but if this access is
     * racy then it does not matter what the result of the test
     * is, we re-check the list after having taken the lock anyway:
     */
    if (rcu_dereference_raw(nh->head)) {
        down_read(&nh->rwsem);
        // the routine that actually runs every callback in the list
        ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                nr_calls);
        up_read(&nh->rwsem);
    }
    return ret;
}

/**
 *  notifier_call_chain - Informs the registered notifiers about an event.
 *  @nl:        Pointer to head of the blocking notifier chain
 *  @val:       Value passed unmodified to notifier function
 *  @v:     Pointer passed unmodified to notifier function
 *  @nr_to_call:    Number of notifier functions to be called. Don't care
 *                  value of this parameter is -1.
 *  @nr_calls:  Records the number of notifications sent. Don't care
 *              value of this field is NULL.
 *  @returns:   notifier_call_chain returns the value returned by the
 *              last notifier function called.
 */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
            unsigned long val, void *v, int nr_to_call, int *nr_calls)
{
    int ret = NOTIFY_DONE;
    struct notifier_block *nb, *next_nb;

    nb = rcu_dereference_raw(*nl);

    // blocking_notifier_call_chain() passes nr_to_call = -1; since
    // nr_to_call is only ever decremented, that test is effectively
    // always true, leaving nb == NULL as the only stop condition
    while (nb && nr_to_call) {
        // next_nb is fetched *before* the callback runs: a callback
        // is allowed to unregister (and even free) its own
        // notifier_block, after which nb->next could no longer be
        // read safely
        next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
        if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
            WARN(1, "Invalid notifier called!");
            nb = next_nb;
            continue;
        }
#endif
        // invoke the callback
        ret = nb->notifier_call(nb, val, v);

        if (nr_calls)
            (*nr_calls)++;

        // if the return value carries the STOP bit, skip the remaining callbacks
        if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
            break;
        nb = next_nb;
        nr_to_call--;
    }
    return ret;
}

This article isn't quite finished yet; I'll fill in the rest later~~


Sunday, February 13, 2011

Netlink, NETLINK_FIREWALL


For an introduction to NETLINK, see Netlink introduction. This article assumes you already understand NETLINK and are ready to use the NETLINK_FIREWALL netlink family, which requires loading the ip_queue.ko module (unless you have built it directly into the kernel).
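If it was built as a module, loading it is a one-liner run as root in the guest (or insmod the .ko directly if modprobe's dependency data is not installed):
modprobe ip_queue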

Let's look from the kernel's point of view at what NETLINK_FIREWALL provides, starting with net/ipv4/netfilter/ip_queue.c
static int __init ip_queue_init(void)
{
    ...
    // register the handler for NETLINK_FIREWALL, namely ipq_rcv_skb
        ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
                                      ipq_rcv_skb, NULL, THIS_MODULE);
    ...
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
        mutex_lock(&ipqnl_mutex);
        __ipq_rcv_skb(skb);
        mutex_unlock(&ipqnl_mutex);
}

static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
    ...
    status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                              nlmsglen - NLMSG_LENGTH(0));
    if (status < 0)
            RCV_SKB_FAIL(status);

    if (flags & NLM_F_ACK)
            netlink_ack(skb, nlh, 0);
}

// this is the function that provides NETLINK_FIREWALL's control facilities:
// setting which form of each packet gets copied to user space,
// and setting a packet's verdict (NF_DROP/NF_ACCEPT, etc.)
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
                 unsigned char type, unsigned int len)
{
        int status = 0;

        if (len < sizeof(*pmsg))
                return -EINVAL;

        switch (type) {
        case IPQM_MODE:
         // which copy mode for user space: IPQ_COPY_META or IPQ_COPY_PACKET
                status = ipq_set_mode(pmsg->msg.mode.value,
                                      pmsg->msg.mode.range);
                break;

        case IPQM_VERDICT:
        // set the packet's verdict
                if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
                        status = -EINVAL;
                else
                        status = ipq_set_verdict(&pmsg->msg.verdict,
                                                 len - sizeof(*pmsg));
                break;
        default:
                status = -EINVAL;
        }
        return status;
}
This code gives a rough picture of NETLINK_FIREWALL's flow in the kernel and the facilities it provides: IPQM_MODE selects what form of each packet is copied to user space; once user space receives the data, it decides whether the packet should be DROPped or ACCEPTed and reports the decision back to the kernel via IPQM_VERDICT.

With the kernel side understood, let's write an example that DROPs ICMP echo packets whose sequence number is odd and ACCEPTs everything else.
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>

/**
 * Create the netlink socket
 */
static int create_nl_socket(int proto)
{
    int sock;
    struct sockaddr_nl addr;

    if ((sock = socket(AF_NETLINK, SOCK_RAW, proto)) < 0) {
        fprintf(stderr, "open sock failed.(%s)\n", strerror(errno));
        return -1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.nl_family = AF_NETLINK;
    addr.nl_pid = getpid();

    if (bind(sock, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
        fprintf(stderr, "bind failed.(%s)\n", strerror(errno));
        goto bind_err;
    }

    return sock;

bind_err:
    close(sock);
    return -1;
}


/**
 * Send an IPQM_MODE request (select the copy mode)
 */
static int ipq_set_mode(int sock, uint8_t mode, size_t range)
{
    unsigned char buf[1024];
    struct msghdr msg;
    struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
    struct nlmsghdr *nlh;
    struct ipq_peer_msg *pmsg;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = NLMSG_LENGTH(sizeof(struct ipq_peer_msg))
    };

    memset(buf, 0, sizeof(buf));
    msg = (struct msghdr) {
            .msg_name = (void *)&dst,
            .msg_namelen = sizeof(dst),
            .msg_iov = &iov,
            .msg_iovlen = 1,
    };

    nlh = (struct nlmsghdr*) buf;
    *nlh = (struct nlmsghdr) {
        .nlmsg_len = NLMSG_LENGTH(sizeof(struct ipq_peer_msg)),
        .nlmsg_flags = NLM_F_REQUEST,
        .nlmsg_type = IPQM_MODE,
        .nlmsg_pid = getpid(),
    };

    pmsg = (struct ipq_peer_msg*) NLMSG_DATA(nlh);
    *pmsg = (struct ipq_peer_msg) {
        .msg.mode.value = mode, // IPQ_COPY_META or IPQ_COPY_PACKET
        .msg.mode.range = range, // how many bytes of each packet to copy
    };

    printf("%s(#%d):  nlmsglen:%d, NLMSG_LENGTH(0):%d\n",
            __func__, __LINE__, nlh->nlmsg_len, NLMSG_LENGTH(0));
    return sendmsg(sock, &msg, 0);
}

/**
 * Print the packet contents
 */
static void print_pkt(ipq_packet_msg_t *ipq_pkt)
{
    int i;
    printf("packet_id:0x%lx, mark:0x%lx\n,"
            "hook:%d, idev:%s, odev:%s\n,"
            "hw_proto:%d, hw_type:%d, hw_addrlen:%d\n,"
            "hw_addr:0x%02X%02X%02X%02X%02X%02X%02X%02X\n,"
            "data_len:%ld, payload:\n",
            ipq_pkt->packet_id, ipq_pkt->mark,
            ipq_pkt->hook, ipq_pkt->indev_name, ipq_pkt->outdev_name,
            ipq_pkt->hw_protocol, ipq_pkt->hw_type, ipq_pkt->hw_addrlen,
            ipq_pkt->hw_addr[0], ipq_pkt->hw_addr[1],
            ipq_pkt->hw_addr[2], ipq_pkt->hw_addr[3],
            ipq_pkt->hw_addr[4], ipq_pkt->hw_addr[5],
            ipq_pkt->hw_addr[6], ipq_pkt->hw_addr[7],
            ipq_pkt->data_len);
    for (i = 0; i < ipq_pkt->data_len; i++) {
        printf("%02X ", ipq_pkt->payload[i]);
        if (!((i+1) % 16)) printf("\n");
    }
}


/**
 * Decide the verdict from the packet contents
 */
static void 
get_verdict(ipq_packet_msg_t *ipq_pkt, int *verdict, unsigned long *id)
{
    struct iphdr *iph;
    struct icmphdr *icmph;

    *id = ipq_pkt->packet_id;
    if (ipq_pkt->data_len < sizeof(struct iphdr)) {
        *verdict = NF_DROP;
        return;
    }
    iph = (struct iphdr *) ipq_pkt->payload;
    if (iph->protocol == IPPROTO_ICMP) {
        icmph = (struct icmphdr *) (ipq_pkt->payload + iph->ihl * 4);
        printf("Type: %d, Id:0x%04x, seq:0x%04x\n",
                icmph->type, ntohs(icmph->un.echo.id),
                ntohs(icmph->un.echo.sequence));
        // DROP echo packets whose sequence number is odd
        if (icmph->type == ICMP_ECHO &&
                ntohs(icmph->un.echo.sequence) % 2) {
            *verdict = NF_DROP;
            return;
        }
    }
    // ACCEPT everything else
    *verdict = NF_ACCEPT;
}

/**
 * Send an IPQM_VERDICT request for a packet
 */
static int set_verdict(int sock, int verdict, unsigned long id)
{
    unsigned char buf[1024];
    struct msghdr msg;
    struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
    struct nlmsghdr *nlh;
    struct ipq_peer_msg *pmsg;
    struct iovec iov = { .iov_base = (void *) buf, .iov_len = NLMSG_LENGTH(sizeof(struct ipq_peer_msg)) };

    memset(buf, 0, sizeof(buf));
    msg = (struct msghdr) {
            .msg_name = (void *)&dst,
            .msg_namelen = sizeof(dst),
            .msg_iov = &iov,
            .msg_iovlen = 1,
    };

    nlh = (struct nlmsghdr*) buf;
    *nlh = (struct nlmsghdr) {
        .nlmsg_len = NLMSG_LENGTH(sizeof(struct ipq_peer_msg)),
        .nlmsg_flags = NLM_F_REQUEST,
        .nlmsg_type = IPQM_VERDICT,
        .nlmsg_pid = getpid(),
    };

    pmsg = (struct ipq_peer_msg*) NLMSG_DATA(nlh);
    *pmsg = (struct ipq_peer_msg) {
        .msg.verdict.value = verdict, // NF_DROP or NF_ACCEPT
        // see the kernel's ipq_set_verdict() for the details on packet_id
        .msg.verdict.id = id,
    };

    char *p = "NONE";
    switch (verdict) {
        case NF_DROP:
            p = "DROP";
            break;
        case NF_ACCEPT:
            p = "ACCEPT";
            break;
    }
    printf("%s(#%d): %s packet %ld\n", __func__, __LINE__, p, id);
    return sendmsg(sock, &msg, 0);
}

/**
 * Receive queued packets and handle them
 */
static int ipq_recv_pkt(int sock, size_t len)
{
    // room for the netlink header, the ipq metadata, and the payload
    unsigned char buf[NLMSG_SPACE(sizeof(ipq_packet_msg_t)) + len];
    struct msghdr msg;
    struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
    struct nlmsghdr *nlh;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = sizeof(buf),
    };

    memset(buf, 0, sizeof(buf));
    msg = (struct msghdr) {
            .msg_name = (void *)&dst,
            .msg_namelen = sizeof(dst),
            .msg_iov = &iov,
            .msg_iovlen = 1,
    };

    len = recvmsg(sock, &msg, 0);
    if ((ssize_t) len < 0) {
        fprintf(stderr, "recvmsg failed.(%s)\n", strerror(errno));
        return -1;
    }
    for (nlh = (struct nlmsghdr *) buf; NLMSG_OK (nlh, len);
            nlh = NLMSG_NEXT (nlh, len)) {
        /* The end of multipart message. */
        if (nlh->nlmsg_type == NLMSG_DONE) {
            printf("NLMSG_DONE\n");
            return 0;
        }

        /* Do some error handling. */
        if (nlh->nlmsg_type == NLMSG_ERROR) {
            fprintf(stderr, "NLMSG_ERROR\n");
            return -1;
        }

        if (nlh->nlmsg_type == IPQM_PACKET) {
            int verdict;
            unsigned long id;

            print_pkt(NLMSG_DATA(nlh));
            get_verdict(NLMSG_DATA(nlh), &verdict, &id);
            set_verdict(sock, verdict, id);
        }
    }
    return 0;
}


int main(int argc, char *argv[])
{
    int sock, ret, cnt;

    sock = create_nl_socket(NETLINK_FIREWALL);
    if (sock < 0) {
        fprintf(stderr, "create_nl_socket failed\n");
        return -1;
    }

    ret = ipq_set_mode(sock, IPQ_COPY_PACKET, 2048);
    if (ret < 0) {
        fprintf(stderr, "ipq_set_mode failed\n");
    } else {
        printf("ipq_set_mode success\n");
    }

    for (cnt = 0; cnt < 10; cnt++) {
        ret = ipq_recv_pkt(sock, 2048);
    }
    close(sock);
    return 0;
}
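To try it out (the file name here is arbitrary), compile with something like gcc -Wall -o ipq_test ipq_test.c, then run the binary as root inside the guest before generating traffic, since opening a NETLINK_FIREWALL socket and issuing verdicts requires administrator privileges.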


Use iptables to send packets traversing the OUTPUT chain to the QUEUE target; only then will the kernel hand packets over to NETLINK_FIREWALL for processing.
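A rule along these lines (run inside the guest) does the job; here only ICMP is queued so other traffic is unaffected:
iptables -A OUTPUT -p icmp -j QUEUE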


You can see that the packet contents the kernel delivers start from the IP header.


You can see that half of the ping packets get DROPped.

Kernel version:2.6.37


Saturday, February 12, 2011

Installing iptables into QEMU


First, download iptables from netfilter.org.

Next, configure and make, then run DESTDIR=/path/to/install make install to install iptables into a specific directory (the QEMU root filesystem; mine lives at ~/initramfs).
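Concretely it looks something like this (assuming the source tree was unpacked into ~/iptables-x.y.z):
brook@vista:~/iptables-x.y.z$ ./configure
brook@vista:~/iptables-x.y.z$ make
brook@vista:~/iptables-x.y.z$ DESTDIR=~/initramfs make install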

Also copy the related files (libraries) into the root filesystem.
brook@vista:~/initramfs$ echo "/usr/local/lib" > etc/ld.so.conf
brook@vista:~/initramfs$ cp /lib64/ld-linux-x86-64.so.2 lib64
brook@vista:~/initramfs$ cp /lib/libdl.so.2 lib
brook@vista:~/initramfs$ cp /lib/libm.so.6 lib
brook@vista:~/initramfs$ cp /lib/libc.so.6 lib
brook@vista:~/initramfs$ cp /sbin/ldconfig sbin
brook@vista:~/initramfs$ cp /sbin/ldconfig.real sbin
brook@vista:~/initramfs$ fakeroot
root@vista:~/initramfs# chown -R root.root .
root@vista:~/initramfs# find . |cpio -H newc -o > ../initrd


You will find that the shared-library path does not include /usr/local/lib, so you have to run ldconfig.


iptables now runs, but the kernel modules it needs are not loaded, so copy the relevant .ko files into the root filesystem as well.

root@vista:~/initramfs# cp /usr/src/linux/net/ipv4/netfilter/iptable_filter.ko lib/modules/2.6.37/
root@vista:~/initramfs# cp /usr/src/linux/net/ipv4/netfilter/ip_tables.ko lib/modules/2.6.37/
root@vista:~/initramfs# cp /usr/src/linux/net/netfilter/x_tables.ko lib/modules/2.6.37/
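Inside the QEMU guest, load them in dependency order (the paths match the copies above; the prompt shown is the guest's):
root@qemu:/# insmod /lib/modules/2.6.37/x_tables.ko
root@qemu:/# insmod /lib/modules/2.6.37/ip_tables.ko
root@qemu:/# insmod /lib/modules/2.6.37/iptable_filter.ko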


At last, iptables runs successfully.

Related articles:
How to practice Linux modules with kvm/qemu

