/*
 * KTCPVS       An implementation of the TCP Virtual Server daemon inside
 *              kernel for the LINUX operating system. KTCPVS can be used
 *              to build a moderately scalable and highly available server
 *              based on a cluster of servers, with more flexibility.
 *
 * Version:     $Id$
 *
 * Authors:     Wensong Zhang
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 */

/*
 * NOTE(review): the header names of the following #include directives were
 * lost when this file was extracted (likely "<...>" stripped as markup).
 * They must be restored from the original KTCPVS source before this file
 * can compile; the count and order below match the extracted text.
 */
#include
#include
#include
#include
#include
#include
#include
#include
/* #include */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "tcp_vs.h"

/*
 * FIXME: Locking needs reconsideration in the near future!!!!!!!!!!!
 */

/* Set non-zero via /proc/sys/net/ktcpvs/unload to request module unload. */
int sysctl_ktcpvs_unload = 0;

/*
 * Template virtual server: new servers are created by copying this one
 * (see tcp_vs_add_virtualserver) and its sysctl table is the template for
 * per-server sysctl registration (see dummy_vs_sysctl).
 * Uses the old GNU "label:" designated-initializer syntax (pre-C99).
 */
static struct tcp_vs dummy_vs = {
	start: 0,
	stop: 0,
	name: "dummy_vs",
	serverport: 8080,
	maxSpareServers: 4,
	minSpareServers: 2,
	startservers: 2,
	maxClients: 1000,
};

/* Forward declarations for the per-server sysctl and /proc plumbing. */
int tcp_vs_sysctl_register(struct tcp_vs *vs);
int tcp_vs_sysctl_unregister(struct tcp_vs *vs);
struct proc_dir_entry *proc_net_ktcpvs_vs_create(struct tcp_vs *vs);
void proc_net_ktcpvs_vs_release(struct tcp_vs *vs);

/*
 * Lookup destination by {addr,port} in the given service.
 *
 * Walks vs->destinations under the read side of __tcp_vs_lock.
 * NOTE(review): the pointer is returned AFTER the lock is dropped and no
 * reference is taken on the dest, so the caller must guarantee the entry
 * cannot be freed concurrently — TODO confirm against the callers' locking.
 */
struct tcp_vs_dest *
tcp_vs_lookup_dest(struct tcp_vs *vs, __u32 daddr, __u16 dport)
{
	struct tcp_vs_dest *dest;
	struct list_head *l, *e;

	read_lock_bh(&__tcp_vs_lock);

	/*
	 * Find the destination for the given virtual server
	 */
	l = &vs->destinations;
	for (e=l->next; e!=l; e=e->next) {
		dest = list_entry(e, struct tcp_vs_dest, n_list);
		if ((dest->addr == daddr) && (dest->port == dport)) {
			/* HIT */
			read_unlock_bh(&__tcp_vs_lock);
			return dest;
		}
	}

	read_unlock_bh(&__tcp_vs_lock);
	return NULL;
}

/*
 * Add a destination (real server) into an TCP virtual server.
 * Returns 0 on success, -ERANGE/-EEXIST/-ENOMEM style errors otherwise
 * (body continues past this extraction seam).
 */
int
tcp_vs_add_dest(struct tcp_vs *vs, __u32 daddr, __u16 dport, int weight)
{
	struct tcp_vs_dest *dest;
EnterFunction("tcp_vs_add_dest"); if (weight < 0) { TCP_VS_ERR("server weight less than zero\n"); return -ERANGE; } /* * Check if the dest already exists in the list */ dest = tcp_vs_lookup_dest(vs, daddr, dport); if (dest != NULL) { TCP_VS_DBG("tcp_vs_add_dest(): dest already exists\n"); return -EEXIST; } /* * Allocate and initialize the dest structure */ dest = kmalloc(sizeof(struct tcp_vs_dest), GFP_KERNEL); if (dest == NULL) { TCP_VS_ERR("kmalloc failed.\n"); return -EFAULT; } memset(dest, 0, sizeof(struct tcp_vs_dest)); dest->addr = daddr; dest->port = dport; dest->weight = weight; atomic_set(&dest->conns, 0); atomic_set(&dest->refcnt, 0); write_lock_bh(&__tcp_vs_lock); /* * Add the dest entry into the list */ list_add(&dest->n_list, &vs->destinations); atomic_inc(&dest->refcnt); write_unlock_bh(&__tcp_vs_lock); LeaveFunction("tcp_vs_add_dest"); return 0; } /* * Edit a destination in the given virtual server */ int tcp_vs_edit_dest(struct tcp_vs *vs, __u32 daddr, __u16 dport, int weight) { struct tcp_vs_dest *dest; EnterFunction("tcp_vs_edit_dest"); if (weight < 0) { TCP_VS_ERR("server weight less than zero\n"); return -ERANGE; } /* * Lookup the destination list */ dest = tcp_vs_lookup_dest(vs, daddr, dport); if (dest == NULL) { TCP_VS_DBG("destination not exist\n"); return -ENOENT; } write_lock_bh(&__tcp_vs_lock); dest->weight = weight; write_unlock_bh(&__tcp_vs_lock); LeaveFunction("tcp_vs_edit_dest"); return 0; } /* * Delete a destination from the given virtual server */ void __tcp_vs_del_dest(struct tcp_vs_dest *dest) { /* dest->flags &= ~TCP_VS_DEST_F_AVAILABLE; */ /* * Remove it from the d-linked destination list. */ list_del(&dest->n_list); /* * Decrease the refcnt of the dest, and free the dest * if nobody refers to it (refcnt=0). Otherwise, throw * the destination into the trash. 
*/ if (atomic_dec_and_test(&dest->refcnt)) kfree_s(dest, sizeof(*dest)); } int tcp_vs_del_dest(struct tcp_vs *vs, __u32 daddr, __u16 dport) { struct tcp_vs_dest *dest; EnterFunction("tcp_vs_del_dest"); /* * Lookup the destination list */ dest = tcp_vs_lookup_dest(vs, daddr, dport); if (dest == NULL) { TCP_VS_DBG("tcp_vs_del_dest(): destination not found!\n"); return -ENOENT; } write_lock_bh(&__tcp_vs_lock); /* * Remove dest from the destination list */ __tcp_vs_del_dest(dest); /* * Called the update_service function of its scheduler */ vs->scheduler->update_vs(vs); write_unlock_bh(&__tcp_vs_lock); LeaveFunction("tcp_vs_del_dest"); return 0; } int parse_command(char *buf, char *cmd, u_int32_t *addr, u_int16_t *port, int *weight) { char *p; char word[30]; char *q=word; int i=0; /* parse command */ if (buf == NULL) return i; p = tcp_vs_getword(buf, q, 30); TCP_VS_DBG("cmd %s\n", q); strcpy(cmd, q); i++; /* parse addr */ if (p == NULL) return i; p = tcp_vs_getword(p, q, 30); TCP_VS_DBG("addr %s\n", q); *addr = htonl(simple_strtoul(q, &q, 16)); i++; /* parse port */ if (p == NULL) return i; p = tcp_vs_getword(p, q, 30); TCP_VS_DBG("port %s\n", q); *port = htons(simple_strtoul(q, &q, 16)); i++; /* parse weight */ if (p == NULL) return i; p = tcp_vs_getword(p, q, 30); TCP_VS_DBG("weight %s\n", q); *weight = simple_strtoul(q, &q, 10); i++; return i; } int tcp_vs_write_destinations(struct tcp_vs *vs, char *str) { int n; char cmd[30]; __u32 addr; __u16 port; int weight; int ret = -EFAULT; n=parse_command(str, cmd, &addr, &port, &weight); TCP_VS_DBG("The command: %s %08X %X %d %d\n", cmd, addr, port, weight, n); if (n!=3 && n!=4) { TCP_VS_ERR("write command is not correct\n"); return -EFAULT; } if (!strcmp(cmd, "add")) ret = tcp_vs_add_dest(vs, addr, port, weight); else if (!strcmp(cmd, "set")) ret = tcp_vs_edit_dest(vs, addr, port, weight); else if (!strcmp(cmd, "del")) ret = tcp_vs_del_dest(vs, addr, port); return ret; } /* this is taken from proc_dostring */ int 
proc_do_tcpvs_string(char *str, int strl, int write, struct file *filp,
	void *buffer, size_t *lenp)
{
	/* Copy a string between a kernel buffer (str, capacity strl) and a
	 * userspace sysctl buffer, modelled on the kernel's proc_dostring.
	 * write != 0: user -> kernel (stops at NUL or '\n', always
	 * NUL-terminates str).  write == 0: kernel -> user (appends '\n'
	 * if it fits and sets *lenp to the bytes produced). */
	int len;
	char *p, c;

	/* Nothing to do on empty buffers, or on a re-read (f_pos != 0). */
	if (!str || !strl || !*lenp ||
	    (filp->f_pos && !write)) {
		*lenp = 0;
		return 0;
	}
	if (write) {
		/* Measure the incoming string up to NUL/newline/*lenp. */
		len = 0;
		p = buffer;
		while (len < *lenp) {
			if(get_user(c, p++))
				return -EFAULT;
			if (c == 0 || c == '\n')
				break;
			len++;
		}
		/* Clamp to the kernel buffer, reserving room for the NUL. */
		if (len >= strl)
			len = strl-1;
		if(copy_from_user(str, buffer, len))
			return -EFAULT;
		((char *) str)[len] = 0;
		/* Consume the whole write so a repeated write restarts. */
		filp->f_pos += *lenp;
	} else {
		len = strlen(str);
		if (len > strl)
			len = strl;
		if (len > *lenp)
			len = *lenp;
		if (len)
			if(copy_to_user(buffer, str, len))
				return -EFAULT;
		/* Append a trailing newline when there is room for it. */
		if (len < *lenp) {
			if(put_user('\n', ((char *) buffer) + len))
				return -EFAULT;
			len++;
		}
		*lenp = len;
		filp->f_pos += len;
	}
	return 0;
}


/* Per-server "scheduler" sysctl handler.  Writing a scheduler name
 * rebinds the server's scheduler (loading tcp_vs_<name>.o if needed);
 * writing "none" unbinds it.  Reading reports the current name or
 * "none".  NOTE(review): rebinding is done without taking
 * __tcppvs_lock-style protection here — TODO confirm whether callers
 * serialize sysctl writes (see the FIXME at the top of the file). */
static int
proc_do_tcpvs_scheduler (ctl_table *table, int write,
	struct file *filp, void *buffer, size_t *lenp)
{
	struct tcp_vs *vs;
	struct tcp_vs_scheduler *sched;
	char str[256];

	if (!table->data || !table->maxlen || !*lenp ||
	    (filp->f_pos && !write)) {
		*lenp = 0;
		return 0;
	}
	vs = (struct tcp_vs *) table->data;
	sched = vs->scheduler;
	if (write) {
		if (proc_do_tcpvs_string(str, table->maxlen,
					 write, filp, buffer, lenp))
			return -EFAULT;
		/* update the scheduler of virtual server */
		if (sched) {
			if (!strcmp(sched->name, str))
				/* the same scheduler as the original one */
				return 0;
			else
				/* unbind the original scheduler */
				tcp_vs_unbind_scheduler(vs);
		}

		/* if str is "none", set the scheduler NULL */
		if (!strcmp(str, "none")) {
			vs->scheduler = NULL;
			return 0;
		}

		/* bind scheduler here */
		sched = tcp_vs_get_scheduler(str);
		if (sched == NULL) {
			TCP_VS_INFO("Scheduler module tcp_vs_%s.o not found\n",
				    str);
			return -EFAULT;
		}

		tcp_vs_bind_scheduler(vs, sched);
		/* bind took its own reference; drop the lookup reference. */
		tcp_vs_put_scheduler(sched);
	} else {
		if (!sched) {
			if (proc_do_tcpvs_string("none", 4, write,
						 filp, buffer, lenp))
				return -EFAULT;
		} else {
			if (proc_do_tcpvs_string(sched->name,
						 strlen(sched->name),
						 write, filp, buffer, lenp))
				return -EFAULT;
		}
	}

	return 0;
}


static int
proc_do_tcpvs_destinations (ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp) { struct tcp_vs *vs; char str[1024]; if (!table->data || !table->maxlen || !*lenp || (filp->f_pos && !write)) { *lenp = 0; return 0; } vs = (struct tcp_vs *) table->data; if (write) { if (proc_do_tcpvs_string(str, table->maxlen, write, filp, buffer, lenp)) return -EFAULT; /* process destination write commands here */ return tcp_vs_write_destinations(vs, str); } else { struct list_head *l, *e; struct tcp_vs_dest *dest; int i=0; /* it might be some problem in calling it this way */ l = &vs->destinations; for (e=l->next; e!=l; e=e->next) { dest = list_entry(e, struct tcp_vs_dest, n_list); i += sprintf(str+i, "%08X %X %d\n", dest->addr, dest->port, dest->weight); } if (proc_do_tcpvs_string(str, strlen(str), write, filp, buffer, lenp)) return -EFAULT; } return 0; } struct tcp_vs *tcp_vs_lookup_byname(const char *name) { struct list_head *e; struct tcp_vs *vs; list_for_each (e, &tcp_vs_list) { vs = list_entry(e, struct tcp_vs, list); if (!strcmp(name, vs->name)) /* HIT */ return vs; } return NULL; } int tcp_vs_add_virtualserver(struct tcp_vs_ctl *ctl) { struct tcp_vs *vs; struct tcp_vs_scheduler *sched; EnterFunction("tcp_vs_add_virtualserver"); /* lookup scheduler here */ sched = tcp_vs_get_scheduler(ctl->sched_name); if (sched == NULL) { TCP_VS_INFO("Scheduler module tcp_vs_%s.o not found\n", ctl->sched_name); return -EFAULT; } vs = kmalloc(sizeof(*vs), GFP_KERNEL); if (!vs) { TCP_VS_ERR("no available memory\n"); return -EFAULT; } memcpy(vs, &dummy_vs, sizeof(*vs)); vs->name = strdup(ctl->name); vs->serverport = ctl->serverport; vs->timeout = ctl->timeout; INIT_LIST_HEAD(&vs->destinations); INIT_LIST_HEAD(&vs->sched_rule); atomic_inc(&tcp_vs_index); vs->index = atomic_read(&tcp_vs_index); if (tcp_vs_sysctl_register(vs)) { atomic_dec(&tcp_vs_index); kfree(vs); return -EFAULT; } proc_net_ktcpvs_vs_create(vs); list_add(&vs->list, &tcp_vs_list); tcp_vs_bind_scheduler(vs, 
sched); tcp_vs_put_scheduler(sched); LeaveFunction("tcp_vs_add_virtualserver"); return 0; } int tcp_vs_edit_virtualserver(struct tcp_vs *vs, struct tcp_vs_ctl *ctl) { struct tcp_vs_scheduler *sched; EnterFunction("tcp_vs_edit_virtualserver"); /* lookup scheduler here */ if (strcmp(vs->scheduler->name, ctl->sched_name)) { sched = tcp_vs_get_scheduler(ctl->sched_name); if (sched == NULL) { TCP_VS_INFO("Scheduler module tcp_vs_%s.o not found\n", ctl->sched_name); return -EFAULT; } tcp_vs_unbind_scheduler(vs); tcp_vs_bind_scheduler(vs, sched); tcp_vs_put_scheduler(sched); } /* more attribute should be assigned here!!!!!! */ vs->serverport = ctl->serverport; vs->timeout = ctl->timeout; LeaveFunction("tcp_vs_edit_virtualserver"); return 0; } int tcp_vs_del_virtualserver(struct tcp_vs *vs) { EnterFunction("tcp_vs_del_virtualserver"); if (atomic_read(&vs->running)) { TCP_VS_ERR("The VS is running, you'd better stop it first" "before deleting it.\n"); return -1; } tcp_vs_unbind_scheduler(vs); tcp_vs_sysctl_unregister(vs); proc_net_ktcpvs_vs_release(vs); list_del(&vs->list); kfree(vs); LeaveFunction("tcp_vs_del_virtualserver"); return 0; } int tcp_vs_flush() { struct list_head *l; struct tcp_vs *vs; EnterFunction("tcp_vs_flush"); for (l=&tcp_vs_list; l->next!=l; ) { vs = list_entry(l->next, struct tcp_vs, list); if (tcp_vs_del_virtualserver(vs) == -1) return -1; } LeaveFunction("tcp_vs_flush"); return 0; } int tcp_vs_control(struct tcp_vs_ctl *ctl, char *ctl_data) { int ret = -EINVAL; struct tcp_vs *vs; EnterFunction("tcp_vs_control"); /* * Avoid the non-terminated string here */ ctl->name[KTCPVS_VSNAME_MAXLEN-1] = 0; ctl->sched_name[KTCPVS_SCHEDNAME_MAXLEN-1] = 0; TCP_VS_DBG("name=%s, cmd=%d data=%s\n", ctl->name, ctl->cmd, ctl_data); /* * Flush all the TCP virtual servers... 
*/ if (ctl->cmd == TCP_VS_CMD_FLUSH) return tcp_vs_flush(); vs = tcp_vs_lookup_byname(ctl->name); if (!vs) { if (ctl->cmd==TCP_VS_CMD_ADD) return tcp_vs_add_virtualserver(ctl); else return -ESRCH; } switch (ctl->cmd) { case TCP_VS_CMD_START: vs->start = 1; ret = 0; break; case TCP_VS_CMD_STOP: vs->stop = 0; ret = 0; break; case TCP_VS_CMD_SET: ret = tcp_vs_edit_virtualserver(vs, ctl); break; case TCP_VS_CMD_DEL: ret = tcp_vs_del_virtualserver(vs); break; case TCP_VS_CMD_ADD_DEST: ret = tcp_vs_add_dest(vs, ctl->daddr, ctl->dport, ctl->weight); break; case TCP_VS_CMD_SET_DEST: ret =tcp_vs_edit_dest(vs, ctl->daddr, ctl->dport, ctl->weight); break; case TCP_VS_CMD_DEL_DEST: ret = tcp_vs_del_dest(vs, ctl->daddr, ctl->dport); break; default: /* scheduler may have control, so call it here */ if (ctl_data && vs->scheduler) vs->scheduler->control(vs, ctl, ctl_data); } LeaveFunction("tcp_vs_control"); return ret; } /* * Just thought that I better move /proc/sys/net/ktcpvs/config to * /proc/net/ktcpvs/config. It seems to make more sense!!!!!!!!! * Anyway, it is not difficult to move, because the tcp_vs_ctl * is already isolated from its caller. 
*/ static int proc_do_tcpvs_config (ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp) { struct tcp_vs_ctl vs_ctl; if (write) { if(*lenp < sizeof(vs_ctl)) return -EINVAL; if(copy_from_user(&vs_ctl, buffer, sizeof(vs_ctl))) return -EFAULT; return tcp_vs_control(&vs_ctl, NULL); } else { /* list all tcp_vs information here */ return 0; } } static struct ctl_table_header *ktcpvs_table_header; static ctl_table ktcpvs_table[] = { {NET_KTCPVS_CONFIG, "config", NULL, 4096, 0644, NULL, &proc_do_tcpvs_config}, {NET_KTCPVS_UNLOAD, "unload", &sysctl_ktcpvs_unload, sizeof(int), 0644, NULL, &proc_dointvec}, {0} }; static ctl_table ktcpvs_dir_table[] = { {NET_KTCPVS, "ktcpvs", NULL, 0, 0555, ktcpvs_table}, {0} }; static ctl_table ktcpvs_root_table[] = { {CTL_NET, "net", NULL, 0, 0555, ktcpvs_dir_table}, {0} }; static struct tcp_vs_sysctl_table dummy_vs_sysctl = { NULL, {{NET_KTCPVS_VS_START, "start", &dummy_vs.start, sizeof(int), 0644, NULL, &proc_dointvec}, {NET_KTCPVS_VS_STOP, "stop", &dummy_vs.stop, sizeof(int), 0644, NULL, &proc_dointvec}, {NET_KTCPVS_VS_SERVERPORT, "serverport", &dummy_vs.serverport, sizeof(int), 0644, NULL, &proc_dointvec}, {NET_KTCPVS_VS_SERVERPORT, "startservers", &dummy_vs.startservers, sizeof(int), 0644, NULL, &proc_dointvec}, {NET_KTCPVS_VS_SCHEDULER, "scheduler", &dummy_vs, KTCPVS_SCHEDNAME_MAXLEN, 0644, NULL, &proc_do_tcpvs_scheduler}, {NET_KTCPVS_VS_DESTINATIONS, "destinations", &dummy_vs, 256, 0644, NULL, &proc_do_tcpvs_destinations}, {0}}, {{NET_KTCPVS_DUMMYVS, "dummy_vs", NULL, 0, 0555, dummy_vs_sysctl.vs_vars}, {0}}, {{NET_KTCPVS, "ktcpvs", NULL, 0, 0555, dummy_vs_sysctl.vs_dir}, {0}}, {{CTL_NET, "net", NULL, 0, 0555, dummy_vs_sysctl.ktcpvs_dir}, {0}} }; int tcp_vs_sysctl_register(struct tcp_vs *vs) { int i; struct tcp_vs_sysctl_table *t; if (vs == NULL) return -1; t = kmalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) return -1; memcpy(t, &dummy_vs_sysctl, sizeof(*t)); for (i=0; ivs_vars)/sizeof(t->vs_vars[0])-1; i++) { 
t->vs_vars[i].data += (char*)vs - (char*)&dummy_vs; t->vs_vars[i].de = NULL; } t->vs_dir[0].procname = vs->name; t->vs_dir[0].ctl_name = vs->index; t->vs_dir[0].child = t->vs_vars; t->vs_dir[0].de = NULL; t->ktcpvs_dir[0].child = t->vs_dir; t->ktcpvs_dir[0].de = NULL; t->root_dir[0].child = t->ktcpvs_dir; t->root_dir[0].de = NULL; t->sysctl_header = register_sysctl_table(t->root_dir, 0); if (t->sysctl_header == NULL) { kfree(t); return -1; } else { vs->sysctl = t; return 0; } } int tcp_vs_sysctl_unregister(struct tcp_vs *vs) { if (vs->sysctl) { struct tcp_vs_sysctl_table *t = vs->sysctl; vs->sysctl = NULL; unregister_sysctl_table(t->sysctl_header); kfree(t); } return 0; } /* /proc/net/ktcpvs/config */ static int tcp_vs_get_info(char *buf, char **start, off_t offset, int length) { struct list_head *l, *e; struct tcp_vs *vs; struct tcp_vs_dest *dest; off_t pos=0; int len=0; int size; EnterFunction("tcp_vs_get_info"); if (length < 0) return -EINVAL; list_for_each(l, &tcp_vs_list) { vs = list_entry(l, struct tcp_vs, list); size = sprintf(buf+len, "TCPVS %s\n" " scheduler = %s\n" " serverport = %d\n" " startservers = %d\n" " maxClients = %d\n", vs->name, vs->scheduler->name, vs->serverport, vs->startservers, vs->maxClients); len += size; pos += size; if (pos <= offset) len=0; if (pos >= offset+length) break; list_for_each(e, &vs->destinations) { dest = list_entry(e, struct tcp_vs_dest, n_list); size = sprintf(buf+len, " server = %08X:%X %d\n", ntohl(dest->addr), ntohs(dest->port), dest->weight); len += size; pos += size; if (pos <= offset) len=0; if (pos >= offset+length) goto done; } } done: *start = buf+len-(pos-offset); /* Start of wanted data */ len = pos-offset; if (len > length) len = length; if (len < 0) len = 0; EnterFunction("tcp_vs_get_info"); return len; } static int tcp_vs_write_proc(struct file *file, const char *buffer, unsigned long count, void *data) { struct tcp_vs_ctl vs_ctl; char *ctl_data=NULL; size_t len; EnterFunction("tcp_vs_write_proc"); if 
(count < sizeof(vs_ctl)) return -EINVAL; if (copy_from_user(&vs_ctl, buffer, sizeof(vs_ctl))) return -EFAULT; if (count > sizeof(vs_ctl)) { len = count-sizeof(vs_ctl); if (len > PAGE_SIZE) { if (!(ctl_data=vmalloc(len))) return -ENOMEM; } else { if (!(ctl_data=kmalloc(len, GFP_KERNEL))) return -ENOMEM; } if (copy_from_user(ctl_data, buffer+sizeof(vs_ctl), len)) return -EFAULT; } file->f_pos += count; tcp_vs_control(&vs_ctl, ctl_data); LeaveFunction("tcp_vs_write_proc"); return count; } /* per tcpvs info output */ static ssize_t tcp_vs_pervs_read_proc(struct file * file, char * buf, size_t count, loff_t *ppos) { struct proc_dir_entry *de; struct list_head *l; struct tcp_vs *vs; struct tcp_vs_dest *dest; char *page; int len; EnterFunction("tcp_vs_pervs_read_proc"); if (count == 0) return 0; if (file->f_pos) return 0; de = (struct proc_dir_entry*) file->f_dentry->d_inode->u.generic_ip; if (!de || !de->data) return -ENOTDIR; vs = (struct tcp_vs *) de->data; if (!(page = (char*) __get_free_page(GFP_KERNEL))) return -ENOMEM; len = sprintf(page, "TCPVS %s\n" " scheduler = %s\n" " serverport = %d\n" " startservers = %d\n" " maxClients = %d\n", vs->name, vs->scheduler->name, vs->serverport, vs->startservers, vs->maxClients); list_for_each(l, &vs->destinations) { dest = list_entry(l, struct tcp_vs_dest, n_list); len += sprintf(buf+len, " server = %08X:%X %d\n", ntohl(dest->addr), ntohs(dest->port), dest->weight); if (len > PAGE_SIZE-80) break; } if (len <= count) { if (copy_to_user(buf, page, len)) return -EFAULT; file->f_pos += len; } else { len = 0; } TCP_VS_DBG("count:%d ppos:%d len:%d\n", count, (int)*ppos, len); free_page((unsigned long) page); LeaveFunction("tcp_vs_pervs_read_proc"); return count; } struct file_operations proc_net_ktcpvs_file_operations = { read: tcp_vs_pervs_read_proc, }; static struct proc_dir_entry *proc_net_ktcpvs; struct proc_dir_entry *proc_net_ktcpvs_vs_create(struct tcp_vs *vs) { struct proc_dir_entry *de; de = create_proc_entry(vs->name, 
S_IFREG|S_IRUGO, proc_net_ktcpvs); if (!de) return NULL; de->data = (void *) vs; de->proc_fops = &proc_net_ktcpvs_file_operations; return de; } void proc_net_ktcpvs_vs_release(struct tcp_vs *vs) { remove_proc_entry(vs->name, proc_net_ktcpvs); } void tcp_vs_control_start(void) { struct proc_dir_entry *ent; INIT_LIST_HEAD(&dummy_vs.destinations); INIT_LIST_HEAD(&dummy_vs.sched_rule); proc_net_ktcpvs = proc_mkdir("ktcpvs", proc_net); ent = create_proc_entry("config", S_IFREG|S_IRUGO|S_IWUSR, proc_net_ktcpvs); if (ent) { ent->get_info = tcp_vs_get_info; ent->write_proc = tcp_vs_write_proc; } ktcpvs_table_header = register_sysctl_table(ktcpvs_root_table, 0); } void tcp_vs_control_stop(void) { unregister_sysctl_table(ktcpvs_table_header); remove_proc_entry("config", proc_net_ktcpvs); remove_proc_entry("ktcpvs", proc_net); }