Commit 3881ac441f642d56503818123446f7298442236b

Authored by Ursula Braun
Committed by David S. Miller
1 parent 4dc83dfd3e

af_iucv: add HiperSockets transport

The current transport mechanism for af_iucv is IUCV, the
communications facility offered by z/VM. To provide equivalent
support when running Linux in an LPAR, HiperSockets transport is
added to the AF_IUCV address family. It requires explicit binding
of an AF_IUCV socket to a HiperSockets device. A new packet_type,
ETH_P_AF_IUCV, is announced. An af_iucv-specific transport header,
preceding the skb data, is defined. A small protocol is implemented
for connecting and for flow control/congestion management.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
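
For illustration, a minimal user-space sketch of the explicit-bind
model described above (not part of this patch; the user ID "HIPERUID"
and application name "TESTAPP " are made-up examples, and struct
sockaddr_iucv is reproduced from the af_iucv header):

    #include <string.h>
    #include <sys/socket.h>

    #ifndef AF_IUCV
    #define AF_IUCV 32
    #endif

    struct sockaddr_iucv {                  /* per include/net/iucv/af_iucv.h */
        sa_family_t    siucv_family;
        unsigned short siucv_port;          /* reserved */
        unsigned int   siucv_addr;          /* reserved */
        char           siucv_nodeid[8];     /* reserved */
        char           siucv_user_id[8];    /* guest user id / device id */
        char           siucv_name[8];       /* application name */
    };

    int iucv_hs_bind_example(void)
    {
        struct sockaddr_iucv sa;
        int fd = socket(AF_IUCV, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;
        memset(&sa, 0, sizeof(sa));
        sa.siucv_family = AF_IUCV;
        /* a user id matching a HiperSockets device selects the new
         * HiperSockets transport instead of z/VM IUCV */
        memcpy(sa.siucv_user_id, "HIPERUID", 8);
        memcpy(sa.siucv_name, "TESTAPP ", 8);
        return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
    }

After a successful bind, connect(), listen()/accept(), sendmsg() and
recvmsg() are intended to work unchanged on top of either transport.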

Showing 2 changed files with 729 additions and 72 deletions

include/net/iucv/af_iucv.h
... ... @@ -14,6 +14,7 @@
14 14 #include <linux/list.h>
15 15 #include <linux/poll.h>
16 16 #include <linux/socket.h>
  17 +#include <net/iucv/iucv.h>
17 18  
18 19 #ifndef AF_IUCV
19 20 #define AF_IUCV 32
... ... @@ -33,6 +34,7 @@
33 34 };
34 35  
35 36 #define IUCV_QUEUELEN_DEFAULT 65535
  37 +#define IUCV_HIPER_MSGLIM_DEFAULT 128
36 38 #define IUCV_CONN_TIMEOUT (HZ * 40)
37 39 #define IUCV_DISCONN_TIMEOUT (HZ * 2)
38 40 #define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
... ... @@ -57,8 +59,51 @@
57 59 spinlock_t lock;
58 60 };
59 61  
  62 +#define AF_IUCV_FLAG_ACK 0x1
  63 +#define AF_IUCV_FLAG_SYN 0x2
  64 +#define AF_IUCV_FLAG_FIN 0x4
  65 +#define AF_IUCV_FLAG_WIN 0x8
  66 +
  67 +struct af_iucv_trans_hdr {
  68 + u16 magic;
  69 + u8 version;
  70 + u8 flags;
  71 + u16 window;
  72 + char destNodeID[8];
  73 + char destUserID[8];
  74 + char destAppName[16];
  75 + char srcNodeID[8];
  76 + char srcUserID[8];
  77 + char srcAppName[16]; /* => 70 bytes */
  78 + struct iucv_message iucv_hdr; /* => 33 bytes */
  79 + u8 pad; /* total 104 bytes */
  80 +} __packed;
  81 +
  82 +enum iucv_tx_notify {
  83 + /* transmission of skb is completed and was successful */
  84 + TX_NOTIFY_OK = 0,
  85 + /* target is unreachable */
  86 + TX_NOTIFY_UNREACHABLE = 1,
  87 + /* transfer pending queue full */
  88 + TX_NOTIFY_TPQFULL = 2,
  89 + /* general error */
  90 + TX_NOTIFY_GENERALERROR = 3,
  91 + /* transmission of skb is pending - may interleave
  92 + * with TX_NOTIFY_DELAYED_* */
  93 + TX_NOTIFY_PENDING = 4,
  94 + /* transmission of skb was done successfully (delayed) */
  95 + TX_NOTIFY_DELAYED_OK = 5,
  96 + /* target unreachable (detected delayed) */
  97 + TX_NOTIFY_DELAYED_UNREACHABLE = 6,
  98 + /* general error (detected delayed) */
  99 + TX_NOTIFY_DELAYED_GENERALERROR = 7,
  100 +};
  101 +
60 102 #define iucv_sk(__sk) ((struct iucv_sock *) __sk)
61 103  
  104 +#define AF_IUCV_TRANS_IUCV 0
  105 +#define AF_IUCV_TRANS_HIPER 1
  106 +
62 107 struct iucv_sock {
63 108 struct sock sk;
64 109 char src_user_id[8];
... ... @@ -75,6 +120,13 @@
75 120 unsigned int send_tag;
76 121 u8 flags;
77 122 u16 msglimit;
  123 + u16 msglimit_peer;
  124 + atomic_t msg_sent;
  125 + atomic_t msg_recv;
  126 + atomic_t pendings;
  127 + int transport;
  128 + void (*sk_txnotify)(struct sk_buff *skb,
  129 + enum iucv_tx_notify n);
78 130 };
79 131  
80 132 /* iucv socket options (SOL_IUCV) */
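
The byte counts noted in the comments above can be checked at compile
time. A stand-alone sketch (assuming struct iucv_message is the
33-byte packed structure declared in net/iucv/iucv.h):

    #include <stdint.h>

    struct iucv_message {               /* assumed layout, per net/iucv/iucv.h */
        uint32_t id;
        uint32_t audit;
        uint32_t class;
        uint32_t tag;
        uint32_t length;
        uint32_t reply_size;
        uint8_t  rmmsg[8];
        uint8_t  flags;
    } __attribute__((packed));          /* 6 * 4 + 8 + 1 = 33 bytes */

    struct af_iucv_trans_hdr {
        uint16_t magic;
        uint8_t  version;
        uint8_t  flags;
        uint16_t window;
        char destNodeID[8];
        char destUserID[8];
        char destAppName[16];
        char srcNodeID[8];
        char srcUserID[8];
        char srcAppName[16];            /* 6 + 4 * 8 + 2 * 16 = 70 bytes */
        struct iucv_message iucv_hdr;   /* 70 + 33 = 103 bytes */
        uint8_t pad;                    /* 103 + 1 = 104 bytes */
    } __attribute__((packed));

    _Static_assert(sizeof(struct iucv_message) == 33, "iucv_hdr: 33 bytes");
    _Static_assert(sizeof(struct af_iucv_trans_hdr) == 104, "total: 104 bytes");
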
net/iucv/af_iucv.c
... ... @@ -27,10 +27,9 @@
27 27 #include <asm/cpcmd.h>
28 28 #include <linux/kmod.h>
29 29  
30   -#include <net/iucv/iucv.h>
31 30 #include <net/iucv/af_iucv.h>
32 31  
33   -#define VERSION "1.1"
  32 +#define VERSION "1.2"
34 33  
35 34 static char iucv_userid[80];
36 35  
... ... @@ -92,6 +91,12 @@
92 91 static void iucv_sock_kill(struct sock *sk);
93 92 static void iucv_sock_close(struct sock *sk);
94 93  
  94 +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  95 + struct packet_type *pt, struct net_device *orig_dev);
  96 +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
  97 + struct sk_buff *skb, u8 flags);
  98 +static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
  99 +
95 100 /* Call Back functions */
96 101 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
97 102 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
... ... @@ -296,7 +301,11 @@
296 301  
297 302 if (sk->sk_state != IUCV_CONNECTED)
298 303 return 1;
299   - return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
  304 + if (iucv->transport == AF_IUCV_TRANS_IUCV)
  305 + return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
  306 + else
  307 + return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
  308 + (atomic_read(&iucv->pendings) <= 0));
300 309 }
301 310  
302 311 /**
... ... @@ -314,6 +323,79 @@
314 323 rcu_read_unlock();
315 324 }
316 325  
  326 +/**
  327 + * afiucv_hs_send() - send a message through HiperSockets transport
  328 + */
  329 +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
  330 + struct sk_buff *skb, u8 flags)
  331 +{
  332 + struct net *net = sock_net(sock);
  333 + struct iucv_sock *iucv = iucv_sk(sock);
  334 + struct af_iucv_trans_hdr *phs_hdr;
  335 + struct sk_buff *nskb;
  336 + int err, confirm_recv = 0;
  337 +
  338 + memset(skb->head, 0, ETH_HLEN);
  339 + phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
  340 + sizeof(struct af_iucv_trans_hdr));
  341 + skb_reset_mac_header(skb);
  342 + skb_reset_network_header(skb);
  343 + skb_push(skb, ETH_HLEN);
  344 + skb_reset_mac_header(skb);
  345 + memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
  346 +
  347 + phs_hdr->magic = ETH_P_AF_IUCV;
  348 + phs_hdr->version = 1;
  349 + phs_hdr->flags = flags;
  350 + if (flags == AF_IUCV_FLAG_SYN)
  351 + phs_hdr->window = iucv->msglimit;
  352 + else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
  353 + confirm_recv = atomic_read(&iucv->msg_recv);
  354 + phs_hdr->window = confirm_recv;
  355 + if (confirm_recv)
  356 + phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
  357 + }
  358 + memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
  359 + memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
  360 + memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
  361 + memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
  362 + ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
  363 + ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
  364 + ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
  365 + ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
  366 + if (imsg)
  367 + memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
  368 +
  369 + rcu_read_lock();
  370 + skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
  371 + rcu_read_unlock();
  372 + if (!skb->dev)
  373 + return -ENODEV;
  374 + if (!(skb->dev->flags & IFF_UP))
  375 + return -ENETDOWN;
  376 + if (skb->len > skb->dev->mtu) {
  377 + if (sock->sk_type == SOCK_SEQPACKET)
  378 + return -EMSGSIZE;
  379 + else
  380 + skb_trim(skb, skb->dev->mtu);
  381 + }
  382 + skb->protocol = ETH_P_AF_IUCV;
  383 + skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
  384 + nskb = skb_clone(skb, GFP_ATOMIC);
  385 + if (!nskb)
  386 + return -ENOMEM;
  387 + skb_queue_tail(&iucv->send_skb_q, nskb);
  388 + err = dev_queue_xmit(skb);
  389 + if (err) {
  390 + skb_unlink(nskb, &iucv->send_skb_q);
  391 + kfree_skb(nskb);
  392 + } else {
  393 + atomic_sub(confirm_recv, &iucv->msg_recv);
  394 + WARN_ON(atomic_read(&iucv->msg_recv) < 0);
  395 + }
  396 + return err;
  397 +}
  398 +
317 399 /* Timers */
318 400 static void iucv_sock_timeout(unsigned long arg)
319 401 {
... ... @@ -382,6 +464,8 @@
382 464 unsigned char user_data[16];
383 465 struct iucv_sock *iucv = iucv_sk(sk);
384 466 unsigned long timeo;
  467 + int err, blen;
  468 + struct sk_buff *skb;
385 469  
386 470 iucv_sock_clear_timer(sk);
387 471 lock_sock(sk);
... ... @@ -392,6 +476,20 @@
392 476 break;
393 477  
394 478 case IUCV_CONNECTED:
  479 + if (iucv->transport == AF_IUCV_TRANS_HIPER) {
  480 + /* send fin */
  481 + blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
  482 + skb = sock_alloc_send_skb(sk, blen, 1, &err);
  483 + if (skb) {
  484 + skb_reserve(skb,
  485 + sizeof(struct af_iucv_trans_hdr) +
  486 + ETH_HLEN);
  487 + err = afiucv_hs_send(NULL, sk, skb,
  488 + AF_IUCV_FLAG_FIN);
  489 + }
  490 + sk->sk_state = IUCV_DISCONN;
  491 + sk->sk_state_change(sk);
  492 + }
395 493 case IUCV_DISCONN:
396 494 sk->sk_state = IUCV_CLOSING;
397 495 sk->sk_state_change(sk);
... ... @@ -461,10 +559,18 @@
461 559 spin_lock_init(&iucv->message_q.lock);
462 560 skb_queue_head_init(&iucv->backlog_skb_q);
463 561 iucv->send_tag = 0;
  562 + atomic_set(&iucv->pendings, 0);
464 563 iucv->flags = 0;
465   - iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
  564 + iucv->msglimit = 0;
  565 + atomic_set(&iucv->msg_sent, 0);
  566 + atomic_set(&iucv->msg_recv, 0);
466 567 iucv->path = NULL;
  568 + iucv->sk_txnotify = afiucv_hs_callback_txnotify;
467 569 memset(&iucv->src_user_id , 0, 32);
  570 + if (pr_iucv)
  571 + iucv->transport = AF_IUCV_TRANS_IUCV;
  572 + else
  573 + iucv->transport = AF_IUCV_TRANS_HIPER;
468 574  
469 575 sk->sk_destruct = iucv_sock_destruct;
470 576 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
... ... @@ -595,7 +701,9 @@
595 701 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
596 702 struct sock *sk = sock->sk;
597 703 struct iucv_sock *iucv;
598   - int err;
  704 + int err = 0;
  705 + struct net_device *dev;
  706 + char uid[9];
599 707  
600 708 /* Verify the input sockaddr */
601 709 return -EINVAL;
... ... @@ -614,19 +722,46 @@
614 722 err = -EADDRINUSE;
615 723 goto done_unlock;
616 724 }
617   - if (iucv->path) {
618   - err = 0;
  725 + if (iucv->path)
619 726 goto done_unlock;
620   - }
621 727  
622 728 /* Bind the socket */
623   - memcpy(iucv->src_name, sa->siucv_name, 8);
624 729  
625   - /* Copy the user id */
626   - memcpy(iucv->src_user_id, iucv_userid, 8);
627   - sk->sk_state = IUCV_BOUND;
628   - err = 0;
  730 + if (pr_iucv)
  731 + if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
  732 + goto vm_bind; /* VM IUCV transport */
629 733  
  734 + /* try hiper transport */
  735 + memcpy(uid, sa->siucv_user_id, sizeof(uid));
  736 + ASCEBC(uid, 8);
  737 + rcu_read_lock();
  738 + for_each_netdev_rcu(&init_net, dev) {
  739 + if (!memcmp(dev->perm_addr, uid, 8)) {
  740 + memcpy(iucv->src_name, sa->siucv_name, 8);
  741 + memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
  742 + sock->sk->sk_bound_dev_if = dev->ifindex;
  743 + sk->sk_state = IUCV_BOUND;
  744 + iucv->transport = AF_IUCV_TRANS_HIPER;
  745 + if (!iucv->msglimit)
  746 + iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
  747 + rcu_read_unlock();
  748 + goto done_unlock;
  749 + }
  750 + }
  751 + rcu_read_unlock();
  752 +vm_bind:
  753 + if (pr_iucv) {
  754 + /* use local userid for backward compat */
  755 + memcpy(iucv->src_name, sa->siucv_name, 8);
  756 + memcpy(iucv->src_user_id, iucv_userid, 8);
  757 + sk->sk_state = IUCV_BOUND;
  758 + iucv->transport = AF_IUCV_TRANS_IUCV;
  759 + if (!iucv->msglimit)
  760 + iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
  761 + goto done_unlock;
  762 + }
  763 + /* found no dev to bind */
  764 + err = -ENODEV;
630 765 done_unlock:
631 766 /* Release the socket list lock */
632 767 write_unlock_bh(&iucv_sk_list.lock);
... ... @@ -662,40 +797,40 @@
662 797  
663 798 memcpy(&iucv->src_name, name, 8);
664 799  
  800 + if (!iucv->msglimit)
  801 + iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
  802 +
665 803 return err;
666 804 }
667 805  
668   -/* Connect an unconnected socket */
669   -static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
670   - int alen, int flags)
  806 +static int afiucv_hs_connect(struct socket *sock)
671 807 {
  808 + struct sock *sk = sock->sk;
  809 + struct sk_buff *skb;
  810 + int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
  811 + int err = 0;
  812 +
  813 + /* send syn */
  814 + skb = sock_alloc_send_skb(sk, blen, 1, &err);
  815 + if (!skb) {
  816 + err = -ENOMEM;
  817 + goto done;
  818 + }
  819 + skb->dev = NULL;
  820 + skb_reserve(skb, blen);
  821 + err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
  822 +done:
  823 + return err;
  824 +}
  825 +
  826 +static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
  827 +{
672 828 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
673 829 struct sock *sk = sock->sk;
674 830 struct iucv_sock *iucv = iucv_sk(sk);
675 831 unsigned char user_data[16];
676 832 int err;
677 833  
678   - if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
679   - return -EINVAL;
680   -
681   - if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
682   - return -EBADFD;
683   -
684   - if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
685   - return -EINVAL;
686   -
687   - if (sk->sk_state == IUCV_OPEN) {
688   - err = iucv_sock_autobind(sk);
689   - if (unlikely(err))
690   - return err;
691   - }
692   -
693   - lock_sock(sk);
694   -
695   - /* Set the destination information */
696   - memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
697   - memcpy(iucv->dst_name, sa->siucv_name, 8);
698   -
699 834 high_nmcpy(user_data, sa->siucv_name);
700 835 low_nmcpy(user_data, iucv->src_name);
701 836 ASCEBC(user_data, sizeof(user_data));
... ... @@ -728,20 +863,61 @@
728 863 err = -ECONNREFUSED;
729 864 break;
730 865 }
731   - goto done;
732 866 }
  867 +done:
  868 + return err;
  869 +}
733 870  
734   - if (sk->sk_state != IUCV_CONNECTED) {
  871 +/* Connect an unconnected socket */
  872 +static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
  873 + int alen, int flags)
  874 +{
  875 + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
  876 + struct sock *sk = sock->sk;
  877 + struct iucv_sock *iucv = iucv_sk(sk);
  878 + int err;
  879 +
  880 + if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
  881 + return -EINVAL;
  882 +
  883 + if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
  884 + return -EBADFD;
  885 +
  886 + if (sk->sk_state == IUCV_OPEN &&
  887 + iucv->transport == AF_IUCV_TRANS_HIPER)
  888 + return -EBADFD; /* explicit bind required */
  889 +
  890 + if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
  891 + return -EINVAL;
  892 +
  893 + if (sk->sk_state == IUCV_OPEN) {
  894 + err = iucv_sock_autobind(sk);
  895 + if (unlikely(err))
  896 + return err;
  897 + }
  898 +
  899 + lock_sock(sk);
  900 +
  901 + /* Set the destination information */
  902 + memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
  903 + memcpy(iucv->dst_name, sa->siucv_name, 8);
  904 +
  905 + if (iucv->transport == AF_IUCV_TRANS_HIPER)
  906 + err = afiucv_hs_connect(sock);
  907 + else
  908 + err = afiucv_path_connect(sock, addr);
  909 + if (err)
  910 + goto done;
  911 +
  912 + if (sk->sk_state != IUCV_CONNECTED)
735 913 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
736 914 IUCV_DISCONN),
737 915 sock_sndtimeo(sk, flags & O_NONBLOCK));
738   - }
739 916  
740   - if (sk->sk_state == IUCV_DISCONN) {
  917 + if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
741 918 err = -ECONNREFUSED;
742   - }
743 919  
744   - if (err) {
  920 + if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
745 921 pr_iucv->path_sever(iucv->path, NULL);
746 922 iucv_path_free(iucv->path);
747 923 iucv->path = NULL;
... ... @@ -965,9 +1141,16 @@
965 1141 * this is fine for SOCK_SEQPACKET (unless we want to support
966 1142 * segmented records using the MSG_EOR flag), but
967 1143 * for SOCK_STREAM we might want to improve it in future */
968   - skb = sock_alloc_send_skb(sk, len, noblock, &err);
  1144 + if (iucv->transport == AF_IUCV_TRANS_HIPER)
  1145 + skb = sock_alloc_send_skb(sk,
  1146 + len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
  1147 + noblock, &err);
  1148 + else
  1149 + skb = sock_alloc_send_skb(sk, len, noblock, &err);
969 1150 if (!skb)
970 1151 goto out;
  1152 + if (iucv->transport == AF_IUCV_TRANS_HIPER)
  1153 + skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
971 1154 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
972 1155 err = -EFAULT;
973 1156 goto fail;
... ... @@ -988,6 +1171,15 @@
988 1171 /* increment and save iucv message tag for msg_completion cbk */
989 1172 txmsg.tag = iucv->send_tag++;
990 1173 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
  1174 + if (iucv->transport == AF_IUCV_TRANS_HIPER) {
  1175 + atomic_inc(&iucv->msg_sent);
  1176 + err = afiucv_hs_send(&txmsg, sk, skb, 0);
  1177 + if (err) {
  1178 + atomic_dec(&iucv->msg_sent);
  1179 + goto fail;
  1180 + }
  1181 + goto release;
  1182 + }
991 1183 skb_queue_tail(&iucv->send_skb_q, skb);
992 1184  
993 1185 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
... ... @@ -1028,6 +1220,7 @@
1028 1220 goto fail;
1029 1221 }
1030 1222  
  1223 +release:
1031 1224 release_sock(sk);
1032 1225 return len;
1033 1226  
... ... @@ -1160,7 +1353,8 @@
1160 1353 struct sock *sk = sock->sk;
1161 1354 struct iucv_sock *iucv = iucv_sk(sk);
1162 1355 unsigned int copied, rlen;
1163   - struct sk_buff *skb, *rskb, *cskb;
  1356 + struct sk_buff *skb, *rskb, *cskb, *sskb;
  1357 + int blen;
1164 1358 int err = 0;
1165 1359  
1166 1360 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
... ... @@ -1185,7 +1379,7 @@
1185 1379 copied = min_t(unsigned int, rlen, len);
1186 1380  
1187 1381 cskb = skb;
1188   - if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
  1382 + if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1189 1383 if (!(flags & MSG_PEEK))
1190 1384 skb_queue_head(&sk->sk_receive_queue, skb);
1191 1385 return -EFAULT;
... ... @@ -1223,6 +1417,7 @@
1223 1417 }
1224 1418  
1225 1419 kfree_skb(skb);
  1420 + atomic_inc(&iucv->msg_recv);
1226 1421  
1227 1422 /* Queue backlog skbs */
1228 1423 spin_lock_bh(&iucv->message_q.lock);
... ... @@ -1239,6 +1434,24 @@
1239 1434 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1240 1435 if (!list_empty(&iucv->message_q.list))
1241 1436 iucv_process_message_q(sk);
  1437 + if (atomic_read(&iucv->msg_recv) >=
  1438 + iucv->msglimit / 2) {
  1439 + /* send WIN to peer */
  1440 + blen = sizeof(struct af_iucv_trans_hdr) +
  1441 + ETH_HLEN;
  1442 + sskb = sock_alloc_send_skb(sk, blen, 1, &err);
  1443 + if (sskb) {
  1444 + skb_reserve(sskb,
  1445 + sizeof(struct af_iucv_trans_hdr)
  1446 + + ETH_HLEN);
  1447 + err = afiucv_hs_send(NULL, sk, sskb,
  1448 + AF_IUCV_FLAG_WIN);
  1449 + }
  1450 + if (err) {
  1451 + sk->sk_state = IUCV_DISCONN;
  1452 + sk->sk_state_change(sk);
  1453 + }
  1454 + }
1242 1455 }
1243 1456 spin_unlock_bh(&iucv->message_q.lock);
1244 1457 }
... ... @@ -1698,6 +1911,389 @@
1698 1911 bh_unlock_sock(sk);
1699 1912 }
1700 1913  
  1914 +/***************** HiperSockets transport callbacks ********************/
  1915 +static void afiucv_swap_src_dest(struct sk_buff *skb)
  1916 +{
  1917 + struct af_iucv_trans_hdr *trans_hdr =
  1918 + (struct af_iucv_trans_hdr *)skb->data;
  1919 + char tmpID[8];
  1920 + char tmpName[8];
  1921 +
  1922 + ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
  1923 + ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
  1924 + ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
  1925 + ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
  1926 + memcpy(tmpID, trans_hdr->srcUserID, 8);
  1927 + memcpy(tmpName, trans_hdr->srcAppName, 8);
  1928 + memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
  1929 + memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
  1930 + memcpy(trans_hdr->destUserID, tmpID, 8);
  1931 + memcpy(trans_hdr->destAppName, tmpName, 8);
  1932 + skb_push(skb, ETH_HLEN);
  1933 + memset(skb->data, 0, ETH_HLEN);
  1934 +}
  1935 +
  1936 +/**
  1937 + * afiucv_hs_callback_syn() - react to a received SYN
  1938 + **/
  1939 +static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
  1940 +{
  1941 + struct sock *nsk;
  1942 + struct iucv_sock *iucv, *niucv;
  1943 + struct af_iucv_trans_hdr *trans_hdr;
  1944 + int err;
  1945 +
  1946 + iucv = iucv_sk(sk);
  1947 + trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
  1948 + if (!iucv) {
  1949 + /* no sock - connection refused */
  1950 + afiucv_swap_src_dest(skb);
  1951 + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
  1952 + err = dev_queue_xmit(skb);
  1953 + goto out;
  1954 + }
  1955 +
  1956 + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
  1957 + bh_lock_sock(sk);
  1958 + if ((sk->sk_state != IUCV_LISTEN) ||
  1959 + sk_acceptq_is_full(sk) ||
  1960 + !nsk) {
  1961 + /* error on server socket - connection refused */
  1962 + if (nsk)
  1963 + sk_free(nsk);
  1964 + afiucv_swap_src_dest(skb);
  1965 + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
  1966 + err = dev_queue_xmit(skb);
  1967 + bh_unlock_sock(sk);
  1968 + goto out;
  1969 + }
  1970 +
  1971 + niucv = iucv_sk(nsk);
  1972 + iucv_sock_init(nsk, sk);
  1973 + niucv->transport = AF_IUCV_TRANS_HIPER;
  1974 + niucv->msglimit = iucv->msglimit;
  1975 + if (!trans_hdr->window)
  1976 + niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
  1977 + else
  1978 + niucv->msglimit_peer = trans_hdr->window;
  1979 + memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
  1980 + memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
  1981 + memcpy(niucv->src_name, iucv->src_name, 8);
  1982 + memcpy(niucv->src_user_id, iucv->src_user_id, 8);
  1983 + nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
  1984 + afiucv_swap_src_dest(skb);
  1985 + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
  1986 + trans_hdr->window = niucv->msglimit;
  1987 + /* if the receiver acks the xmit, the connection is established */
  1988 + err = dev_queue_xmit(skb);
  1989 + if (!err) {
  1990 + iucv_accept_enqueue(sk, nsk);
  1991 + nsk->sk_state = IUCV_CONNECTED;
  1992 + sk->sk_data_ready(sk, 1);
  1993 + } else
  1994 + iucv_sock_kill(nsk);
  1995 + bh_unlock_sock(sk);
  1996 +
  1997 +out:
  1998 + return NET_RX_SUCCESS;
  1999 +}
  2000 +
  2001 +/**
  2002 + * afiucv_hs_callback_synack() - react to a received SYN-ACK
  2003 + **/
  2004 +static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
  2005 +{
  2006 + struct iucv_sock *iucv = iucv_sk(sk);
  2007 + struct af_iucv_trans_hdr *trans_hdr =
  2008 + (struct af_iucv_trans_hdr *)skb->data;
  2009 +
  2010 + if (!iucv)
  2011 + goto out;
  2012 + if (sk->sk_state != IUCV_BOUND)
  2013 + goto out;
  2014 + bh_lock_sock(sk);
  2015 + iucv->msglimit_peer = trans_hdr->window;
  2016 + sk->sk_state = IUCV_CONNECTED;
  2017 + sk->sk_state_change(sk);
  2018 + bh_unlock_sock(sk);
  2019 +out:
  2020 + kfree_skb(skb);
  2021 + return NET_RX_SUCCESS;
  2022 +}
  2023 +
  2024 +/**
  2025 + * afiucv_hs_callback_synfin() - react to a received SYN|FIN
  2026 + **/
  2027 +static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
  2028 +{
  2029 + struct iucv_sock *iucv = iucv_sk(sk);
  2030 +
  2031 + if (!iucv)
  2032 + goto out;
  2033 + if (sk->sk_state != IUCV_BOUND)
  2034 + goto out;
  2035 + bh_lock_sock(sk);
  2036 + sk->sk_state = IUCV_DISCONN;
  2037 + sk->sk_state_change(sk);
  2038 + bh_unlock_sock(sk);
  2039 +out:
  2040 + kfree_skb(skb);
  2041 + return NET_RX_SUCCESS;
  2042 +}
  2043 +
  2044 +/**
  2045 + * afiucv_hs_callback_fin() - react to a received FIN
  2046 + **/
  2047 +static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
  2048 +{
  2049 + struct iucv_sock *iucv = iucv_sk(sk);
  2050 +
  2051 + /* other end of connection closed */
  2052 + if (iucv) {
  2053 + bh_lock_sock(sk);
  2054 + if (!list_empty(&iucv->accept_q))
  2055 + sk->sk_state = IUCV_SEVERED;
  2056 + else
  2057 + sk->sk_state = IUCV_DISCONN;
  2058 + sk->sk_state_change(sk);
  2059 + bh_unlock_sock(sk);
  2060 + }
  2061 + kfree_skb(skb);
  2062 + return NET_RX_SUCCESS;
  2063 +}
  2064 +
  2065 +/**
  2066 + * afiucv_hs_callback_win() - react to a received WIN
  2067 + **/
  2068 +static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
  2069 +{
  2070 + struct iucv_sock *iucv = iucv_sk(sk);
  2071 + struct af_iucv_trans_hdr *trans_hdr =
  2072 + (struct af_iucv_trans_hdr *)skb->data;
  2073 +
  2074 + if (!iucv)
  2075 + return NET_RX_SUCCESS;
  2076 +
  2077 + if (sk->sk_state != IUCV_CONNECTED)
  2078 + return NET_RX_SUCCESS;
  2079 +
  2080 + atomic_sub(trans_hdr->window, &iucv->msg_sent);
  2081 + iucv_sock_wake_msglim(sk);
  2082 + return NET_RX_SUCCESS;
  2083 +}
  2084 +
  2085 +/**
  2086 + * afiucv_hs_callback_rx() - react to received data
  2087 + **/
  2088 +static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
  2089 +{
  2090 + struct iucv_sock *iucv = iucv_sk(sk);
  2091 +
  2092 + if (!iucv) {
  2093 + kfree_skb(skb);
  2094 + return NET_RX_SUCCESS;
  2095 + }
  2096 +
  2097 + if (sk->sk_state != IUCV_CONNECTED) {
  2098 + kfree_skb(skb);
  2099 + return NET_RX_SUCCESS;
  2100 + }
  2101 +
  2102 + /* write stuff from iucv_msg to skb cb */
  2103 + if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
  2104 + kfree_skb(skb);
  2105 + return NET_RX_SUCCESS;
  2106 + }
  2107 + skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
  2108 + skb_reset_transport_header(skb);
  2109 + skb_reset_network_header(skb);
  2110 + spin_lock(&iucv->message_q.lock);
  2111 + if (skb_queue_empty(&iucv->backlog_skb_q)) {
  2112 + if (sock_queue_rcv_skb(sk, skb)) {
  2113 + /* handle rcv queue full */
  2114 + skb_queue_tail(&iucv->backlog_skb_q, skb);
  2115 + }
  2116 + } else
  2117 + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
  2118 + spin_unlock(&iucv->message_q.lock);
  2119 + return NET_RX_SUCCESS;
  2120 +}
  2121 +
  2122 +/**
  2123 + * afiucv_hs_rcv() - entry point for data arriving through the
  2124 + * HiperSockets transport
  2125 + * called from the netif RX softirq
  2126 + **/
  2127 +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
  2128 + struct packet_type *pt, struct net_device *orig_dev)
  2129 +{
  2130 + struct hlist_node *node;
  2131 + struct sock *sk;
  2132 + struct iucv_sock *iucv;
  2133 + struct af_iucv_trans_hdr *trans_hdr;
  2134 + char nullstring[8];
  2135 + int err = 0;
  2136 +
  2137 + skb_pull(skb, ETH_HLEN);
  2138 + trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
  2139 + EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
  2140 + EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
  2141 + EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
  2142 + EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
  2143 + memset(nullstring, 0, sizeof(nullstring));
  2144 + iucv = NULL;
  2145 + sk = NULL;
  2146 + read_lock(&iucv_sk_list.lock);
  2147 + sk_for_each(sk, node, &iucv_sk_list.head) {
  2148 + if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
  2149 + if ((!memcmp(&iucv_sk(sk)->src_name,
  2150 + trans_hdr->destAppName, 8)) &&
  2151 + (!memcmp(&iucv_sk(sk)->src_user_id,
  2152 + trans_hdr->destUserID, 8)) &&
  2153 + (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
  2154 + (!memcmp(&iucv_sk(sk)->dst_user_id,
  2155 + nullstring, 8))) {
  2156 + iucv = iucv_sk(sk);
  2157 + break;
  2158 + }
  2159 + } else {
  2160 + if ((!memcmp(&iucv_sk(sk)->src_name,
  2161 + trans_hdr->destAppName, 8)) &&
  2162 + (!memcmp(&iucv_sk(sk)->src_user_id,
  2163 + trans_hdr->destUserID, 8)) &&
  2164 + (!memcmp(&iucv_sk(sk)->dst_name,
  2165 + trans_hdr->srcAppName, 8)) &&
  2166 + (!memcmp(&iucv_sk(sk)->dst_user_id,
  2167 + trans_hdr->srcUserID, 8))) {
  2168 + iucv = iucv_sk(sk);
  2169 + break;
  2170 + }
  2171 + }
  2172 + }
  2173 + read_unlock(&iucv_sk_list.lock);
  2174 + if (!iucv)
  2175 + sk = NULL;
  2176 +
  2177 + /* no sock
  2178 +    how should we send with no sock?
  2179 +    1) send without sock - no send rc checking?
  2180 +    2) introduce a default sock to handle such cases
  2181 +
  2182 +    SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
  2183 +    data -> send FIN
  2184 +    SYN|ACK, SYN|FIN, FIN -> no action? */
  2185 +
  2186 + switch (trans_hdr->flags) {
  2187 + case AF_IUCV_FLAG_SYN:
  2188 + /* connect request */
  2189 + err = afiucv_hs_callback_syn(sk, skb);
  2190 + break;
  2191 + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
  2192 + /* connect request confirmed */
  2193 + err = afiucv_hs_callback_synack(sk, skb);
  2194 + break;
  2195 + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
  2196 + /* connect request refused */
  2197 + err = afiucv_hs_callback_synfin(sk, skb);
  2198 + break;
  2199 + case (AF_IUCV_FLAG_FIN):
  2200 + /* close request */
  2201 + err = afiucv_hs_callback_fin(sk, skb);
  2202 + break;
  2203 + case (AF_IUCV_FLAG_WIN):
  2204 + err = afiucv_hs_callback_win(sk, skb);
  2205 + if (skb->len > sizeof(struct af_iucv_trans_hdr))
  2206 + err = afiucv_hs_callback_rx(sk, skb);
  2207 + else
  2208 + kfree_skb(skb);
  2209 + break;
  2210 + case 0:
  2211 + /* plain data frame */
  2212 + err = afiucv_hs_callback_rx(sk, skb);
  2213 + break;
  2214 + default:
  2215 + ;
  2216 + }
  2217 +
  2218 + return err;
  2219 +}
  2220 +
  2221 +/**
  2222 + * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
  2223 + * transport
  2224 + **/
  2225 +static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
  2226 + enum iucv_tx_notify n)
  2227 +{
  2228 + struct sock *isk = skb->sk;
  2229 + struct sock *sk = NULL;
  2230 + struct iucv_sock *iucv = NULL;
  2231 + struct sk_buff_head *list;
  2232 + struct sk_buff *list_skb;
  2233 + struct sk_buff *this = NULL;
  2234 + unsigned long flags;
  2235 + struct hlist_node *node;
  2236 +
  2237 + read_lock(&iucv_sk_list.lock);
  2238 + sk_for_each(sk, node, &iucv_sk_list.head)
  2239 + if (sk == isk) {
  2240 + iucv = iucv_sk(sk);
  2241 + break;
  2242 + }
  2243 + read_unlock(&iucv_sk_list.lock);
  2244 +
  2245 + if (!iucv)
  2246 + return;
  2247 +
  2248 + bh_lock_sock(sk);
  2249 + list = &iucv->send_skb_q;
  2250 + list_skb = list->next;
  2251 + if (skb_queue_empty(list))
  2252 + goto out_unlock;
  2253 +
  2254 + spin_lock_irqsave(&list->lock, flags);
  2255 + while (list_skb != (struct sk_buff *)list) {
  2256 + if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
  2257 + this = list_skb;
  2258 + switch (n) {
  2259 + case TX_NOTIFY_OK:
  2260 + __skb_unlink(this, list);
  2261 + iucv_sock_wake_msglim(sk);
  2262 + kfree_skb(this);
  2263 + break;
  2264 + case TX_NOTIFY_PENDING:
  2265 + atomic_inc(&iucv->pendings);
  2266 + break;
  2267 + case TX_NOTIFY_DELAYED_OK:
  2268 + __skb_unlink(this, list);
  2269 + atomic_dec(&iucv->pendings);
  2270 + if (atomic_read(&iucv->pendings) <= 0)
  2271 + iucv_sock_wake_msglim(sk);
  2272 + kfree_skb(this);
  2273 + break;
  2274 + case TX_NOTIFY_UNREACHABLE:
  2275 + case TX_NOTIFY_DELAYED_UNREACHABLE:
  2276 + case TX_NOTIFY_TPQFULL: /* not yet used */
  2277 + case TX_NOTIFY_GENERALERROR:
  2278 + case TX_NOTIFY_DELAYED_GENERALERROR:
  2279 + __skb_unlink(this, list);
  2280 + kfree_skb(this);
  2281 + if (!list_empty(&iucv->accept_q))
  2282 + sk->sk_state = IUCV_SEVERED;
  2283 + else
  2284 + sk->sk_state = IUCV_DISCONN;
  2285 + sk->sk_state_change(sk);
  2286 + break;
  2287 + }
  2288 + break;
  2289 + }
  2290 + list_skb = list_skb->next;
  2291 + }
  2292 + spin_unlock_irqrestore(&list->lock, flags);
  2293 +
  2294 +out_unlock:
  2295 + bh_unlock_sock(sk);
  2296 +}
1701 2297 static const struct proto_ops iucv_sock_ops = {
1702 2298 .family = PF_IUCV,
1703 2299 .owner = THIS_MODULE,
... ... @@ -1724,7 +2320,12 @@
1724 2320 .create = iucv_sock_create,
1725 2321 };
1726 2322  
1727   -static int __init afiucv_iucv_init(void)
  2323 +static struct packet_type iucv_packet_type = {
  2324 + .type = cpu_to_be16(ETH_P_AF_IUCV),
  2325 + .func = afiucv_hs_rcv,
  2326 +};
  2327 +
  2328 +static int afiucv_iucv_init(void)
1728 2329 {
1729 2330 int err;
1730 2331  
... ... @@ -1763,24 +2364,22 @@
1763 2364 {
1764 2365 int err;
1765 2366  
1766   - if (!MACHINE_IS_VM) {
1767   - pr_err("The af_iucv module cannot be loaded"
1768   - " without z/VM\n");
1769   - err = -EPROTONOSUPPORT;
1770   - goto out;
1771   - }
1772   - cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1773   - if (unlikely(err)) {
1774   - WARN_ON(err);
1775   - err = -EPROTONOSUPPORT;
1776   - goto out;
1777   - }
  2367 + if (MACHINE_IS_VM) {
  2368 + cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
  2369 + if (unlikely(err)) {
  2370 + WARN_ON(err);
  2371 + err = -EPROTONOSUPPORT;
  2372 + goto out;
  2373 + }
1778 2374  
1779   - pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
1780   - if (!pr_iucv) {
1781   - printk(KERN_WARNING "iucv_if lookup failed\n");
1782   - err = -EPROTONOSUPPORT;
1783   - goto out;
  2375 + pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
  2376 + if (!pr_iucv) {
  2377 + printk(KERN_WARNING "iucv_if lookup failed\n");
  2378 + memset(&iucv_userid, 0, sizeof(iucv_userid));
  2379 + }
  2380 + } else {
  2381 + memset(&iucv_userid, 0, sizeof(iucv_userid));
  2382 + pr_iucv = NULL;
1784 2383 }
1785 2384  
1786 2385 err = proto_register(&iucv_proto, 0);
... ... @@ -1790,10 +2389,12 @@
1790 2389 if (err)
1791 2390 goto out_proto;
1792 2391  
1793   - err = afiucv_iucv_init();
1794   - if (err)
1795   - goto out_sock;
1796   -
  2392 + if (pr_iucv) {
  2393 + err = afiucv_iucv_init();
  2394 + if (err)
  2395 + goto out_sock;
  2396 + }
  2397 + dev_add_pack(&iucv_packet_type);
1797 2398 return 0;
1798 2399  
1799 2400 out_sock:
... ... @@ -1808,10 +2409,13 @@
1808 2409  
1809 2410 static void __exit afiucv_exit(void)
1810 2411 {
1811   - device_unregister(af_iucv_dev);
1812   - driver_unregister(&af_iucv_driver);
1813   - pr_iucv->iucv_unregister(&af_iucv_handler, 0);
1814   - symbol_put(iucv_if);
  2412 + if (pr_iucv) {
  2413 + device_unregister(af_iucv_dev);
  2414 + driver_unregister(&af_iucv_driver);
  2415 + pr_iucv->iucv_unregister(&af_iucv_handler, 0);
  2416 + symbol_put(iucv_if);
  2417 + }
  2418 + dev_remove_pack(&iucv_packet_type);
1815 2419 sock_unregister(PF_IUCV);
1816 2420 proto_unregister(&iucv_proto);
1817 2421 }