summaryrefslogtreecommitdiff
path: root/net/rxrpc/conn_event.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/rxrpc/conn_event.c')
-rw-r--r--  net/rxrpc/conn_event.c  |  19 +++++++++++++------
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 98ad9b51ca2cd..9a41ec708aeb9 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -247,6 +247,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ bool secured = false;
int ret;
if (conn->state == RXRPC_CONN_ABORTED)
@@ -262,6 +263,13 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return ret;
case RXRPC_PACKET_TYPE_RESPONSE:
+ spin_lock_irq(&conn->state_lock);
+ if (conn->state != RXRPC_CONN_SERVICE_CHALLENGING) {
+ spin_unlock_irq(&conn->state_lock);
+ return 0;
+ }
+ spin_unlock_irq(&conn->state_lock);
+
ret = conn->security->verify_response(conn, skb);
if (ret < 0)
return ret;
@@ -272,11 +280,13 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return ret;
spin_lock_irq(&conn->state_lock);
- if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
+ if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE;
+ secured = true;
+ }
spin_unlock_irq(&conn->state_lock);
- if (conn->state == RXRPC_CONN_SERVICE) {
+ if (secured) {
/* Offload call state flipping to the I/O thread. As
* we've already received the packet, put it on the
* front of the queue.
@@ -557,11 +567,11 @@ void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb)
spin_lock_irq(&local->lock);
old = conn->tx_response;
if (old) {
- struct rxrpc_skb_priv *osp = rxrpc_skb(skb);
+ struct rxrpc_skb_priv *osp = rxrpc_skb(old);
/* Always go with the response to the most recent challenge. */
if (after(sp->resp.challenge_serial, osp->resp.challenge_serial))
- conn->tx_response = old;
+ conn->tx_response = skb;
else
old = skb;
} else {
@@ -569,4 +579,5 @@ void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb)
}
spin_unlock_irq(&local->lock);
rxrpc_poke_conn(conn, rxrpc_conn_get_poke_response);
+ rxrpc_free_skb(old, rxrpc_skb_put_old_response);
}