diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 253188fb8cb0cea0e35d0f4ed77b5e2c6332d507..e3e50950a863675b72a3c1e0d605d81cf5f258f2 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
 		kernel_neon_begin();
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 		kernel_neon_end();
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
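
The hunk above shrinks the NEON critical section: skcipher_walk_done() may allocate memory or copy data back and should not run between kernel_neon_begin() and kernel_neon_end(), which keep preemption disabled, so only the aes_ctr_encrypt() call itself stays inside the pair. A rough userspace analogue of that ordering, with hypothetical names and a mutex standing in for the NEON context (not the kernel API), might look like this:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t exclusive_ctx = PTHREAD_MUTEX_INITIALIZER;

    /* stand-in for aes_ctr_encrypt(): the only work that needs the context */
    static void do_block(int i)
    {
        printf("block %d processed inside the exclusive section\n", i);
    }

    /* stand-in for skcipher_walk_done(): may block, so call it afterwards */
    static int finish_block(int i)
    {
        printf("block %d bookkeeping done outside the section\n", i);
        return 0;
    }

    int main(void)
    {
        int err = 0;

        for (int i = 0; i < 3 && !err; i++) {
            pthread_mutex_lock(&exclusive_ctx);
            do_block(i);
            pthread_mutex_unlock(&exclusive_ctx);
            err = finish_block(i);  /* same ordering as in the fix above */
        }
        return err;
    }
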
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 9fbcde307daf90b554ac5e96da627f0f77eb24e9..5eede3749e646b425614aa86de9143c82545fcc6 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
 		union morus640_block_in tail;
 
 		memcpy(tail.bytes, src, size);
+		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
 
-		crypto_morus640_load_a(&m, src);
+		crypto_morus640_load_a(&m, tail.bytes);
 		crypto_morus640_core(state, &m);
 		crypto_morus640_store_a(tail.bytes, &m);
 		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
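
The decryption tail path above previously called crypto_morus640_load_a(&m, src) on a trailing chunk shorter than MORUS640_BLOCK_SIZE, reading past the end of the source buffer; the fix zero-pads a stack copy first and loads from tail.bytes instead. A minimal, self-contained sketch of that pad-then-process pattern, with generic names rather than the morus640 helpers:

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 16

    /* stand-in for the block transform: here it just XORs a fixed byte */
    static void process_block(unsigned char block[BLOCK_SIZE])
    {
        for (int i = 0; i < BLOCK_SIZE; i++)
            block[i] ^= 0x5a;
    }

    /* Handle a trailing partial block of size < BLOCK_SIZE bytes. */
    static void process_tail(unsigned char *dst, const unsigned char *src,
                             size_t size)
    {
        unsigned char tail[BLOCK_SIZE];

        memcpy(tail, src, size);                   /* copy only what exists */
        memset(tail + size, 0, BLOCK_SIZE - size); /* pad before processing */
        process_block(tail);                       /* never reads src past size */
        memcpy(dst, tail, size);                   /* emit only size bytes */
    }

    int main(void)
    {
        const unsigned char in[4] = { 't', 'a', 'i', 'l' };
        unsigned char out[4];

        process_tail(out, in, sizeof(in));
        for (size_t i = 0; i < sizeof(out); i++)
            printf("%02x", out[i]);
        printf("\n");
        return 0;
    }
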
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 264ec12c0b9c334a16d743651771a18b616616a0..7f6735d9003f13c1e4adb7ffde8d8be5cc700fe1 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
 	st[24] ^= bc[ 4];
 }
 
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
 	int round;
 
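
The sha3 hunk drops the function-level __optimize("O3") annotation from keccakf() and lets the unrolled round function build with the regular kernel flags; GCC's documentation recommends against relying on the optimize attribute outside debugging, and its behaviour varies across compiler versions. For reference, the construct being removed is roughly GCC's per-function optimize attribute, shown here as a standalone illustration rather than the kernel's __optimize() macro:

    #include <stdio.h>

    /* Per-function optimization level; GCC-only, skipped elsewhere. */
    #if defined(__GNUC__) && !defined(__clang__)
    __attribute__((optimize("O3")))
    #endif
    static unsigned long long sum_squares(unsigned int n)
    {
        unsigned long long s = 0;

        for (unsigned int i = 1; i <= n; i++)
            s += (unsigned long long)i * i;
        return s;
    }

    int main(void)
    {
        printf("%llu\n", sum_squares(10));  /* prints 385 */
        return 0;
    }
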
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 91bb98c42a1ca76376ae0db4b43fd7a89fca27b0..aaf9e5afaad435e2342a15fc963aa91367079957 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+	int err;
+
 	mutex_lock(&rng_mutex);
 
 	list_del(&rng->list);
-	if (current_rng == rng)
-		enable_best_rng();
+	if (current_rng == rng) {
+		err = enable_best_rng();
+		if (err) {
+			drop_current_rng();
+			cur_rng_set_by_user = 0;
+		}
+	}
 
 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
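
The hw_random change above stops assuming that enable_best_rng() succeeds when the current RNG is unregistered: if no replacement can be enabled, the stale current_rng is dropped and cur_rng_set_by_user is cleared rather than leaving a dangling selection. A small generic sketch of that fallback-or-clear pattern, using hypothetical types and names rather than the hwrng API:

    #include <stdio.h>
    #include <stddef.h>

    struct source {
        const char *name;
        int usable;              /* can this source still be enabled? */
    };

    static struct source *current_src;
    static int chosen_by_user;

    /* stand-in for enable_best_rng(): pick the first usable source, or fail */
    static int enable_best(struct source *list, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (list[i].usable) {
                current_src = &list[i];
                return 0;
            }
        }
        return -1;
    }

    /* stand-in for hwrng_unregister(): mirrors the error handling above */
    static void unregister_source(struct source *src, struct source *list,
                                  size_t n)
    {
        src->usable = 0;                /* drop it from consideration */
        if (current_src == src) {
            if (enable_best(list, n)) { /* no fallback could be enabled */
                current_src = NULL;     /* cf. drop_current_rng() */
                chosen_by_user = 0;     /* cf. cur_rng_set_by_user = 0 */
            }
        }
    }

    int main(void)
    {
        struct source list[] = { { "only-source", 1 } };

        current_src = &list[0];
        chosen_by_user = 1;
        unregister_source(&list[0], list, 1);
        printf("current: %s, user-chosen: %d\n",
               current_src ? current_src->name : "(none)", chosen_by_user);
        return 0;
    }
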
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 00c7aab8e7d0f5861e778dc4d26affe5c1234603..afebbd87c4aa1d22ca179f558552cb2f410fcc0a 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1548,15 +1548,14 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			tp->urg_data = 0;
 
 		if ((avail + offset) >= skb->len) {
-			if (likely(skb))
-				chtls_free_skb(sk, skb);
-			buffers_freed++;
 			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
 				tp->copied_seq += skb->len;
 				hws->rcvpld = skb->hdr_len;
 			} else {
 				tp->copied_seq += hws->rcvpld;
 			}
+			chtls_free_skb(sk, skb);
+			buffers_freed++;
 			hws->copied_seq = 0;
 			if (copied >= target &&
 			    !skb_peek(&sk->sk_receive_queue))
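
The chtls reordering above fixes a use-after-free: the old code called chtls_free_skb() and then read skb->len, ULP_SKB_CB(skb)->flags and skb->hdr_len from the freed buffer, and the likely(skb) test was redundant since skb had already been dereferenced. The fix reads everything it needs and updates copied_seq before freeing. A minimal standalone illustration of the read-then-free ordering, with generic names rather than the chtls structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct buffer {
        size_t len;
    };

    /* Consume one buffer: read every field that is still needed, then free.
     * Freeing first and reading the fields afterwards would be a
     * use-after-free, which is exactly what the hunk above eliminates. */
    static size_t consume(struct buffer *buf, size_t *copied_seq)
    {
        size_t len = buf->len;    /* read while the buffer is still valid */

        *copied_seq += len;
        free(buf);                /* nothing touches buf after this point */
        return len;
    }

    int main(void)
    {
        struct buffer *buf = malloc(sizeof(*buf));
        size_t copied_seq = 0;
        size_t len;

        if (!buf)
            return 1;
        buf->len = 128;
        len = consume(buf, &copied_seq);
        printf("consumed %zu bytes, copied_seq now %zu\n", len, copied_seq);
        return 0;
    }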