@@ -24,6 +24,29 @@ bool fuse_uring_enabled(void)
 	return enable_uring;
 }
 
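+/*
+ * Per-command private data, stored in the io_uring_cmd pdu area; it only
+ * carries the ring entry so that task-work callbacks can recover it.
+ */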
+struct fuse_uring_pdu {
+	struct fuse_ring_ent *ent;
+};
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops;
+
+static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd,
+				   struct fuse_ring_ent *ring_ent)
+{
+	struct fuse_uring_pdu *pdu =
+		io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+	pdu->ent = ring_ent;
+}
+
+static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
+{
+	struct fuse_uring_pdu *pdu =
+		io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+	return pdu->ent;
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
 			       int error)
 {
@@ -776,6 +799,31 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
 	return 0;
 }
 
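+/*
+ * Returns true once every queue has at least one available ring entry,
+ * i.e. the daemon has registered entries on all queues and the ring can
+ * take over request dispatch.
+ */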
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+	int qid;
+	struct fuse_ring_queue *queue;
+	bool ready = true;
+
+	for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+		if (current_qid == qid)
+			continue;
+
+		queue = ring->queues[qid];
+		if (!queue) {
+			ready = false;
+			break;
+		}
+
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->ent_avail_queue))
+			ready = false;
+		spin_unlock(&queue->lock);
+	}
+
+	return ready;
+}
+
 /*
  * fuse_uring_req_fetch command handling
  */
@@ -784,11 +832,23 @@ static void fuse_uring_do_register(struct fuse_ring_ent *ent,
 				   unsigned int issue_flags)
 {
 	struct fuse_ring_queue *queue = ent->queue;
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+	struct fuse_iqueue *fiq = &fc->iq;
 
 	spin_lock(&queue->lock);
 	ent->cmd = cmd;
 	fuse_uring_ent_avail(ent, queue);
 	spin_unlock(&queue->lock);
+
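+	/*
+	 * Switch the connection over to io-uring dispatch once every queue
+	 * has an available entry; from then on fiq->ops routes requests to
+	 * fuse_uring_queue_fuse_req().
+	 */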
+	if (!ring->ready) {
+		bool ready = is_ring_ready(ring, queue->qid);
+
+		if (ready) {
+			WRITE_ONCE(fiq->ops, &fuse_io_uring_ops);
+			WRITE_ONCE(ring->ready, true);
+		}
+	}
 }
 
 /*
@@ -972,3 +1032,123 @@ int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
 
 	return -EIOCBQUEUED;
 }
+
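+/*
+ * Return the ring entry to the daemon: mark it as being in userspace and
+ * complete the io_uring command, which posts a CQE to the server.
+ */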
+static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
+			    ssize_t ret, unsigned int issue_flags)
+{
+	struct fuse_ring_queue *queue = ent->queue;
+
+	spin_lock(&queue->lock);
+	ent->state = FRRS_USERSPACE;
+	list_move(&ent->list, &queue->ent_in_userspace);
+	ent->cmd = NULL;
+	spin_unlock(&queue->lock);
+
+	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have
+ * permission to write to them - so this has to be executed in ring task
+ * context.
+ */
+static void fuse_uring_send_in_task(struct io_uring_cmd *cmd,
+				    unsigned int issue_flags)
+{
+	struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+	struct fuse_ring_queue *queue = ent->queue;
+	int err;
+
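+	/* if the ring task is already exiting, only cancel the request */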
+	if (!(issue_flags & IO_URING_F_TASK_DEAD)) {
+		err = fuse_uring_prepare_send(ent, ent->fuse_req);
+		if (err) {
+			fuse_uring_next_fuse_req(ent, queue, issue_flags);
+			return;
+		}
+	} else {
+		err = -ECANCELED;
+	}
+
+	fuse_uring_send(ent, cmd, err, issue_flags);
+}
+
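+/*
+ * Map the submitting task's CPU to a ring queue (one queue per core);
+ * warn and fall back to queue 0 if the CPU number is out of range.
+ */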
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+	unsigned int qid;
+	struct fuse_ring_queue *queue;
+
+	qid = task_cpu(current);
+
+	if (WARN_ONCE(qid >= ring->nr_queues,
+		      "Core number (%u) exceeds nr queues (%zu)\n", qid,
+		      ring->nr_queues))
+		qid = 0;
+
+	queue = ring->queues[qid];
+	WARN_ONCE(!queue, "Missing queue for qid %u\n", qid);
+
+	return queue;
+}
+
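+/* hand the entry to task work so the send runs in the ring task's context */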
+static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent)
+{
+	struct io_uring_cmd *cmd = ent->cmd;
+
+	uring_cmd_set_ring_ent(cmd, ent);
+	io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task);
+}
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+	struct fuse_conn *fc = req->fm->fc;
+	struct fuse_ring *ring = fc->ring;
+	struct fuse_ring_queue *queue;
+	struct fuse_ring_ent *ent = NULL;
+	int err;
+
+	err = -EINVAL;
+	queue = fuse_uring_task_to_queue(ring);
+	if (!queue)
+		goto err;
+
+	if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+		req->in.h.unique = fuse_get_unique(fiq);
+
+	spin_lock(&queue->lock);
+	err = -ENOTCONN;
+	if (unlikely(queue->stopped))
+		goto err_unlock;
+
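+	/*
+	 * Pair the request with an available ring entry if there is one;
+	 * otherwise park it until the daemon registers a new entry.
+	 */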
+	ent = list_first_entry_or_null(&queue->ent_avail_queue,
+				       struct fuse_ring_ent, list);
+	if (ent)
+		fuse_uring_add_req_to_ring_ent(ent, req);
+	else
+		list_add_tail(&req->list, &queue->fuse_req_queue);
+	spin_unlock(&queue->lock);
+
+	if (ent)
+		fuse_uring_dispatch_ent(ent);
+
+	return;
+
+err_unlock:
+	spin_unlock(&queue->lock);
+err:
+	req->out.h.error = err;
+	clear_bit(FR_PENDING, &req->flags);
+	fuse_request_end(req);
+}
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+	/* should be sent over io-uring as an enhancement */
+	.send_forget = fuse_dev_queue_forget,
+
+	/*
+	 * could be sent over io-uring, but interrupts should be rare,
+	 * so there is no need to make the code complex
+	 */
+	.send_interrupt = fuse_dev_queue_interrupt,
+	.send_req = fuse_uring_queue_fuse_req,
+};
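
For orientation, a rough sketch of the caller side that consumes these ops. This is not part of the patch; the shape of fuse_send_one() in fs/fuse/dev.c is an assumption based on the existing request-queueing helper, abbreviated here. Once fuse_uring_do_register() flips fiq->ops to fuse_io_uring_ops, this path delivers requests to fuse_uring_queue_fuse_req() instead of the /dev/fuse queue:

static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	/* fill in the total length of the in-header plus argument payload */
	req->in.h.len = sizeof(struct fuse_in_header) +
			fuse_len_args(req->args->in_numargs,
				      (struct fuse_arg *)req->args->in_args);
	/* with the ring ready, this lands in fuse_uring_queue_fuse_req() */
	fiq->ops->send_req(fiq, req);
}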