@@ -459,7 +459,7 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
459459 io_buf_size = dev -> dev_info .max_io_buf_bytes ;
460460 for (i = 0 ; i < q -> q_depth ; i ++ ) {
461461 q -> ios [i ].buf_addr = NULL ;
462- q -> ios [i ].flags = UBLKSRV_NEED_FETCH_RQ | UBLKSRV_IO_FREE ;
462+ q -> ios [i ].flags = UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE ;
463463 q -> ios [i ].tag = i ;
464464
465465 if (ublk_queue_no_buf (q ))
@@ -591,22 +591,22 @@ int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
591591 __u64 user_data ;
592592
593593 /* only freed io can be issued */
594- if (!(io -> flags & UBLKSRV_IO_FREE ))
594+ if (!(io -> flags & UBLKS_IO_FREE ))
595595 return 0 ;
596596
597597 /*
598598 * we issue because we need either fetching or committing or
599599 * getting data
600600 */
601601 if (!(io -> flags &
602- (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA )))
602+ (UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_NEED_COMMIT_RQ_COMP | UBLKS_IO_NEED_GET_DATA )))
603603 return 0 ;
604604
605- if (io -> flags & UBLKSRV_NEED_GET_DATA )
605+ if (io -> flags & UBLKS_IO_NEED_GET_DATA )
606606 cmd_op = UBLK_U_IO_NEED_GET_DATA ;
607- else if (io -> flags & UBLKSRV_NEED_COMMIT_RQ_COMP )
607+ else if (io -> flags & UBLKS_IO_NEED_COMMIT_RQ_COMP )
608608 cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ ;
609- else if (io -> flags & UBLKSRV_NEED_FETCH_RQ )
609+ else if (io -> flags & UBLKS_IO_NEED_FETCH_RQ )
610610 cmd_op = UBLK_U_IO_FETCH_REQ ;
611611
612612 if (io_uring_sq_space_left (& t -> ring ) < 1 )
@@ -649,7 +649,7 @@ int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
649649
650650 ublk_dbg (UBLK_DBG_IO_CMD , "%s: (thread %u qid %d tag %u cmd_op %u) iof %x stopping %d\n" ,
651651 __func__ , t -> idx , q -> q_id , io -> tag , cmd_op ,
652- io -> flags , !!(t -> state & UBLKSRV_THREAD_STOPPING ));
652+ io -> flags , !!(t -> state & UBLKS_T_STOPPING ));
653653 return 1 ;
654654}
655655
@@ -701,7 +701,7 @@ static int ublk_thread_is_idle(struct ublk_thread *t)
701701
702702static int ublk_thread_is_done (struct ublk_thread * t )
703703{
704- return (t -> state & UBLKSRV_THREAD_STOPPING ) && ublk_thread_is_idle (t );
704+ return (t -> state & UBLKS_T_STOPPING ) && ublk_thread_is_idle (t );
705705}
706706
707707static inline void ublksrv_handle_tgt_cqe (struct ublk_thread * t ,
@@ -727,7 +727,7 @@ static void ublk_handle_cqe(struct ublk_thread *t,
727727 unsigned tag = user_data_to_tag (cqe -> user_data );
728728 unsigned cmd_op = user_data_to_op (cqe -> user_data );
729729 int fetch = (cqe -> res != UBLK_IO_RES_ABORT ) &&
730- !(t -> state & UBLKSRV_THREAD_STOPPING );
730+ !(t -> state & UBLKS_T_STOPPING );
731731 struct ublk_io * io ;
732732
733733 if (cqe -> res < 0 && cqe -> res != - ENODEV )
@@ -738,7 +738,7 @@ static void ublk_handle_cqe(struct ublk_thread *t,
738738 __func__ , cqe -> res , q -> q_id , tag , cmd_op ,
739739 is_target_io (cqe -> user_data ),
740740 user_data_to_tgt_data (cqe -> user_data ),
741- (t -> state & UBLKSRV_THREAD_STOPPING ));
741+ (t -> state & UBLKS_T_STOPPING ));
742742
743743 /* Don't retrieve io in case of target io */
744744 if (is_target_io (cqe -> user_data )) {
@@ -750,27 +750,27 @@ static void ublk_handle_cqe(struct ublk_thread *t,
750750 t -> cmd_inflight -- ;
751751
752752 if (!fetch ) {
753- t -> state |= UBLKSRV_THREAD_STOPPING ;
754- io -> flags &= ~UBLKSRV_NEED_FETCH_RQ ;
753+ t -> state |= UBLKS_T_STOPPING ;
754+ io -> flags &= ~UBLKS_IO_NEED_FETCH_RQ ;
755755 }
756756
757757 if (cqe -> res == UBLK_IO_RES_OK ) {
758758 assert (tag < q -> q_depth );
759759 if (q -> tgt_ops -> queue_io )
760760 q -> tgt_ops -> queue_io (t , q , tag );
761761 } else if (cqe -> res == UBLK_IO_RES_NEED_GET_DATA ) {
762- io -> flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE ;
762+ io -> flags |= UBLKS_IO_NEED_GET_DATA | UBLKS_IO_FREE ;
763763 ublk_queue_io_cmd (t , io );
764764 } else {
765765 /*
766766 * COMMIT_REQ will be completed immediately since no fetching
767767 * piggyback is required.
768768 *
769769 * Marking IO_FREE only, then this io won't be issued since
770- * we only issue io with (UBLKSRV_IO_FREE | UBLKSRV_NEED_*)
770+ * we only issue io with (UBLKS_IO_FREE | UBLKS_IO_NEED_*)
771771 *
772772 * */
773- io -> flags = UBLKSRV_IO_FREE ;
773+ io -> flags = UBLKS_IO_FREE ;
774774 }
775775}
776776
@@ -797,7 +797,7 @@ static int ublk_process_io(struct ublk_thread *t)
797797 t -> dev -> dev_info .dev_id ,
798798 t -> idx , io_uring_sq_ready (& t -> ring ),
799799 t -> cmd_inflight ,
800- (t -> state & UBLKSRV_THREAD_STOPPING ));
800+ (t -> state & UBLKS_T_STOPPING ));
801801
802802 if (ublk_thread_is_done (t ))
803803 return - ENODEV ;
@@ -806,8 +806,8 @@ static int ublk_process_io(struct ublk_thread *t)
806806 reapped = ublk_reap_events_uring (t );
807807
808808 ublk_dbg (UBLK_DBG_THREAD , "submit result %d, reapped %d stop %d idle %d\n" ,
809- ret , reapped , (t -> state & UBLKSRV_THREAD_STOPPING ),
810- (t -> state & UBLKSRV_THREAD_IDLE ));
809+ ret , reapped , (t -> state & UBLKS_T_STOPPING ),
810+ (t -> state & UBLKS_T_IDLE ));
811811
812812 return reapped ;
813813}
@@ -926,7 +926,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
926926 return ret ;
927927
928928 if (ctx -> auto_zc_fallback )
929- extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK ;
929+ extra_flags = UBLKS_Q_AUTO_BUF_REG_FALLBACK ;
930930
931931 for (i = 0 ; i < dinfo -> nr_hw_queues ; i ++ ) {
932932 dev -> q [i ].dev = dev ;
0 commit comments