@@ -482,13 +482,13 @@ static inline int queue_out_packet(struct amdtp_stream *s,
 }
 
 static inline int queue_in_packet(struct amdtp_stream *s,
-				  struct fw_iso_packet *params, bool sched_irq)
+				  struct fw_iso_packet *params)
 {
 	// Queue one packet for IR context.
 	params->header_length = s->ctx_data.tx.ctx_header_size;
 	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
 	params->skip = false;
-	return queue_packet(s, params, sched_irq);
+	return queue_packet(s, params, false);
 }
 
 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
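Note: with this change, packets queued for an IR context never request a hardware interrupt; interrupt scheduling moves entirely to the IT context picked as IRQ target below. For reference, a rough sketch of the queue_packet() helper both wrappers call (its body is not part of this diff, so details such as the exact error handling are assumed):

	static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
				bool sched_irq)
	{
		// The sched_irq flag becomes the 'interrupt' bit of the packet
		// descriptor, i.e. "raise a hardware IRQ when this packet completes".
		params->interrupt = sched_irq;
		params->tag = s->tag;
		params->sy = 0;

		return fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
					    s->buffer.packets[s->packet_index].offset);
	}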
@@ -790,15 +790,24 @@ static void process_ctx_payloads(struct amdtp_stream *s,
 		update_pcm_pointers(s, pcm, pcm_frames);
 }
 
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+					 u32 tstamp, size_t header_length,
+					 void *header, void *private_data);
+
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+						u32 tstamp, size_t header_length,
+						void *header, void *private_data);
+
 static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 				size_t header_length, void *header,
 				void *private_data)
 {
 	struct amdtp_stream *s = private_data;
 	const __be32 *ctx_header = header;
-	unsigned int events_per_period = s->events_per_period;
-	unsigned int event_count = s->event_count;
+	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
+	unsigned int event_count = s->ctx_data.rx.event_count;
 	unsigned int packets;
+	bool is_irq_target;
 	int i;
 
 	if (s->packet_index < 0)
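Note: the per-stream period bookkeeping now lives under ctx_data.rx and the domain gains an irq_target pointer. Those structure changes belong to amdtp-stream.h and are not shown in this excerpt, but from the usage here they look roughly like:

	// Assumed shape of the matching header change (amdtp-stream.h),
	// inferred from how the fields are used in this diff.
	struct amdtp_stream {
		// ...
		union {
			struct {
				unsigned int ctx_header_size;
				unsigned int max_ctx_payload_length;
				// ...
			} tx;
			struct {
				// ...
				// Valid only for the IT context chosen as IRQ target.
				unsigned int events_per_period;
				unsigned int event_count;
			} rx;
		} ctx_data;
		// ...
	};

	struct amdtp_domain {
		struct list_head streams;
		unsigned int events_per_period;
		unsigned int events_per_buffer;
		struct amdtp_stream *irq_target;	// generates the hardware IRQs
	};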
@@ -811,6 +820,10 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 
 	process_ctx_payloads(s, s->pkt_descs, packets);
 
+	is_irq_target =
+		!!(context->callback.sc == amdtp_stream_master_callback ||
+		   context->callback.sc == amdtp_stream_master_first_callback);
+
 	for (i = 0; i < packets; ++i) {
 		const struct pkt_desc *desc = s->pkt_descs + i;
 		unsigned int syt;
@@ -829,10 +842,12 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 				desc->data_blocks, desc->data_block_counter,
 				syt, i);
 
-		event_count += desc->data_blocks;
-		if (event_count >= events_per_period) {
-			event_count -= events_per_period;
-			sched_irq = true;
+		if (is_irq_target) {
+			event_count += desc->data_blocks;
+			if (event_count >= events_per_period) {
+				event_count -= events_per_period;
+				sched_irq = true;
+			}
 		}
 
 		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
@@ -841,7 +856,7 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 		}
 	}
 
-	s->event_count = event_count;
+	s->ctx_data.rx.event_count = event_count;
 }
 
 static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
@@ -850,8 +865,6 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 {
 	struct amdtp_stream *s = private_data;
 	__be32 *ctx_header = header;
-	unsigned int events_per_period = s->events_per_period;
-	unsigned int event_count = s->event_count;
 	unsigned int packets;
 	int i;
 	int err;
@@ -873,31 +886,47 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 	}
 
 	for (i = 0; i < packets; ++i) {
-		const struct pkt_desc *desc = s->pkt_descs + i;
 		struct fw_iso_packet params = {0};
-		bool sched_irq = false;
-
-		if (err >= 0) {
-			event_count += desc->data_blocks;
-			if (event_count >= events_per_period) {
-				event_count -= events_per_period;
-				sched_irq = true;
-			}
-		} else {
-			sched_irq =
-				!((s->packet_index + 1) % s->idle_irq_interval);
-		}
 
-		if (queue_in_packet(s, &params, sched_irq) < 0) {
+		if (queue_in_packet(s, &params) < 0) {
 			cancel_stream(s);
 			return;
 		}
 	}
+}
+
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+					 u32 tstamp, size_t header_length,
+					 void *header, void *private_data)
+{
+	struct amdtp_domain *d = private_data;
+	struct amdtp_stream *irq_target = d->irq_target;
+	struct amdtp_stream *s;
+
+	out_stream_callback(context, tstamp, header_length, header, irq_target);
+	if (amdtp_streaming_error(irq_target))
+		goto error;
 
-	s->event_count = event_count;
+	list_for_each_entry(s, &d->streams, list) {
+		if (s != irq_target && amdtp_stream_running(s)) {
+			fw_iso_context_flush_completions(s->context);
+			if (amdtp_streaming_error(s))
+				goto error;
+		}
+	}
+
+	return;
+error:
+	if (amdtp_stream_running(irq_target))
+		cancel_stream(irq_target);
+
+	list_for_each_entry(s, &d->streams, list) {
+		if (amdtp_stream_running(s))
+			cancel_stream(s);
+	}
 }
 
-/* this is executed one time */
+// this is executed one time.
 static void amdtp_stream_first_callback(struct fw_iso_context *context,
 					u32 tstamp, size_t header_length,
 					void *header, void *private_data)
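Note: amdtp_stream_master_callback() runs from the IRQ target's completion handler, processes that IT context first, and then pulls every other running context forward with fw_iso_context_flush_completions(), so all streams of the domain are serviced from one hardware interrupt. The checks it makes use the small inline helpers from amdtp-stream.h, which (roughly, as assumed here) read:

	static inline bool amdtp_stream_running(struct amdtp_stream *s)
	{
		// The context pointer holds an ERR_PTR() until the stream is started.
		return !IS_ERR(s->context);
	}

	static inline bool amdtp_streaming_error(struct amdtp_stream *s)
	{
		// Packet handlers set packet_index to a negative value on error.
		return s->packet_index < 0;
	}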
@@ -928,18 +957,39 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
 	context->callback.sc(context, tstamp, header_length, header, s);
 }
 
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+						u32 tstamp, size_t header_length,
+						void *header, void *private_data)
+{
+	struct amdtp_domain *d = private_data;
+	struct amdtp_stream *s = d->irq_target;
+	const __be32 *ctx_header = header;
+
+	s->callbacked = true;
+	wake_up(&s->callback_wait);
+
+	s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);
+
+	context->callback.sc = amdtp_stream_master_callback;
+
+	context->callback.sc(context, tstamp, header_length, header, d);
+}
+
 /**
  * amdtp_stream_start - start transferring packets
  * @s: the AMDTP stream to start
  * @channel: the isochronous channel on the bus
  * @speed: firewire speed code
+ * @d: the AMDTP domain to which the AMDTP stream belongs
+ * @is_irq_target: whether isoc context for the AMDTP stream is used to generate
+ *		   hardware IRQ.
  *
  * The stream cannot be started until it has been configured with
  * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
  * device can be started.
  */
 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
-			      struct amdtp_domain *d)
+			      struct amdtp_domain *d, bool is_irq_target)
 {
 	static const struct {
 		unsigned int data_block;
@@ -955,10 +1005,13 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 	};
 	unsigned int events_per_buffer = d->events_per_buffer;
 	unsigned int events_per_period = d->events_per_period;
+	unsigned int idle_irq_interval;
 	unsigned int ctx_header_size;
 	unsigned int max_ctx_payload_size;
 	enum dma_data_direction dir;
 	int type, tag, err;
+	fw_iso_callback_t ctx_cb;
+	void *ctx_data;
 
 	mutex_lock(&s->mutex);
 
@@ -969,6 +1022,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 	}
 
 	if (s->direction == AMDTP_IN_STREAM) {
+		// NOTE: IT context should be used for constant IRQ.
+		if (is_irq_target) {
+			err = -EINVAL;
+			goto err_unlock;
+		}
+
 		s->data_block_counter = UINT_MAX;
 	} else {
 		entry = &initial_state[s->sfc];
@@ -1008,22 +1067,29 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 	if (events_per_buffer == 0)
 		events_per_buffer = events_per_period * 3;
 
-	s->idle_irq_interval =
-		DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
-			     amdtp_rate_table[s->sfc]);
+	idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+					 amdtp_rate_table[s->sfc]);
 	s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
 				     amdtp_rate_table[s->sfc]);
-	s->events_per_period = events_per_period;
-	s->event_count = 0;
 
 	err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
 				      max_ctx_payload_size, dir);
 	if (err < 0)
 		goto err_unlock;
 
+	if (is_irq_target) {
+		s->ctx_data.rx.events_per_period = events_per_period;
+		s->ctx_data.rx.event_count = 0;
+		ctx_cb = amdtp_stream_master_first_callback;
+		ctx_data = d;
+	} else {
+		ctx_cb = amdtp_stream_first_callback;
+		ctx_data = s;
+	}
+
 	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
 					   type, channel, speed, ctx_header_size,
-					   amdtp_stream_first_callback, s);
+					   ctx_cb, ctx_data);
 	if (IS_ERR(s->context)) {
 		err = PTR_ERR(s->context);
 		if (err == -EBUSY)
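Note: as a worked example for the two computations above, at 48 kHz (amdtp_rate_table[s->sfc] == 48000) with the usual CYCLES_PER_SECOND of 8000, a period of 480 events and the default buffer of three periods (1440 events), the values come out to:

	idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000);	// = 80 cycles between IRQs
	s->queue_size = DIV_ROUND_UP(8000 * 1440, 48000);	// = 240 packets in the queue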
@@ -1054,14 +1120,20 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
 	s->packet_index = 0;
 	do {
 		struct fw_iso_packet params;
-		bool sched_irq;
 
-		sched_irq = !((s->packet_index + 1) % s->idle_irq_interval);
 		if (s->direction == AMDTP_IN_STREAM) {
-			err = queue_in_packet(s, &params, sched_irq);
+			err = queue_in_packet(s, &params);
 		} else {
+			bool sched_irq = false;
+
 			params.header_length = 0;
 			params.payload_length = 0;
+
+			if (is_irq_target) {
+				sched_irq = !((s->packet_index + 1) %
+					      idle_irq_interval);
+			}
+
 			err = queue_out_packet(s, &params, sched_irq);
 		}
 		if (err < 0)
@@ -1276,17 +1348,33 @@ int amdtp_domain_start(struct amdtp_domain *d)
 	struct amdtp_stream *s;
 	int err = 0;
 
+	// Select an IT context as IRQ target.
 	list_for_each_entry(s, &d->streams, list) {
-		err = amdtp_stream_start(s, s->channel, s->speed, d);
-		if (err < 0)
+		if (s->direction == AMDTP_OUT_STREAM)
 			break;
 	}
+	if (!s)
+		return -ENXIO;
+	d->irq_target = s;
 
-	if (err < 0) {
-		list_for_each_entry(s, &d->streams, list)
-			amdtp_stream_stop(s);
+	list_for_each_entry(s, &d->streams, list) {
+		if (s != d->irq_target) {
+			err = amdtp_stream_start(s, s->channel, s->speed, d,
+						 false);
+			if (err < 0)
+				goto error;
+		}
 	}
 
+	s = d->irq_target;
+	err = amdtp_stream_start(s, s->channel, s->speed, d, true);
+	if (err < 0)
+		goto error;
+
+	return 0;
+error:
+	list_for_each_entry(s, &d->streams, list)
+		amdtp_stream_stop(s);
 	return err;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_start);
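Note: from a driver's point of view, the domain API ends up being used roughly as below. This is a sketch assuming the existing amdtp_domain_init()/amdtp_domain_add_stream()/amdtp_domain_destroy() helpers from the same file; the stream and channel variables are placeholders and error handling is trimmed:

	struct amdtp_domain domain;

	amdtp_domain_init(&domain);
	amdtp_domain_add_stream(&domain, &playback_stream, rx_channel, speed);
	amdtp_domain_add_stream(&domain, &capture_stream, tx_channel, speed);

	// Picks an IT (playback) stream as IRQ target, starts the other
	// contexts first, then starts the IRQ target itself.
	err = amdtp_domain_start(&domain);

	// ... PCM substreams run ...

	amdtp_domain_stop(&domain);
	amdtp_domain_destroy(&domain);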
@@ -1299,12 +1387,17 @@ void amdtp_domain_stop(struct amdtp_domain *d)
 {
 	struct amdtp_stream *s, *next;
 
+	if (d->irq_target)
+		amdtp_stream_stop(d->irq_target);
+
 	list_for_each_entry_safe(s, next, &d->streams, list) {
 		list_del(&s->list);
 
-		amdtp_stream_stop(s);
+		if (s != d->irq_target)
+			amdtp_stream_stop(s);
 	}
 
 	d->events_per_period = 0;
+	d->irq_target = NULL;
 }
 EXPORT_SYMBOL_GPL(amdtp_domain_stop);