@@ -106,6 +106,8 @@ typedef enum GCDAsyncSocketError GCDAsyncSocketError;
* The socket queue is optional.
* If you pass NULL, GCDAsyncSocket will automatically create its own socket queue.
* If you choose to provide a socket queue, the socket queue must not be a concurrent queue.
+ * If you choose to provide a socket queue, and the socket queue has a configured target queue,
+ * then please see the discussion for the method markSocketQueueTargetQueue.
*
* The delegate queue and socket queue can optionally be the same.
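+ *
+ * For example, a minimal sketch of providing your own serial socket queue
+ * (the queue label and the delegate queue used here are only placeholders):
+ *
+ * dispatch_queue_t socketQueue = dispatch_queue_create("socket", DISPATCH_QUEUE_SERIAL);
+ * GCDAsyncSocket *socket = [[GCDAsyncSocket alloc] initWithDelegate:self
+ *                                                     delegateQueue:dispatch_get_main_queue()
+ *                                                       socketQueue:socketQueue];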
**/
@@ -744,6 +746,81 @@ typedef enum GCDAsyncSocketError GCDAsyncSocketError;
- (BOOL)autoDisconnectOnClosedReadStream;
- (void)setAutoDisconnectOnClosedReadStream:(BOOL)flag;

+ /**
+ * GCDAsyncSocket maintains thread safety by using an internal serial dispatch_queue.
+ * In most cases, the instance creates this queue itself.
+ * However, to allow for maximum flexibility, the internal queue may be passed in the init method.
+ * This allows for some advanced options such as controlling socket priority via target queues.
+ * However, when you begin to use target queues like this, you open the door to some specific deadlock issues.
+ *
+ * For example, imagine there are 2 queues:
+ * dispatch_queue_t socketQueue;
+ * dispatch_queue_t socketTargetQueue;
+ *
+ * If you do this (pseudo-code):
+ * socketQueue.targetQueue = socketTargetQueue;
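+ * (in actual code that assignment is made with the dispatch_set_target_queue() API:
+ *      dispatch_set_target_queue(socketQueue, socketTargetQueue); )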
+ *
+ * Then all socketQueue operations will actually get run on the given socketTargetQueue.
+ * This is fine and works great in most situations.
+ * But if you run code directly from within the socketTargetQueue that accesses the socket,
+ * you could potentially get deadlock. Imagine the following code:
+ *
+ * - (BOOL)socketHasSomething
+ * {
+ *     __block BOOL result = NO;
+ *     dispatch_block_t block = ^{
+ *         result = [self someInternalMethodToBeRunOnlyOnSocketQueue];
+ *     };
+ *     if (is_executing_on_queue(socketQueue))
+ *         block();
+ *     else
+ *         dispatch_sync(socketQueue, block);
+ *
+ *     return result;
+ * }
+ *
+ * What happens if you call this method from the socketTargetQueue? The result is deadlock:
+ * the code is not executing on socketQueue itself, so it falls through to dispatch_sync(socketQueue, block);
+ * but blocks submitted to socketQueue actually run on socketTargetQueue, which is stuck inside that
+ * dispatch_sync waiting for the block, so the block never gets a chance to execute.
+ * This situation can't be detected because the GCD API offers no mechanism to discover a queue's targetQueue.
+ * Thus we have no idea if our socketQueue is configured with a targetQueue.
+ * If we had this information, we could easily avoid deadlock.
+ * But since this part of the API is missing, you'll have to tell the socket about the target queue explicitly.
+ *
+ * IF you pass a socketQueue via the init method,
+ * AND you've configured the passed socketQueue with a targetQueue,
+ * THEN you should pass the end queue in the target hierarchy.
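+ *
+ * In the simplest case, where you create and configure the socket yourself, that looks
+ * roughly like this (a sketch; the queue names are only placeholders):
+ *
+ * dispatch_queue_t socketQueue = dispatch_queue_create("socket", NULL);
+ * dispatch_queue_t ioQueue     = dispatch_queue_create("io", NULL);
+ * dispatch_queue_t rootQueue   = dispatch_queue_create("root", NULL);
+ *
+ * dispatch_set_target_queue(socketQueue, ioQueue);
+ * dispatch_set_target_queue(ioQueue, rootQueue);
+ *
+ * GCDAsyncSocket *socket = [[GCDAsyncSocket alloc] initWithDelegate:self
+ *                                                     delegateQueue:dispatch_get_main_queue()
+ *                                                       socketQueue:socketQueue];
+ * [socket markSocketQueueTargetQueue:rootQueue]; // rootQueue is the end of the target hierarchy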
+ *
+ * For example, consider the following queue hierarchy:
+ * socketQueue -> ipQueue -> moduleQueue
+ *
+ * This example demonstrates priority shaping within some server.
+ * All incoming client connections from the same IP address are executed on the same target queue.
+ * And all connections for a particular module are executed on the same target queue.
+ * Thus, the priority of all networking for the entire module can be changed on the fly.
+ * Additionally, networking traffic from a single IP cannot monopolize the module.
+ *
+ * Here's how you would accomplish something like that:
+ *
+ * - (dispatch_queue_t)newSocketQueueForConnectionFromAddress:(NSData *)address onSocket:(GCDAsyncSocket *)sock
+ * {
+ *     dispatch_queue_t socketQueue = dispatch_queue_create("", NULL);
+ *     dispatch_queue_t ipQueue = [self ipQueueForAddress:address];
+ *
+ *     dispatch_set_target_queue(socketQueue, ipQueue);
+ *     dispatch_set_target_queue(ipQueue, moduleQueue);
+ *
+ *     return socketQueue;
+ * }
+ *
+ * - (void)socket:(GCDAsyncSocket *)sock didAcceptNewSocket:(GCDAsyncSocket *)newSocket
+ * {
+ *     [clientConnections addObject:newSocket];
+ *     [newSocket markSocketQueueTargetQueue:moduleQueue];
+ * }
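+ *
+ * (The ipQueueForAddress: helper above is not part of GCDAsyncSocket; one possible sketch,
+ *  assuming a hypothetical NSMutableDictionary ivar named ipQueues keyed by host string:)
+ *
+ * - (dispatch_queue_t)ipQueueForAddress:(NSData *)address
+ * {
+ *     NSString *host = [GCDAsyncSocket hostFromAddress:address];
+ *     dispatch_queue_t ipQueue = ipQueues[host];
+ *     if (ipQueue == nil)
+ *     {
+ *         ipQueue = dispatch_queue_create("ip", NULL);
+ *         ipQueues[host] = ipQueue;
+ *     }
+ *     return ipQueue;
+ * }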
+ *
+ * Note: This workaround is ONLY needed if you intend to execute code directly on the ipQueue or moduleQueue.
+ * This is often NOT the case, as such queues are used solely for execution shaping.
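+ *
+ * If a marked connection later goes away, the mark can be removed again; a sketch, using the
+ * standard socketDidDisconnect:withError: delegate callback:
+ *
+ * - (void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
+ * {
+ *     [sock unmarkSocketQueueTargetQueue:moduleQueue];
+ *     [clientConnections removeObject:sock];
+ * }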
+ **/
+ - (void)markSocketQueueTargetQueue:(dispatch_queue_t)socketQueuesPreConfiguredTargetQueue;
+ - (void)unmarkSocketQueueTargetQueue:(dispatch_queue_t)socketQueuesPreviouslyConfiguredTargetQueue;
+
/**
* It's not thread-safe to access certain variables from outside the socket's internal queue.
*