@@ -1,7 +1,5 @@
 #include "proxy-conn.hpp"
 
-//#define SYNC_TRANSFER
-
 /**
  *
  *
@@ -110,28 +108,76 @@ void connection::shutdown()
 
 bool connection::on_request(const gdb_packet& pkt)
 {
-    if (pkt.type() == gdb_packet_type::generic) {
+    if (pkt.type() == gdb_packet_type::dat) {
         std::clog << "req: " << ++seq << std::endl;
+
+#if 0
+    //if (seq > 4)
+    {
+        const static char raw[] = R"#($qXfer:threads:read::0,1000#92)#";
+        gdb_packet internal_pkt;
+        internal_pkt.parse(raw, sizeof(raw));
+
+        push_internal_request(std::move(internal_pkt), [this](auto &req, auto& resp) {
+            std::clog << "response to internal request:\n"
+                      << " <- " << req.raw_data() << '\n'
+                      << " -> " << resp.raw_data() << std::endl;
+        });
+    }
+#endif
     }
 
     return false;
 }
 
 bool connection::on_response(const gdb_packet& pkt)
 {
-    if (pkt.type() == gdb_packet_type::generic) {
-        std::clog << "rsp: " << seq << std::endl;
+    auto &requests = m_requests_channel.transfers_queue();
+    auto const& req = requests.front();
+    auto const& req_pkt = req.pkt;
 
-        auto &queue = m_requests_channel.transfers_queue();
-        auto const& req = queue.front();
-        auto const& req_pkt = req.pkt;
+    std::clog << "rsp: " << "type=" << (int)pkt.type() << " internal=" << req.is_internal << std::endl;
 
+    if (pkt.type() == gdb_packet_type::dat) {
+        std::clog << "rsp: " << seq << " (count:" << m_requests_channel.transfers_queue().size() << ")" << std::endl;
         std::clog << "rsp to the req: " << req_pkt.raw_data() << std::endl;
-        queue.pop_front();
+
+        // Check ACK mode
+        if (req_pkt.data().substr(0, 15) == "QStartNoAckMode") {
+            if (pkt.data().substr(0, 2) == "OK") {
+                m_ack_mode = false;
+            }
+        }
+
+        if (req.on_response) {
+            req.on_response(req_pkt, pkt);
+            if (req.is_internal) {
+                if (m_ack_mode) {
+                    push_internal_request(gdb_packet_type::ack);
+                }
+            }
+        }
+
+        requests.pop_front();
+
+        if (req.is_internal)
+            return true;
+    } else if (pkt.type() == gdb_packet_type::ack) {
+        if (req.is_internal)
+            return true;
     }
     return false;
 }
 
+void connection::push_internal_request(gdb_packet &&req, transfer::on_response_cb cb)
+{
+    m_requests_channel.start_write(shared_from_this(), {std::move(req), transfer::internal(), std::move(cb)});
+    if (m_ack_mode) {
+        // HACK: if we want to NAK an internal request, we must not immediately send another request
+        m_requests_channel.start_write(shared_from_this(), {gdb_packet(gdb_packet_type::ack), transfer::internal()});
+    }
+}
+
 //
 // Channel
 //
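Note on the #if 0 probe added to on_request(): the hard-coded literal is a fully framed GDB remote-serial-protocol packet. RSP frames look like $<payload>#<xx>, where <xx> is the payload's byte sum modulo 256 printed as two lowercase hex digits, and "qXfer:threads:read::0,1000" does sum to 0x92. One caveat: sizeof(raw) includes the string's trailing NUL, so whether parse() accepts that extra byte depends on its implementation, which this diff doesn't show. A self-contained sketch of the framing rule (gdb_frame is a hypothetical helper, not part of this patch):

#include <cstdio>
#include <string>

// Sketch only: frame a GDB RSP payload as "$<payload>#<xx>", where <xx> is
// the modulo-256 sum of the payload bytes in two lowercase hex digits.
static std::string gdb_frame(const std::string& payload)
{
    unsigned sum = 0;
    for (unsigned char c : payload)
        sum = (sum + c) & 0xff;
    char tail[4];
    std::snprintf(tail, sizeof(tail), "#%02x", sum);
    return "$" + payload + tail;
}

int main()
{
    // Prints "$qXfer:threads:read::0,1000#92" -- the literal used above.
    std::puts(gdb_frame("qXfer:threads:read::0,1000").c_str());
}

This framing is also why the ACK-mode bookkeeping in on_response() matters: until the stub accepts QStartNoAckMode with "OK", every data packet is answered with a '+' (or '-' to request retransmission), so the proxy has to generate and absorb those transport acks for the packets it injects itself.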
@@ -177,7 +223,7 @@ void connection::channel::start_write(std::shared_ptr<connection> con, transfer
         assert(transfered == total && "Unhandled case, when server receive less data than was sent");
         if (!ec) {
             // Data packets must be cleaned at the Response handler
-            if (pkt.type() != gdb_packet_type::generic || m_dir == responses || !m_handler)
+            if (pkt.type() != gdb_packet_type::dat || m_dir == responses || !m_handler)
                 m_transfers.pop_front();
         } else {
             con->shutdown();
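The new code also leans on declarations that live in proxy-conn.hpp and are not part of this diff: the dat/ack enumerators of gdb_packet_type (dat renamed from generic here), and the transfer record with its pkt, is_internal, and on_response members plus the transfer::internal() tag. The sketch below is hypothetical, only the smallest shape consistent with the usage above; the real header may well differ. Note too that returning true for internal transfers in on_response() appears to tell the caller to swallow the stub's reply instead of forwarding it, so injected requests stay invisible to the attached debugger.

#include <functional>
#include <string>
#include <utility>

// Hypothetical sketch, NOT part of this commit: minimal shapes consistent
// with how on_response() and push_internal_request() use them above.
enum class gdb_packet_type { dat, ack, nak };   // names beyond dat/ack assumed

struct gdb_packet {                             // stand-in for the real class
    explicit gdb_packet(gdb_packet_type t = gdb_packet_type::dat) : m_type(t) {}
    gdb_packet_type type() const { return m_type; }
    const std::string& data() const { return m_payload; }
private:
    gdb_packet_type m_type;
    std::string m_payload;
};

struct transfer {
    using on_response_cb =
        std::function<void(const gdb_packet& req, const gdb_packet& resp)>;

    // Tag so `transfer::internal()` marks packets injected by the proxy
    // itself, which on_response() later filters instead of forwarding.
    struct internal_tag {};
    static internal_tag internal() { return {}; }

    transfer(gdb_packet p, on_response_cb cb = {})
        : pkt(std::move(p)), on_response(std::move(cb)) {}
    transfer(gdb_packet p, internal_tag, on_response_cb cb = {})
        : pkt(std::move(p)), is_internal(true), on_response(std::move(cb)) {}

    gdb_packet pkt;               // outgoing request
    bool is_internal = false;     // true for proxy-injected packets
    on_response_cb on_response;   // optional completion hook
};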