// write.ipp
  1. //
  2. // Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
  3. //
  4. // Distributed under the Boost Software License, Version 1.0. (See accompanying
  5. // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // Official repository: https://github.com/boostorg/beast
  8. //
  9. #ifndef BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP
  10. #define BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP
  11. #include <boost/beast/core/bind_handler.hpp>
  12. #include <boost/beast/core/buffers_cat.hpp>
  13. #include <boost/beast/core/buffers_prefix.hpp>
  14. #include <boost/beast/core/buffers_suffix.hpp>
  15. #include <boost/beast/core/flat_static_buffer.hpp>
  16. #include <boost/beast/core/type_traits.hpp>
  17. #include <boost/beast/core/detail/clamp.hpp>
  18. #include <boost/beast/core/detail/config.hpp>
  19. #include <boost/beast/websocket/detail/frame.hpp>
  20. #include <boost/asio/associated_allocator.hpp>
  21. #include <boost/asio/associated_executor.hpp>
  22. #include <boost/asio/coroutine.hpp>
  23. #include <boost/asio/executor_work_guard.hpp>
  24. #include <boost/asio/handler_continuation_hook.hpp>
  25. #include <boost/asio/handler_invoke_hook.hpp>
  26. #include <boost/assert.hpp>
  27. #include <boost/config.hpp>
  28. #include <boost/throw_exception.hpp>
  29. #include <algorithm>
  30. #include <memory>
  31. namespace boost {
  32. namespace beast {
  33. namespace websocket {
  34. namespace detail {
// Compress a buffer sequence
// Returns: `true` if more calls are needed
//
// out       On entry, the area to receive compressed output
//           (must be at least 6 bytes, to leave room for a
//           flush marker). On exit, resized to the bytes produced.
// cb        The input sequence; consumed by the amount used.
// fin       `true` if this is the last piece of the message.
// total_in  Set to the number of input bytes consumed.
// ec        Set to the error, if any occurred.
//
template<>
template<class ConstBufferSequence>
bool
stream_base<true>::
deflate(
    boost::asio::mutable_buffer& out,
    buffers_suffix<ConstBufferSequence>& cb,
    bool fin,
    std::size_t& total_in,
    error_code& ec)
{
    using boost::asio::buffer;
    BOOST_ASSERT(out.size() >= 6);
    auto& zo = this->pmd_->zo;
    zlib::z_params zs;
    zs.avail_in = 0;
    zs.next_in = nullptr;
    zs.avail_out = out.size();
    zs.next_out = out.data();
    // Feed each nonempty input buffer to the deflator until
    // either all input is consumed or the output area fills.
    for(auto in : beast::detail::buffers_range(cb))
    {
        zs.avail_in = in.size();
        if(zs.avail_in == 0)
            continue;
        zs.next_in = in.data();
        zo.write(zs, zlib::Flush::none, ec);
        if(ec)
        {
            if(ec != zlib::error::need_buffers)
                return false;
            // need_buffers here only means the output is full;
            // it is not an error for our caller.
            BOOST_ASSERT(zs.avail_out == 0);
            BOOST_ASSERT(zs.total_out == out.size());
            ec.assign(0, ec.category());
            break;
        }
        if(zs.avail_out == 0)
        {
            BOOST_ASSERT(zs.total_out == out.size());
            break;
        }
        BOOST_ASSERT(zs.avail_in == 0);
    }
    total_in = zs.total_in;
    cb.consume(zs.total_in);
    // If this is the final piece and all input was consumed,
    // flush the deflate stream so the frame can be terminated.
    if(zs.avail_out > 0 && fin)
    {
        auto const remain = boost::asio::buffer_size(cb);
        if(remain == 0)
        {
            // Inspired by Mark Adler
            // https://github.com/madler/zlib/issues/149
            //
            // VFALCO We could do this flush twice depending
            // on how much space is in the output.
            zo.write(zs, zlib::Flush::block, ec);
            BOOST_ASSERT(! ec || ec == zlib::error::need_buffers);
            if(ec == zlib::error::need_buffers)
                ec.assign(0, ec.category());
            if(ec)
                return false;
            if(zs.avail_out >= 6)
            {
                zo.write(zs, zlib::Flush::full, ec);
                BOOST_ASSERT(! ec);
                // remove flush marker
                zs.total_out -= 4;
                out = buffer(out.data(), zs.total_out);
                // `false`: the message is fully compressed
                return false;
            }
        }
    }
    ec.assign(0, ec.category());
    out = buffer(out.data(), zs.total_out);
    return true;
}
  113. template<>
  114. inline
  115. void
  116. stream_base<true>::
  117. do_context_takeover_write(role_type role)
  118. {
  119. if((role == role_type::client &&
  120. this->pmd_config_.client_no_context_takeover) ||
  121. (role == role_type::server &&
  122. this->pmd_config_.server_no_context_takeover))
  123. {
  124. this->pmd_->zo.reset();
  125. }
  126. }
  127. } // detail
  128. //------------------------------------------------------------------------------
// Composed operation which writes one frame, or a sequence of
// frames comprising one (possibly fragmented) message piece.
template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
class stream<NextLayer, deflateSupported>::write_some_op
    : public boost::asio::coroutine
{
    Handler h_;                             // completion handler
    stream<NextLayer, deflateSupported>& ws_;
    // Keeps the stream's executor busy for the op's lifetime
    boost::asio::executor_work_guard<decltype(std::declval<
        stream<NextLayer, deflateSupported>&>().get_executor())> wg_;
    buffers_suffix<Buffers> cb_;            // caller payload not yet written
    detail::frame_header fh_;               // header of the frame being built
    detail::prepared_key key_;              // masking key state (client role)
    std::size_t bytes_transferred_ = 0;     // payload bytes delivered so far
    std::size_t remain_;                    // payload bytes left to process
    std::size_t in_;                        // input consumed by last deflate()
    int how_;                               // write algorithm (enum in operator())
    bool fin_;                              // caller's fin flag
    bool more_;                             // deflate has more output pending
    bool cont_ = false;                     // continuation hint for the hook

public:
    static constexpr int id = 2; // for soft_mutex

    write_some_op(write_some_op&&) = default;
    write_some_op(write_some_op const&) = delete;

    template<class DeducedHandler>
    write_some_op(
        DeducedHandler&& h,
        stream<NextLayer, deflateSupported>& ws,
        bool fin,
        Buffers const& bs)
        : h_(std::forward<DeducedHandler>(h))
        , ws_(ws)
        , wg_(ws_.get_executor())
        , cb_(bs)
        , fin_(fin)
    {
    }

    // Propagate the handler's associated allocator
    using allocator_type =
        boost::asio::associated_allocator_t<Handler>;

    allocator_type
    get_allocator() const noexcept
    {
        return (boost::asio::get_associated_allocator)(h_);
    }

    // Propagate the handler's associated executor, falling
    // back to the stream's executor.
    using executor_type = boost::asio::associated_executor_t<
        Handler, decltype(std::declval<stream<NextLayer, deflateSupported>&>().get_executor())>;

    executor_type
    get_executor() const noexcept
    {
        return (boost::asio::get_associated_executor)(
            h_, ws_.get_executor());
    }

    Handler&
    handler()
    {
        return h_;
    }

    void operator()(
        error_code ec = {},
        std::size_t bytes_transferred = 0,
        bool cont = true);

    friend
    bool asio_handler_is_continuation(write_some_op* op)
    {
        using boost::asio::asio_handler_is_continuation;
        return op->cont_ || asio_handler_is_continuation(
            std::addressof(op->h_));
    }

    template<class Function>
    friend
    void asio_handler_invoke(Function&& f, write_some_op* op)
    {
        using boost::asio::asio_handler_invoke;
        asio_handler_invoke(
            f, std::addressof(op->h_));
    }
};
// Run the composed write. Selects one of five algorithms based on
// masking, auto-fragmentation and compression, then loops sending
// frames, yielding at each intermediate async_write and suspending
// when control frames need to interleave.
//
// ec                 error from the last intermediate operation
// bytes_transferred  bytes written by the last intermediate write
// cont               true when invoked as a continuation (feeds
//                    the asio_handler_is_continuation hook)
template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
void
stream<NextLayer, deflateSupported>::
write_some_op<Buffers, Handler>::
operator()(
    error_code ec,
    std::size_t bytes_transferred,
    bool cont)
{
    using beast::detail::clamp;
    using boost::asio::buffer;
    using boost::asio::buffer_copy;
    using boost::asio::buffer_size;
    using boost::asio::mutable_buffer;
    // The available write strategies; how_ is fixed before any yield.
    enum
    {
        do_nomask_nofrag,
        do_nomask_frag,
        do_mask_nofrag,
        do_mask_frag,
        do_deflate
    };
    std::size_t n;
    boost::asio::mutable_buffer b;
    cont_ = cont;
    BOOST_ASIO_CORO_REENTER(*this)
    {
        // Set up the outgoing frame header
        if(! ws_.wr_cont_)
        {
            ws_.begin_msg();
            fh_.rsv1 = ws_.wr_compress_;
        }
        else
        {
            fh_.rsv1 = false;
        }
        fh_.rsv2 = false;
        fh_.rsv3 = false;
        fh_.op = ws_.wr_cont_ ?
            detail::opcode::cont : ws_.wr_opcode_;
        fh_.mask =
            ws_.role_ == role_type::client;

        // Choose a write algorithm
        if(ws_.wr_compress_)
        {
            how_ = do_deflate;
        }
        else if(! fh_.mask)
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_nomask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                // Auto-fragment only when the payload exceeds
                // the write buffer size
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_nomask_frag;
                else
                    how_ = do_nomask_nofrag;
            }
        }
        else
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_mask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_mask_frag;
                else
                    how_ = do_mask_nofrag;
            }
        }

        // Maybe suspend
        if(ws_.wr_block_.try_lock(this))
        {
            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }
        else
        {
        do_suspend:
            // Suspend
            BOOST_ASIO_CORO_YIELD
            ws_.paused_wr_.emplace(std::move(*this));

            // Acquire the write block
            ws_.wr_block_.lock(this);

            // Resume
            BOOST_ASIO_CORO_YIELD
            boost::asio::post(
                ws_.get_executor(), std::move(*this));
            BOOST_ASSERT(ws_.wr_block_.is_locked(this));

            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }

        //------------------------------------------------------------------

        if(how_ == do_nomask_nofrag)
        {
            // One frame, no masking: send header + caller buffers directly
            fh_.fin = fin_;
            fh_.len = buffer_size(cb_);
            ws_.wr_fb_.reset();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            ws_.wr_cont_ = ! fin_;
            // Send frame
            BOOST_ASIO_CORO_YIELD
            boost::asio::async_write(ws_.stream_,
                buffers_cat(ws_.wr_fb_.data(), cb_),
                std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            bytes_transferred_ += clamp(fh_.len);
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_nomask_frag)
        {
            // Multiple frames, no masking: prefix slices of the
            // caller buffers with a header per fragment
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                fh_.len = n;
                remain_ -= n;
                fh_.fin = fin_ ? remain_ == 0 : false;
                ws_.wr_fb_.reset();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(
                    ws_.stream_, buffers_cat(
                        ws_.wr_fb_.data(), buffers_prefix(
                            clamp(fh_.len), cb_)),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = clamp(fh_.len); // because yield
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;
                // Allow outgoing control frames to
                // be sent in between message frames
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_nofrag)
        {
            // One frame, masked: copy payload into the write buffer
            // in chunks, masking in place before each send
            remain_ = buffer_size(cb_);
            fh_.fin = fin_;
            fh_.len = remain_;
            fh_.key = ws_.create_mask();
            detail::prepare_key(key_, fh_.key);
            ws_.wr_fb_.reset();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            n = clamp(remain_, ws_.wr_buf_size_);
            buffer_copy(buffer(
                ws_.wr_buf_.get(), n), cb_);
            detail::mask_inplace(buffer(
                ws_.wr_buf_.get(), n), key_);
            remain_ -= n;
            ws_.wr_cont_ = ! fin_;
            // Send frame header and partial payload
            BOOST_ASIO_CORO_YIELD
            boost::asio::async_write(
                ws_.stream_, buffers_cat(ws_.wr_fb_.data(),
                    buffer(ws_.wr_buf_.get(), n)),
                std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            // Don't count the header bytes toward the payload total
            bytes_transferred_ +=
                bytes_transferred - ws_.wr_fb_.size();
            while(remain_ > 0)
            {
                cb_.consume(ws_.wr_buf_size_);
                n = clamp(remain_, ws_.wr_buf_size_);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                remain_ -= n;
                // Send partial payload
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffer(ws_.wr_buf_.get(), n),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                bytes_transferred_ += bytes_transferred;
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_frag)
        {
            // Multiple frames, masked: each fragment gets a fresh
            // key and is masked in the write buffer before sending
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                remain_ -= n;
                fh_.len = n;
                fh_.key = ws_.create_mask();
                fh_.fin = fin_ ? remain_ == 0 : false;
                detail::prepare_key(key_, fh_.key);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                ws_.wr_fb_.reset();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(),
                        buffer(ws_.wr_buf_.get(), n)),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = bytes_transferred - ws_.wr_fb_.size();
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;
                // Allow outgoing control frames to
                // be sent in between message frames:
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_deflate)
        {
            // Compressed: deflate into the write buffer, optionally
            // mask the compressed output, then send frame by frame
            for(;;)
            {
                b = buffer(ws_.wr_buf_.get(),
                    ws_.wr_buf_size_);
                more_ = ws_.deflate(b, cb_, fin_, in_, ec);
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = buffer_size(b);
                if(n == 0)
                {
                    // The input was consumed, but there
                    // is no output due to compression
                    // latency.
                    BOOST_ASSERT(! fin_);
                    BOOST_ASSERT(buffer_size(cb_) == 0);
                    goto upcall;
                }
                if(fh_.mask)
                {
                    fh_.key = ws_.create_mask();
                    detail::prepared_key key;
                    detail::prepare_key(key, fh_.key);
                    detail::mask_inplace(b, key);
                }
                fh_.fin = ! more_;
                fh_.len = n;
                ws_.wr_fb_.reset();
                detail::write<
                    flat_static_buffer_base>(ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;
                // Send frame
                BOOST_ASIO_CORO_YIELD
                boost::asio::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(), b),
                    std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                // Report consumed input, not compressed output
                bytes_transferred_ += in_;
                if(more_)
                {
                    fh_.op = detail::opcode::cont;
                    fh_.rsv1 = false;
                    // Allow outgoing control frames to
                    // be sent in between message frames:
                    ws_.wr_block_.unlock(this);
                    if( ws_.paused_close_.maybe_invoke() ||
                        ws_.paused_rd_.maybe_invoke() ||
                        ws_.paused_ping_.maybe_invoke())
                    {
                        BOOST_ASSERT(ws_.wr_block_.is_locked());
                        goto do_suspend;
                    }
                    ws_.wr_block_.lock(this);
                }
                else
                {
                    if(fh_.fin)
                        ws_.do_context_takeover_write(ws_.role_);
                    goto upcall;
                }
            }
        }

    //--------------------------------------------------------------------------

    upcall:
        // Release the write block and give any waiting
        // operations a chance to run before completing
        ws_.wr_block_.unlock(this);
        ws_.paused_close_.maybe_invoke() ||
            ws_.paused_rd_.maybe_invoke() ||
            ws_.paused_ping_.maybe_invoke();
        if(! cont_)
        {
            // Never complete an async op on the caller's stack:
            // post the final invocation through the executor
            BOOST_ASIO_CORO_YIELD
            boost::asio::post(
                ws_.get_executor(),
                bind_handler(std::move(*this), ec, bytes_transferred_));
        }
        h_(ec, bytes_transferred_);
    }
}
  547. //------------------------------------------------------------------------------
  548. template<class NextLayer, bool deflateSupported>
  549. template<class ConstBufferSequence>
  550. std::size_t
  551. stream<NextLayer, deflateSupported>::
  552. write_some(bool fin, ConstBufferSequence const& buffers)
  553. {
  554. static_assert(is_sync_stream<next_layer_type>::value,
  555. "SyncStream requirements not met");
  556. static_assert(boost::asio::is_const_buffer_sequence<
  557. ConstBufferSequence>::value,
  558. "ConstBufferSequence requirements not met");
  559. error_code ec;
  560. auto const bytes_transferred =
  561. write_some(fin, buffers, ec);
  562. if(ec)
  563. BOOST_THROW_EXCEPTION(system_error{ec});
  564. return bytes_transferred;
  565. }
// Write a piece of a message synchronously. Builds the frame
// header, then sends the payload using one of four strategies
// chosen by compression, masking, and auto-fragmentation.
// Returns the number of payload bytes written.
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin,
    ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    using beast::detail::clamp;
    using boost::asio::buffer;
    using boost::asio::buffer_copy;
    using boost::asio::buffer_size;
    std::size_t bytes_transferred = 0;
    ec.assign(0, ec.category());
    // Make sure the stream is open
    if(! check_open(ec))
        return bytes_transferred;
    detail::frame_header fh;
    // A new message starts here unless a previous frame
    // left the message unfinished (wr_cont_)
    if(! wr_cont_)
    {
        begin_msg();
        fh.rsv1 = wr_compress_;
    }
    else
    {
        fh.rsv1 = false;
    }
    fh.rsv2 = false;
    fh.rsv3 = false;
    fh.op = wr_cont_ ?
        detail::opcode::cont : wr_opcode_;
    fh.mask = role_ == role_type::client;
    auto remain = buffer_size(buffers);
    if(wr_compress_)
    {
        // Compressed path: deflate into the write buffer and
        // send one frame per pass until the deflator is drained
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            auto b = buffer(
                wr_buf_.get(), wr_buf_size_);
            auto const more = this->deflate(
                b, cb, fin, bytes_transferred, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            auto const n = buffer_size(b);
            if(n == 0)
            {
                // The input was consumed, but there
                // is no output due to compression
                // latency.
                BOOST_ASSERT(! fin);
                BOOST_ASSERT(buffer_size(cb) == 0);
                fh.fin = false;
                break;
            }
            if(fh.mask)
            {
                fh.key = this->create_mask();
                detail::prepared_key key;
                detail::prepare_key(key, fh.key);
                detail::mask_inplace(b, key);
            }
            fh.fin = ! more;
            fh.len = n;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            if(! more)
                break;
            fh.op = detail::opcode::cont;
            fh.rsv1 = false;
        }
        if(fh.fin)
            this->do_context_takeover_write(role_);
    }
    else if(! fh.mask)
    {
        if(! wr_frag_)
        {
            // no mask, no autofrag
            fh.fin = fin;
            fh.len = remain;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            // Send the caller's buffers directly, unmodified
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), buffers), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += remain;
        }
        else
        {
            // no mask, autofrag
            BOOST_ASSERT(wr_buf_size_ != 0);
            buffers_suffix<
                ConstBufferSequence> cb{buffers};
            for(;;)
            {
                auto const n = clamp(remain, wr_buf_size_);
                remain -= n;
                fh.len = n;
                // fin only on the last fragment of a final piece
                fh.fin = fin ? remain == 0 : false;
                detail::fh_buffer fh_buf;
                detail::write<
                    flat_static_buffer_base>(fh_buf, fh);
                wr_cont_ = ! fin;
                boost::asio::write(stream_,
                    buffers_cat(fh_buf.data(),
                        buffers_prefix(n, cb)), ec);
                if(! check_ok(ec))
                    return bytes_transferred;
                bytes_transferred += n;
                if(remain == 0)
                    break;
                fh.op = detail::opcode::cont;
                cb.consume(n);
            }
        }
    }
    else if(! wr_frag_)
    {
        // mask, no autofrag
        fh.fin = fin;
        fh.len = remain;
        fh.key = this->create_mask();
        detail::prepared_key key;
        detail::prepare_key(key, fh.key);
        detail::fh_buffer fh_buf;
        detail::write<
            flat_static_buffer_base>(fh_buf, fh);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        // First chunk includes the frame header
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            wr_cont_ = ! fin;
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
        // Remaining chunks are payload only, same key
        while(remain > 0)
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            boost::asio::write(stream_, b, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
    }
    else
    {
        // mask, autofrag
        BOOST_ASSERT(wr_buf_size_ != 0);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            // Each fragment is a complete frame with a fresh key
            fh.key = this->create_mask();
            detail::prepared_key key;
            detail::prepare_key(key, fh.key);
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            detail::mask_inplace(b, key);
            fh.len = n;
            remain -= n;
            fh.fin = fin ? remain == 0 : false;
            wr_cont_ = ! fh.fin;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            boost::asio::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
            if(remain == 0)
                break;
            fh.op = detail::opcode::cont;
            cb.consume(n);
        }
    }
    return bytes_transferred;
}
// Start an asynchronous write of a piece of a message.
// The handler is invoked as void(error_code, std::size_t)
// with the number of payload bytes written.
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write_some(bool fin,
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    // Construct and immediately invoke the composed operation;
    // cont == false so the final completion is posted, not
    // invoked on the caller's stack.
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, fin, bs}(
                {}, 0, false);
    return init.result.get();
}
  794. //------------------------------------------------------------------------------
  795. template<class NextLayer, bool deflateSupported>
  796. template<class ConstBufferSequence>
  797. std::size_t
  798. stream<NextLayer, deflateSupported>::
  799. write(ConstBufferSequence const& buffers)
  800. {
  801. static_assert(is_sync_stream<next_layer_type>::value,
  802. "SyncStream requirements not met");
  803. static_assert(boost::asio::is_const_buffer_sequence<
  804. ConstBufferSequence>::value,
  805. "ConstBufferSequence requirements not met");
  806. error_code ec;
  807. auto const bytes_transferred = write(buffers, ec);
  808. if(ec)
  809. BOOST_THROW_EXCEPTION(system_error{ec});
  810. return bytes_transferred;
  811. }
  812. template<class NextLayer, bool deflateSupported>
  813. template<class ConstBufferSequence>
  814. std::size_t
  815. stream<NextLayer, deflateSupported>::
  816. write(ConstBufferSequence const& buffers, error_code& ec)
  817. {
  818. static_assert(is_sync_stream<next_layer_type>::value,
  819. "SyncStream requirements not met");
  820. static_assert(boost::asio::is_const_buffer_sequence<
  821. ConstBufferSequence>::value,
  822. "ConstBufferSequence requirements not met");
  823. return write_some(true, buffers, ec);
  824. }
// Start an asynchronous write of a complete message
// (a single piece with fin == true). The handler is invoked
// as void(error_code, std::size_t).
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write(
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(boost::asio::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    // Launch the composed operation with fin == true;
    // cont == false so completion is posted through the executor.
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, true, bs}(
                {}, 0, false);
    return init.result.get();
}
  846. } // websocket
  847. } // beast
  848. } // boost
  849. #endif