mirror of https://github.com/oatpp/oatpp.git
synced 2024-11-27 08:30:07 +08:00
IO. Introduce 'isValidIoHandle' function. Refactor. Change folder of IODefinitions.hpp
parent 85e23c17db
commit e5f24a8e55
@@ -65,7 +65,8 @@ add_library(oatpp
oatpp/core/concurrency/SpinLock.hpp
oatpp/core/concurrency/Thread.cpp
oatpp/core/concurrency/Thread.hpp
-oatpp/core/data/IODefinitions.hpp
+oatpp/core/IODefinitions.cpp
+oatpp/core/IODefinitions.hpp
oatpp/core/data/buffer/FIFOBuffer.cpp
oatpp/core/data/buffer/FIFOBuffer.hpp
oatpp/core/data/buffer/IOBuffer.cpp
43 src/oatpp/core/IODefinitions.cpp Normal file
@@ -0,0 +1,43 @@
+/***************************************************************************
+ *
+ * Project         _____    __   ____   _      _
+ *                (  _  )  /__\ (_  _)_| |_  _| |_
+ *                 )(_)(  /(__)\  )( (_   _)(_   _)
+ *                (_____)(__)(__)(__)  |_|    |_|
+ *
+ *
+ * Copyright 2018-present, Leonid Stryzhevskyi <lganzzzo@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ***************************************************************************/
+
+#include "IODefinitions.hpp"
+
+#if defined(WIN32) || defined(_WIN32)
+  #include <WinSock2.h>
+#endif
+
+namespace oatpp {
+
+bool isValidIoHandle(v_io_handle handle) {
+
+#if defined(WIN32) || defined(_WIN32)
+  return handle != INVALID_SOCKET;
+#else
+  return handle >= 0;
+#endif
+
+}
+
+}
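For reference, a short usage sketch of the new isValidIoHandle() helper follows. It is an illustration only, not part of this commit; the accept()-based scenario, the acceptOrThrow name, and a POSIX build are assumptions.

#include "oatpp/core/IODefinitions.hpp"

#include <sys/socket.h>
#include <stdexcept>

// Hypothetical caller: validate a freshly accepted handle with the new helper.
oatpp::v_io_handle acceptOrThrow(oatpp::v_io_handle serverHandle) {
  oatpp::v_io_handle clientHandle = ::accept(serverHandle, nullptr, nullptr);
  // accept() reports failure as -1 on POSIX and as INVALID_SOCKET on WinSock;
  // isValidIoHandle() hides that platform difference behind a single check.
  if(!oatpp::isValidIoHandle(clientHandle)) {
    throw std::runtime_error("accept() failed");
  }
  return clientHandle;
}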
@@ -22,27 +22,34 @@
 *
 ***************************************************************************/

-#ifndef oatpp_data_IODefinitions_hpp
-#define oatpp_data_IODefinitions_hpp
+#ifndef oatpp_IODefinitions_hpp
+#define oatpp_IODefinitions_hpp

#include "oatpp/core/async/Error.hpp"
#include "oatpp/core/Types.hpp"

-namespace oatpp { namespace data {
+namespace oatpp {

/**
 * Represents I/O handle (ex.: file descriptor).
 */
#if defined(WIN32) || defined(_WIN32)
  #if defined(_WIN64)
-  typedef unsigned long long v_io_handle;
+  typedef unsigned long long v_io_handle;
  #else
-  typedef unsigned long v_io_handle;
+  typedef unsigned long v_io_handle;
  #endif
#else
-  typedef int v_io_handle;
+  typedef int v_io_handle;
#endif

+/**
+ * Check if IO handle is valid.
+ * @param handle - IO handle.
+ * @return - `true` if valid.
+ */
+bool isValidIoHandle(v_io_handle handle);
+
/**
 * All I/O buffer operations (like read/write(buffer, size)) should return v_io_size. <br>
 *
@@ -132,6 +139,6 @@ public:

};

-}}
+}

-#endif //oatpp_data_IODefinitions_hpp
+#endif // oatpp_IODefinitions_hpp
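The header hunk above moves v_io_handle and the related I/O definitions from oatpp::data into the top-level oatpp namespace. A hypothetical compatibility shim (an assumption for illustration, not something this commit provides) could keep old oatpp::data:: spellings compiling while call sites are migrated:

#include "oatpp/core/IODefinitions.hpp"

// Hypothetical migration shim: re-expose the relocated names under the old namespace.
namespace oatpp { namespace data {
  using ::oatpp::v_io_handle;
  using ::oatpp::v_io_size;      // assumed to be relocated together with v_io_handle
  using ::oatpp::IOError;        // assumed to be relocated together with v_io_handle
  using ::oatpp::isValidIoHandle;
}}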
@@ -39,14 +39,14 @@ Action Action::createActionByType(v_int32 type) {
  return Action(type);
}

-Action Action::createIOWaitAction(data::v_io_handle ioHandle, Action::IOEventType ioEventType) {
+Action Action::createIOWaitAction(v_io_handle ioHandle, Action::IOEventType ioEventType) {
  Action result(TYPE_IO_WAIT);
  result.m_data.ioData.ioHandle = ioHandle;
  result.m_data.ioData.ioEventType = ioEventType;
  return result;
}

-Action Action::createIORepeatAction(data::v_io_handle ioHandle, Action::IOEventType ioEventType) {
+Action Action::createIORepeatAction(v_io_handle ioHandle, Action::IOEventType ioEventType) {
  Action result(TYPE_IO_REPEAT);
  result.m_data.ioData.ioHandle = ioHandle;
  result.m_data.ioData.ioEventType = ioEventType;
@@ -139,7 +139,7 @@ v_int64 Action::getTimePointMicroseconds() const {
  return m_data.timePointMicroseconds;
}

-oatpp::data::v_io_handle Action::getIOHandle() const {
+oatpp::v_io_handle Action::getIOHandle() const {
  return m_data.ioData.ioHandle;
}

@@ -383,11 +383,11 @@ CoroutineStarter AbstractCoroutine::waitFor(const std::chrono::duration<v_int64,

}

-Action AbstractCoroutine::ioWait(data::v_io_handle ioHandle, Action::IOEventType ioEventType) {
+Action AbstractCoroutine::ioWait(v_io_handle ioHandle, Action::IOEventType ioEventType) {
  return Action::createIOWaitAction(ioHandle, ioEventType);
}

-Action AbstractCoroutine::ioRepeat(data::v_io_handle ioHandle, Action::IOEventType ioEventType) {
+Action AbstractCoroutine::ioRepeat(v_io_handle ioHandle, Action::IOEventType ioEventType) {
  return Action::createIORepeatAction(ioHandle, ioEventType);
}

@@ -27,7 +27,7 @@

#include "./Error.hpp"

-#include "oatpp/core/data/IODefinitions.hpp"
+#include "oatpp/core/IODefinitions.hpp"

#include "oatpp/core/collection/FastQueue.hpp"
#include "oatpp/core/base/memory/MemoryPool.hpp"
@@ -170,7 +170,7 @@ public:
private:

  struct IOData {
-    oatpp::data::v_io_handle ioHandle;
+    oatpp::v_io_handle ioHandle;
    IOEventType ioEventType;
  };

@@ -217,17 +217,17 @@ public:

  /**
   * Create TYPE_IO_WAIT Action
-   * @param ioHandle - &id:oatpp::data::v_io_handle;.
+   * @param ioHandle - &id:oatpp::v_io_handle;.
   * @return - Action.
   */
-  static Action createIOWaitAction(data::v_io_handle ioHandle, IOEventType ioEventType);
+  static Action createIOWaitAction(v_io_handle ioHandle, IOEventType ioEventType);

  /**
   * Create TYPE_IO_REPEAT Action
-   * @param ioHandle - &id:oatpp::data::v_io_handle;.
+   * @param ioHandle - &id:oatpp::v_io_handle;.
   * @return - Action.
   */
-  static Action createIORepeatAction(data::v_io_handle ioHandle, IOEventType ioEventType);
+  static Action createIORepeatAction(v_io_handle ioHandle, IOEventType ioEventType);

  /**
   * Create TYPE_WAIT_REPEAT Action.
@@ -315,9 +315,9 @@ public:
  /**
   * Get I/O handle which is passed with this action to I/O worker.
   * This method returns meaningful value only if Action is TYPE_IO_WAIT or TYPE_IO_REPEAT.
-   * @return - &id:oatpp::data::v_io_handle;.
+   * @return - &id:oatpp::v_io_handle;.
   */
-  oatpp::data::v_io_handle getIOHandle() const;
+  oatpp::v_io_handle getIOHandle() const;

  /**
   * This method returns meaningful value only if Action is TYPE_IO_WAIT or TYPE_IO_REPEAT.
@@ -538,13 +538,13 @@ public:
   * Convenience method to generate Action of `type == Action::TYPE_IO_WAIT`.
   * @return - TYPE_WAIT_FOR_IO Action.
   */
-  static Action ioWait(data::v_io_handle ioHandle, Action::IOEventType ioEventType);
+  static Action ioWait(v_io_handle ioHandle, Action::IOEventType ioEventType);

  /**
   * Convenience method to generate Action of `type == Action::TYPE_IO_WAIT`.
   * @return - TYPE_IO_REPEAT Action.
   */
-  static Action ioRepeat(data::v_io_handle ioHandle, Action::IOEventType ioEventType);
+  static Action ioRepeat(v_io_handle ioHandle, Action::IOEventType ioEventType);

  /**
   * Convenience method to generate error reporting Action.
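As context for the signature changes above, here is a sketch of the pattern these helpers support. It is an assumption for illustration only, mirroring what Connection::read() does later in this commit; readNonBlocking is a hypothetical name and a POSIX build is assumed.

#include "oatpp/core/async/Coroutine.hpp"
#include "oatpp/core/IODefinitions.hpp"

#include <sys/socket.h>
#include <cerrno>

// Hypothetical non-blocking read: on EAGAIN/EWOULDBLOCK it hands the executor an
// IO-wait Action for the (now oatpp::) v_io_handle and reports RETRY_READ.
oatpp::v_io_size readNonBlocking(oatpp::v_io_handle handle, void* buff,
                                 oatpp::v_buff_size count, oatpp::async::Action& action) {
  auto result = ::recv(handle, buff, (size_t)count, 0);
  if(result < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
    action = oatpp::async::Action::createIOWaitAction(
        handle, oatpp::async::Action::IOEventType::IO_EVENT_READ);
    return oatpp::IOError::RETRY_READ;
  }
  return result;
}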
@@ -78,8 +78,8 @@ private:
  oatpp::collection::FastQueue<CoroutineHandle> m_backlog;
  oatpp::concurrency::SpinLock m_backlogLock;
private:
-  oatpp::data::v_io_handle m_eventQueueHandle;
-  oatpp::data::v_io_handle m_wakeupTrigger;
+  oatpp::v_io_handle m_eventQueueHandle;
+  oatpp::v_io_handle m_wakeupTrigger;
  std::unique_ptr<v_char8[]> m_inEvents;
  v_int32 m_inEventsCount;
  v_int32 m_inEventsCapacity;
@@ -43,7 +43,7 @@ void FIFOBuffer::setBufferPosition(v_buff_size readPosition, v_buff_size writePo
  m_canRead = canRead;
}

-data::v_io_size FIFOBuffer::availableToRead() const {
+v_io_size FIFOBuffer::availableToRead() const {
  if(!m_canRead) {
    return 0;
  }
@@ -53,7 +53,7 @@ data::v_io_size FIFOBuffer::availableToRead() const {
  return (m_bufferSize - m_readPosition + m_writePosition);
}

-data::v_io_size FIFOBuffer::availableToWrite() const {
+v_io_size FIFOBuffer::availableToWrite() const {
  if(m_canRead && m_writePosition == m_readPosition) {
    return 0;
  }
@@ -67,10 +67,10 @@ v_buff_size FIFOBuffer::getBufferSize() const {
  return m_bufferSize;
}

-data::v_io_size FIFOBuffer::read(void *data, v_buff_size count) {
+v_io_size FIFOBuffer::read(void *data, v_buff_size count) {

  if(!m_canRead) {
-    return data::IOError::RETRY_READ;
+    return IOError::RETRY_READ;
  }

  if(count == 0) {
@@ -116,10 +116,10 @@ data::v_io_size FIFOBuffer::read(void *data, v_buff_size count) {

}

-data::v_io_size FIFOBuffer::peek(void *data, v_buff_size count) {
+v_io_size FIFOBuffer::peek(void *data, v_buff_size count) {

  if(!m_canRead) {
-    return data::IOError::RETRY_READ;
+    return IOError::RETRY_READ;
  }

  if(count == 0) {
@@ -156,10 +156,10 @@ data::v_io_size FIFOBuffer::peek(void *data, v_buff_size count) {

}

-data::v_io_size FIFOBuffer::commitReadOffset(v_buff_size count) {
+v_io_size FIFOBuffer::commitReadOffset(v_buff_size count) {

  if(!m_canRead) {
-    return data::IOError::RETRY_READ;
+    return IOError::RETRY_READ;
  }

  if(count == 0) {
@@ -201,10 +201,10 @@ data::v_io_size FIFOBuffer::commitReadOffset(v_buff_size count) {

}

-data::v_io_size FIFOBuffer::write(const void *data, v_buff_size count) {
+v_io_size FIFOBuffer::write(const void *data, v_buff_size count) {

  if(m_canRead && m_writePosition == m_readPosition) {
-    return data::IOError::RETRY_WRITE;
+    return IOError::RETRY_WRITE;
  }

  if(count == 0) {
@@ -246,10 +246,10 @@ data::v_io_size FIFOBuffer::write(const void *data, v_buff_size count) {

}

-data::v_io_size FIFOBuffer::readAndWriteToStream(data::stream::OutputStream* stream, v_buff_size count, async::Action& action) {
+v_io_size FIFOBuffer::readAndWriteToStream(data::stream::OutputStream* stream, v_buff_size count, async::Action& action) {

  if(!m_canRead) {
-    return data::IOError::RETRY_READ;
+    return IOError::RETRY_READ;
  }

  if(count == 0) {
@@ -299,10 +299,10 @@ data::v_io_size FIFOBuffer::readAndWriteToStream(data::stream::OutputStream* str

}

-data::v_io_size FIFOBuffer::readFromStreamAndWrite(data::stream::InputStream* stream, v_buff_size count, async::Action& action) {
+v_io_size FIFOBuffer::readFromStreamAndWrite(data::stream::InputStream* stream, v_buff_size count, async::Action& action) {

  if(m_canRead && m_writePosition == m_readPosition) {
-    return data::IOError::RETRY_WRITE;
+    return IOError::RETRY_WRITE;
  }

  if(count == 0) {
@@ -351,13 +351,13 @@ data::v_io_size FIFOBuffer::readFromStreamAndWrite(data::stream::InputStream* st

}

-data::v_io_size FIFOBuffer::flushToStream(data::stream::OutputStream* stream) {
+v_io_size FIFOBuffer::flushToStream(data::stream::OutputStream* stream) {

  if(!m_canRead) {
    return 0;
  }

-  data::v_io_size result = 0;
+  v_io_size result = 0;

  if(m_readPosition < m_writePosition) {
    result = stream->writeExactSizeDataSimple(&m_buffer[m_readPosition], m_writePosition - m_readPosition);
@@ -446,22 +446,22 @@ void SynchronizedFIFOBuffer::setBufferPosition(v_buff_size readPosition, v_buff_
  m_fifo.setBufferPosition(readPosition, writePosition, canRead);
}

-data::v_io_size SynchronizedFIFOBuffer::availableToRead() {
+v_io_size SynchronizedFIFOBuffer::availableToRead() {
  std::lock_guard<oatpp::concurrency::SpinLock> lock(m_lock);
  return m_fifo.availableToRead();
}

-data::v_io_size SynchronizedFIFOBuffer::availableToWrite() {
+v_io_size SynchronizedFIFOBuffer::availableToWrite() {
  std::lock_guard<oatpp::concurrency::SpinLock> lock(m_lock);
  return m_fifo.availableToWrite();
}

-data::v_io_size SynchronizedFIFOBuffer::read(void *data, v_buff_size count) {
+v_io_size SynchronizedFIFOBuffer::read(void *data, v_buff_size count) {
  std::lock_guard<oatpp::concurrency::SpinLock> lock(m_lock);
  return m_fifo.read(data, count);
}

-data::v_io_size SynchronizedFIFOBuffer::write(const void *data, v_buff_size count) {
+v_io_size SynchronizedFIFOBuffer::write(const void *data, v_buff_size count) {
  std::lock_guard<oatpp::concurrency::SpinLock> lock(m_lock);
  return m_fifo.write(data, count);
}
@@ -26,7 +26,7 @@
#define oatpp_data_buffer_FIFOBuffer_hpp

#include "oatpp/core/data/stream/Stream.hpp"
-#include "oatpp/core/data/IODefinitions.hpp"
+#include "oatpp/core/IODefinitions.hpp"
#include "oatpp/core/async/Coroutine.hpp"
#include "oatpp/core/concurrency/SpinLock.hpp"

@@ -69,15 +69,15 @@ public:

  /**
   * Amount of bytes currently available to read from buffer.
-   * @return &id:oatpp::data::v_io_size;.
+   * @return &id:oatpp::v_io_size;.
   */
-  data::v_io_size availableToRead() const;
+  v_io_size availableToRead() const;

  /**
   * Amount of buffer space currently available for data writes.
-   * @return &id:oatpp::data::v_io_size;.
+   * @return &id:oatpp::v_io_size;.
   */
-  data::v_io_size availableToWrite() const;
+  v_io_size availableToWrite() const;

  /**
   * Get FIFOBuffer size.
@@ -91,7 +91,7 @@ public:
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size read(void *data, v_buff_size count);
+  v_io_size read(void *data, v_buff_size count);

  /**
   * Peek up to count of bytes in the buffer
@@ -99,14 +99,14 @@ public:
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size peek(void *data, v_buff_size count);
+  v_io_size peek(void *data, v_buff_size count);

  /**
   * Commit read offset
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size commitReadOffset(v_buff_size count);
+  v_io_size commitReadOffset(v_buff_size count);

  /**
   * write up to count bytes from data to buffer
@@ -114,7 +114,7 @@ public:
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size write(const void *data, v_buff_size count);
+  v_io_size write(const void *data, v_buff_size count);

  /**
   * call read and then write bytes read to output stream
@@ -123,7 +123,7 @@ public:
   * @param action
   * @return [1..count], IOErrors.
   */
-  data::v_io_size readAndWriteToStream(data::stream::OutputStream* stream, v_buff_size count, async::Action& action);
+  v_io_size readAndWriteToStream(data::stream::OutputStream* stream, v_buff_size count, async::Action& action);

  /**
   * call stream.read() and then write bytes read to buffer
@@ -132,14 +132,14 @@ public:
   * @param action
   * @return
   */
-  data::v_io_size readFromStreamAndWrite(data::stream::InputStream* stream, v_buff_size count, async::Action& action);
+  v_io_size readFromStreamAndWrite(data::stream::InputStream* stream, v_buff_size count, async::Action& action);

  /**
   * flush all availableToRead bytes to stream
   * @param stream
   * @return
   */
-  data::v_io_size flushToStream(data::stream::OutputStream* stream);
+  v_io_size flushToStream(data::stream::OutputStream* stream);

  /**
   * flush all availableToRead bytes to stream in asynchronous manner
@@ -184,15 +184,15 @@ public:

  /**
   * Amount of bytes currently available to read from buffer.
-   * @return &id:oatpp::data::v_io_size;.
+   * @return &id:oatpp::v_io_size;.
   */
-  data::v_io_size availableToRead();
+  v_io_size availableToRead();

  /**
   * Amount of buffer space currently available for data writes.
-   * @return &id:oatpp::data::v_io_size;.
+   * @return &id:oatpp::v_io_size;.
   */
-  data::v_io_size availableToWrite();
+  v_io_size availableToWrite();

  /**
   * read up to count bytes from the buffer to data
@@ -200,7 +200,7 @@ public:
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size read(void *data, v_buff_size count);
+  v_io_size read(void *data, v_buff_size count);

  /**
   * write up to count bytes from data to buffer
@@ -208,7 +208,7 @@ public:
   * @param count
   * @return [1..count], IOErrors.
   */
-  data::v_io_size write(const void *data, v_buff_size count);
+  v_io_size write(const void *data, v_buff_size count);

  /* No implementation of other methods */
  /* User should implement his own synchronization for other methods */
@ -93,7 +93,7 @@ ProcessingPipeline::ProcessingPipeline(const std::vector<base::ObjectHandle<Proc
|
||||
}
|
||||
}
|
||||
|
||||
data::v_io_size ProcessingPipeline::suggestInputStreamReadSize() {
|
||||
v_io_size ProcessingPipeline::suggestInputStreamReadSize() {
|
||||
return m_processors[0]->suggestInputStreamReadSize();
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@
|
||||
#ifndef oatpp_data_buffer_Processor_hpp
|
||||
#define oatpp_data_buffer_Processor_hpp
|
||||
|
||||
#include "oatpp/core/data/IODefinitions.hpp"
|
||||
#include "oatpp/core/IODefinitions.hpp"
|
||||
#include "oatpp/core/base/ObjectHandle.hpp"
|
||||
#include <vector>
|
||||
|
||||
@ -176,7 +176,7 @@ public:
|
||||
* the client MAY ask the processor for a suggested read size.
|
||||
* @return - suggested read size.
|
||||
*/
|
||||
virtual data::v_io_size suggestInputStreamReadSize() = 0;
|
||||
virtual v_io_size suggestInputStreamReadSize() = 0;
|
||||
|
||||
/**
|
||||
* Process data.
|
||||
@ -210,7 +210,7 @@ public:
|
||||
* the client MAY ask the processor for a suggested read size.
|
||||
* @return - suggested read size.
|
||||
*/
|
||||
data::v_io_size suggestInputStreamReadSize() override;
|
||||
v_io_size suggestInputStreamReadSize() override;
|
||||
|
||||
/**
|
||||
* Process data.
|
||||
|
@ -43,7 +43,7 @@ BufferOutputStream::~BufferOutputStream() {
|
||||
delete [] m_data;
|
||||
}
|
||||
|
||||
data::v_io_size BufferOutputStream::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size BufferOutputStream::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
reserveBytesUpfront(count);
|
||||
|
||||
@ -124,7 +124,7 @@ oatpp::String BufferOutputStream::getSubstring(v_buff_size pos, v_buff_size coun
|
||||
}
|
||||
}
|
||||
|
||||
oatpp::data::v_io_size BufferOutputStream::flushToStream(OutputStream* stream) {
|
||||
oatpp::v_io_size BufferOutputStream::flushToStream(OutputStream* stream) {
|
||||
return stream->writeExactSizeDataSimple(m_data, m_position);
|
||||
}
|
||||
|
||||
@ -189,7 +189,7 @@ void BufferInputStream::reset() {
|
||||
m_position = 0;
|
||||
}
|
||||
|
||||
data::v_io_size BufferInputStream::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size BufferInputStream::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_buff_size desiredAmount = count;
|
||||
if(desiredAmount > m_size - m_position) {
|
||||
desiredAmount = m_size - m_position;
|
||||
|
@ -60,9 +60,9 @@ public:
|
||||
* @param count - number of bytes to write.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
@ -131,7 +131,7 @@ public:
|
||||
* @param stream - stream to flush all data to.
|
||||
* @return - actual amount of bytes flushed.
|
||||
*/
|
||||
oatpp::data::v_io_size flushToStream(OutputStream* stream);
|
||||
oatpp::v_io_size flushToStream(OutputStream* stream);
|
||||
|
||||
/**
|
||||
* Write all bytes from buffer to stream in async manner.
|
||||
@ -195,7 +195,7 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes read. 0 - designates end of the buffer.
|
||||
*/
|
||||
data::v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
|
@ -65,7 +65,7 @@ void ChunkedBuffer::freeEntry(ChunkEntry* entry){
|
||||
delete entry;
|
||||
}
|
||||
|
||||
data::v_io_size ChunkedBuffer::writeToEntry(ChunkEntry* entry,
|
||||
v_io_size ChunkedBuffer::writeToEntry(ChunkEntry* entry,
|
||||
const void *data,
|
||||
v_buff_size count,
|
||||
v_buff_size& outChunkPos)
|
||||
@ -81,13 +81,13 @@ data::v_io_size ChunkedBuffer::writeToEntry(ChunkEntry* entry,
|
||||
}
|
||||
}
|
||||
|
||||
data::v_io_size ChunkedBuffer::writeToEntryFrom(ChunkEntry* entry,
|
||||
v_io_size ChunkedBuffer::writeToEntryFrom(ChunkEntry* entry,
|
||||
v_buff_size inChunkPos,
|
||||
const void *data,
|
||||
v_buff_size count,
|
||||
v_buff_size& outChunkPos)
|
||||
{
|
||||
data::v_io_size spaceLeft = CHUNK_ENTRY_SIZE - inChunkPos;
|
||||
v_io_size spaceLeft = CHUNK_ENTRY_SIZE - inChunkPos;
|
||||
if(count >= spaceLeft){
|
||||
std::memcpy(&((p_char8) entry->chunk)[inChunkPos], data, (size_t)spaceLeft);
|
||||
outChunkPos = 0;
|
||||
@ -117,7 +117,7 @@ ChunkedBuffer::ChunkEntry* ChunkedBuffer::getChunkForPosition(ChunkEntry* fromCh
|
||||
|
||||
}
|
||||
|
||||
data::v_io_size ChunkedBuffer::write(const void *data, v_buff_size count, async::Action& action){
|
||||
v_io_size ChunkedBuffer::write(const void *data, v_buff_size count, async::Action& action){
|
||||
|
||||
if(count <= 0){
|
||||
return 0;
|
||||
@ -162,7 +162,7 @@ Context& ChunkedBuffer::getOutputStreamContext() {
|
||||
return DEFAULT_CONTEXT;
|
||||
}
|
||||
|
||||
data::v_io_size ChunkedBuffer::readSubstring(void *buffer,
|
||||
v_io_size ChunkedBuffer::readSubstring(void *buffer,
|
||||
v_buff_size pos,
|
||||
v_buff_size count)
|
||||
{
|
||||
@ -184,7 +184,7 @@ data::v_io_size ChunkedBuffer::readSubstring(void *buffer,
|
||||
v_buff_size lastChunkPos;
|
||||
auto lastChunk = getChunkForPosition(firstChunk, firstChunkPos + countToRead, lastChunkPos);
|
||||
|
||||
data::v_io_size bufferPos = 0;
|
||||
v_io_size bufferPos = 0;
|
||||
|
||||
if(firstChunk != lastChunk){
|
||||
|
||||
@ -218,7 +218,7 @@ oatpp::String ChunkedBuffer::getSubstring(v_buff_size pos, v_buff_size count){
|
||||
}
|
||||
|
||||
bool ChunkedBuffer::flushToStream(OutputStream* stream){
|
||||
data::v_io_size pos = m_size;
|
||||
v_io_size pos = m_size;
|
||||
auto curr = m_firstEntry;
|
||||
while (pos > 0) {
|
||||
if(pos > CHUNK_ENTRY_SIZE) {
|
||||
@ -246,7 +246,7 @@ oatpp::async::CoroutineStarter ChunkedBuffer::flushToStreamAsync(const std::shar
|
||||
std::shared_ptr<ChunkedBuffer> m_chunkedBuffer;
|
||||
std::shared_ptr<OutputStream> m_stream;
|
||||
ChunkEntry* m_currEntry;
|
||||
data::v_io_size m_bytesLeft;
|
||||
v_io_size m_bytesLeft;
|
||||
Action m_nextAction;
|
||||
data::buffer::InlineWriteData m_currData;
|
||||
bool m_needInit;
|
||||
|
@ -113,12 +113,12 @@ private:
|
||||
ChunkEntry* obtainNewEntry();
|
||||
void freeEntry(ChunkEntry* entry);
|
||||
|
||||
data::v_io_size writeToEntry(ChunkEntry* entry,
|
||||
v_io_size writeToEntry(ChunkEntry* entry,
|
||||
const void *data,
|
||||
v_buff_size count,
|
||||
v_buff_size& outChunkPos);
|
||||
|
||||
data::v_io_size writeToEntryFrom(ChunkEntry* entry,
|
||||
v_io_size writeToEntryFrom(ChunkEntry* entry,
|
||||
v_buff_size inChunkPos,
|
||||
const void *data,
|
||||
v_buff_size count,
|
||||
@ -167,7 +167,7 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes written.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
@ -194,7 +194,7 @@ public:
|
||||
* @param count - number of bytes to read.
|
||||
* @return - actual number of bytes read from ChunkedBuffer and written to buffer.
|
||||
*/
|
||||
data::v_io_size readSubstring(void *buffer, v_buff_size pos, v_buff_size count);
|
||||
v_io_size readSubstring(void *buffer, v_buff_size pos, v_buff_size count);
|
||||
|
||||
/**
|
||||
* Create &id:oatpp::String; from part of ChunkedBuffer.
|
||||
|
@ -57,7 +57,7 @@ std::FILE* FileInputStream::getFile() {
|
||||
return m_file;
|
||||
}
|
||||
|
||||
data::v_io_size FileInputStream::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size FileInputStream::read(void *data, v_buff_size count, async::Action& action) {
|
||||
return std::fread(data, 1, count, m_file);
|
||||
}
|
||||
|
||||
@ -103,7 +103,7 @@ std::FILE* FileOutputStream::getFile() {
|
||||
return m_file;
|
||||
}
|
||||
|
||||
data::v_io_size FileOutputStream::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size FileOutputStream::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
return std::fwrite(data, 1, count, m_file);
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes read.
|
||||
*/
|
||||
data::v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
@ -142,9 +142,9 @@ public:
|
||||
* @param count - number of bytes to write.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
|
@ -30,7 +30,7 @@ namespace oatpp { namespace data{ namespace stream {
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// WriteCallback
|
||||
|
||||
data::v_io_size WriteCallback::write(data::buffer::InlineWriteData& inlineData, async::Action& action) {
|
||||
v_io_size WriteCallback::write(data::buffer::InlineWriteData& inlineData, async::Action& action) {
|
||||
auto res = write(inlineData.currBufferPtr, inlineData.bytesLeft, action);
|
||||
if(res > 0) {
|
||||
inlineData.inc(res);
|
||||
@ -38,7 +38,7 @@ data::v_io_size WriteCallback::write(data::buffer::InlineWriteData& inlineData,
|
||||
return res;
|
||||
}
|
||||
|
||||
data::v_io_size WriteCallback::writeSimple(const void *data, v_buff_size count) {
|
||||
v_io_size WriteCallback::writeSimple(const void *data, v_buff_size count) {
|
||||
async::Action action;
|
||||
auto res = write(data, count, action);
|
||||
if(!action.isNone()) {
|
||||
@ -48,7 +48,7 @@ data::v_io_size WriteCallback::writeSimple(const void *data, v_buff_size count)
|
||||
return res;
|
||||
}
|
||||
|
||||
data::v_io_size WriteCallback::writeExactSizeDataSimple(data::buffer::InlineWriteData& inlineData) {
|
||||
v_io_size WriteCallback::writeExactSizeDataSimple(data::buffer::InlineWriteData& inlineData) {
|
||||
auto initialCount = inlineData.bytesLeft;
|
||||
while(inlineData.bytesLeft > 0) {
|
||||
async::Action action;
|
||||
@ -57,14 +57,14 @@ data::v_io_size WriteCallback::writeExactSizeDataSimple(data::buffer::InlineWrit
|
||||
OATPP_LOGE("[oatpp::data::stream::WriteCallback::writeExactSizeDataSimple()]", "Error. writeExactSizeDataSimple() is called on a stream in Async mode.");
|
||||
throw std::runtime_error("[oatpp::data::stream::WriteCallback::writeExactSizeDataSimple()]: Error. writeExactSizeDataSimple() is called on a stream in Async mode.");
|
||||
}
|
||||
if(res == data::IOError::BROKEN_PIPE || res == data::IOError::ZERO_VALUE) {
|
||||
if(res == IOError::BROKEN_PIPE || res == IOError::ZERO_VALUE) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return initialCount - inlineData.bytesLeft;
|
||||
}
|
||||
|
||||
data::v_io_size WriteCallback::writeExactSizeDataSimple(const void *data, v_buff_size count) {
|
||||
v_io_size WriteCallback::writeExactSizeDataSimple(const void *data, v_buff_size count) {
|
||||
data::buffer::InlineWriteData inlineData(data, count);
|
||||
return writeExactSizeDataSimple(inlineData);
|
||||
}
|
||||
@ -85,7 +85,7 @@ async::Action WriteCallback::writeExactSizeDataAsyncInline(data::buffer::InlineW
|
||||
} else {
|
||||
switch (res) {
|
||||
case IOError::BROKEN_PIPE:
|
||||
return new AsyncIOError(data::IOError::BROKEN_PIPE);
|
||||
return new AsyncIOError(IOError::BROKEN_PIPE);
|
||||
case IOError::ZERO_VALUE:
|
||||
break;
|
||||
case IOError::RETRY_READ:
|
||||
@ -132,7 +132,7 @@ async::CoroutineStarter WriteCallback::writeExactSizeDataAsync(const void* data,
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// ReadCallback
|
||||
|
||||
data::v_io_size ReadCallback::read(data::buffer::InlineReadData& inlineData, async::Action& action) {
|
||||
v_io_size ReadCallback::read(data::buffer::InlineReadData& inlineData, async::Action& action) {
|
||||
auto res = read(inlineData.currBufferPtr, inlineData.bytesLeft, action);
|
||||
if(res > 0) {
|
||||
inlineData.inc(res);
|
||||
@ -140,7 +140,7 @@ data::v_io_size ReadCallback::read(data::buffer::InlineReadData& inlineData, asy
|
||||
return res;
|
||||
}
|
||||
|
||||
data::v_io_size ReadCallback::readExactSizeDataSimple(data::buffer::InlineReadData& inlineData) {
|
||||
v_io_size ReadCallback::readExactSizeDataSimple(data::buffer::InlineReadData& inlineData) {
|
||||
auto initialCount = inlineData.bytesLeft;
|
||||
while(inlineData.bytesLeft > 0) {
|
||||
async::Action action;
|
||||
@ -149,14 +149,14 @@ data::v_io_size ReadCallback::readExactSizeDataSimple(data::buffer::InlineReadDa
|
||||
OATPP_LOGE("[oatpp::data::stream::ReadCallback::readExactSizeDataSimple()]", "Error. readExactSizeDataSimple() is called on a stream in Async mode.");
|
||||
throw std::runtime_error("[oatpp::data::stream::ReadCallback::readExactSizeDataSimple()]: Error. readExactSizeDataSimple() is called on a stream in Async mode.");
|
||||
}
|
||||
if(res == data::IOError::BROKEN_PIPE || res == data::IOError::ZERO_VALUE) {
|
||||
if(res == IOError::BROKEN_PIPE || res == IOError::ZERO_VALUE) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return initialCount - inlineData.bytesLeft;
|
||||
}
|
||||
|
||||
data::v_io_size ReadCallback::readExactSizeDataSimple(void *data, v_buff_size count) {
|
||||
v_io_size ReadCallback::readExactSizeDataSimple(void *data, v_buff_size count) {
|
||||
data::buffer::InlineReadData inlineData(data, count);
|
||||
return readExactSizeDataSimple(inlineData);
|
||||
}
|
||||
@ -177,7 +177,7 @@ async::Action ReadCallback::readExactSizeDataAsyncInline(data::buffer::InlineRea
|
||||
} else {
|
||||
switch (res) {
|
||||
case IOError::BROKEN_PIPE:
|
||||
return new AsyncIOError(data::IOError::BROKEN_PIPE);
|
||||
return new AsyncIOError(IOError::BROKEN_PIPE);
|
||||
case IOError::ZERO_VALUE:
|
||||
break;
|
||||
case IOError::RETRY_READ:
|
||||
@ -211,7 +211,7 @@ async::Action ReadCallback::readSomeDataAsyncInline(data::buffer::InlineReadData
|
||||
if(res < 0) {
|
||||
switch (res) {
|
||||
case IOError::BROKEN_PIPE:
|
||||
return new AsyncIOError(data::IOError::BROKEN_PIPE);
|
||||
return new AsyncIOError(IOError::BROKEN_PIPE);
|
||||
// case IOError::ZERO_VALUE:
|
||||
// break;
|
||||
case IOError::RETRY_READ:
|
||||
@ -231,7 +231,7 @@ async::Action ReadCallback::readSomeDataAsyncInline(data::buffer::InlineReadData
|
||||
|
||||
}
|
||||
|
||||
data::v_io_size ReadCallback::readSimple(void *data, v_buff_size count) {
|
||||
v_io_size ReadCallback::readSimple(void *data, v_buff_size count) {
|
||||
async::Action action;
|
||||
auto res = read(data, count, action);
|
||||
if(!action.isNone()) {
|
||||
@ -325,7 +325,7 @@ async::CoroutineStarter IOStream::initContextsAsync() {
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// ConsistentOutputStream
|
||||
|
||||
data::v_io_size ConsistentOutputStream::writeAsString(v_int32 value){
|
||||
v_io_size ConsistentOutputStream::writeAsString(v_int32 value){
|
||||
v_char8 a[16];
|
||||
v_int32 size = utils::conversion::int32ToCharSequence(value, &a[0], 16);
|
||||
if(size > 0){
|
||||
@ -334,7 +334,7 @@ data::v_io_size ConsistentOutputStream::writeAsString(v_int32 value){
|
||||
return 0;
|
||||
}
|
||||
|
||||
data::v_io_size ConsistentOutputStream::writeAsString(v_int64 value){
|
||||
v_io_size ConsistentOutputStream::writeAsString(v_int64 value){
|
||||
v_char8 a[32];
|
||||
v_int32 size = utils::conversion::int64ToCharSequence(value, &a[0], 32);
|
||||
if(size > 0){
|
||||
@ -343,7 +343,7 @@ data::v_io_size ConsistentOutputStream::writeAsString(v_int64 value){
|
||||
return 0;
|
||||
}
|
||||
|
||||
data::v_io_size ConsistentOutputStream::writeAsString(v_float32 value){
|
||||
v_io_size ConsistentOutputStream::writeAsString(v_float32 value){
|
||||
v_char8 a[100];
|
||||
v_int32 size = utils::conversion::float32ToCharSequence(value, &a[0], 100);
|
||||
if(size > 0){
|
||||
@ -352,7 +352,7 @@ data::v_io_size ConsistentOutputStream::writeAsString(v_float32 value){
|
||||
return 0;
|
||||
}
|
||||
|
||||
data::v_io_size ConsistentOutputStream::writeAsString(v_float64 value){
|
||||
v_io_size ConsistentOutputStream::writeAsString(v_float64 value){
|
||||
v_char8 a[100];
|
||||
v_int32 size = utils::conversion::float64ToCharSequence(value, &a[0], 100);
|
||||
if(size > 0){
|
||||
@ -361,7 +361,7 @@ data::v_io_size ConsistentOutputStream::writeAsString(v_float64 value){
|
||||
return 0;
|
||||
}
|
||||
|
||||
data::v_io_size ConsistentOutputStream::writeAsString(bool value) {
|
||||
v_io_size ConsistentOutputStream::writeAsString(bool value) {
|
||||
if(value){
|
||||
return writeSimple("true", 4);
|
||||
} else {
|
||||
@ -481,7 +481,7 @@ ConsistentOutputStream& operator << (ConsistentOutputStream& s, bool value) {
|
||||
|
||||
StatelessDataTransferProcessor StatelessDataTransferProcessor::INSTANCE;
|
||||
|
||||
data::v_io_size StatelessDataTransferProcessor::suggestInputStreamReadSize() {
|
||||
v_io_size StatelessDataTransferProcessor::suggestInputStreamReadSize() {
|
||||
return 32767;
|
||||
}
|
||||
|
||||
@ -512,9 +512,9 @@ v_int32 StatelessDataTransferProcessor::iterate(data::buffer::InlineReadData& da
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Other functions
|
||||
|
||||
data::v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
const base::ObjectHandle<WriteCallback>& writeCallback,
|
||||
data::v_io_size transferSize,
|
||||
v_io_size transferSize,
|
||||
void* buffer,
|
||||
v_buff_size bufferSize,
|
||||
const base::ObjectHandle<data::buffer::Processor>& processor)
|
||||
@ -524,7 +524,7 @@ data::v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
data::buffer::InlineReadData outData;
|
||||
|
||||
v_int32 procRes = data::buffer::Processor::Error::PROVIDE_DATA_IN;
|
||||
data::v_io_size progress = 0;
|
||||
v_io_size progress = 0;
|
||||
|
||||
while(transferSize == 0 || progress < transferSize) {
|
||||
|
||||
@ -540,11 +540,11 @@ data::v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
desiredToRead = transferSize - progress;
|
||||
}
|
||||
|
||||
data::v_io_size res = 0;
|
||||
v_io_size res = 0;
|
||||
|
||||
if(desiredToRead > 0) {
|
||||
res = data::IOError::RETRY_READ;
|
||||
while (res == data::IOError::RETRY_READ || res == data::IOError::RETRY_WRITE) {
|
||||
res = IOError::RETRY_READ;
|
||||
while (res == IOError::RETRY_READ || res == IOError::RETRY_WRITE) {
|
||||
res = readCallback->readSimple(buffer, desiredToRead);
|
||||
}
|
||||
}
|
||||
@ -570,8 +570,8 @@ data::v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
}
|
||||
|
||||
case data::buffer::Processor::Error::FLUSH_DATA_OUT: {
|
||||
data::v_io_size res = data::IOError::RETRY_WRITE;
|
||||
while(res == data::IOError::RETRY_WRITE || res == data::IOError::RETRY_READ) {
|
||||
v_io_size res = IOError::RETRY_WRITE;
|
||||
while(res == IOError::RETRY_WRITE || res == IOError::RETRY_READ) {
|
||||
res = writeCallback->writeSimple(outData.currBufferPtr, outData.bytesLeft);
|
||||
}
|
||||
if(res > 0) {
|
||||
@ -655,7 +655,7 @@ async::CoroutineStarter transferAsync(const base::ObjectHandle<ReadCallback>& re
|
||||
}
|
||||
|
||||
Action action;
|
||||
data::v_io_size res = 0;
|
||||
v_io_size res = 0;
|
||||
|
||||
if(desiredToRead > 0) {
|
||||
res = m_readCallback->read(m_readData.currBufferPtr, desiredToRead, action);
|
||||
@ -669,20 +669,20 @@ async::CoroutineStarter transferAsync(const base::ObjectHandle<ReadCallback>& re
|
||||
|
||||
switch(res) {
|
||||
|
||||
case data::IOError::BROKEN_PIPE:
|
||||
case IOError::BROKEN_PIPE:
|
||||
return error<AsyncTransferError>("[oatpp::data::stream::transferAsync]: Error. ReadCallback. BROKEN_PIPE.");
|
||||
|
||||
case data::IOError::ZERO_VALUE:
|
||||
case IOError::ZERO_VALUE:
|
||||
m_inData.set(nullptr, 0);
|
||||
break;
|
||||
|
||||
case data::IOError::RETRY_READ:
|
||||
case IOError::RETRY_READ:
|
||||
if(!action.isNone()) {
|
||||
return action;
|
||||
}
|
||||
return repeat();
|
||||
|
||||
case data::IOError::RETRY_WRITE:
|
||||
case IOError::RETRY_WRITE:
|
||||
if(!action.isNone()) {
|
||||
return action;
|
||||
}
|
||||
|
@ -30,7 +30,7 @@
|
||||
#include "oatpp/core/data/buffer/IOBuffer.hpp"
|
||||
#include "oatpp/core/data/buffer/Processor.hpp"
|
||||
|
||||
#include "oatpp/core/data/IODefinitions.hpp"
|
||||
#include "oatpp/core/IODefinitions.hpp"
|
||||
|
||||
namespace oatpp { namespace data{ namespace stream {
|
||||
|
||||
@ -210,15 +210,15 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes written. 0 - to indicate end-of-file.
|
||||
*/
|
||||
virtual data::v_io_size write(const void *data, v_buff_size count, async::Action& action) = 0;
|
||||
virtual v_io_size write(const void *data, v_buff_size count, async::Action& action) = 0;
|
||||
|
||||
data::v_io_size write(data::buffer::InlineWriteData& inlineData, async::Action& action);
|
||||
v_io_size write(data::buffer::InlineWriteData& inlineData, async::Action& action);
|
||||
|
||||
data::v_io_size writeSimple(const void *data, v_buff_size count);
|
||||
v_io_size writeSimple(const void *data, v_buff_size count);
|
||||
|
||||
data::v_io_size writeExactSizeDataSimple(data::buffer::InlineWriteData& inlineData);
|
||||
v_io_size writeExactSizeDataSimple(data::buffer::InlineWriteData& inlineData);
|
||||
|
||||
data::v_io_size writeExactSizeDataSimple(const void *data, v_buff_size count);
|
||||
v_io_size writeExactSizeDataSimple(const void *data, v_buff_size count);
|
||||
|
||||
async::Action writeExactSizeDataAsyncInline(data::buffer::InlineWriteData& inlineData, async::Action&& nextAction);
|
||||
|
||||
@ -227,27 +227,27 @@ public:
|
||||
/**
|
||||
* Same as `write((p_char8)data, std::strlen(data));`.
|
||||
* @param data - data to write.
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size writeSimple(const char* data){
|
||||
v_io_size writeSimple(const char* data){
|
||||
return writeSimple((p_char8)data, std::strlen(data));
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as `write(str->getData(), str->getSize());`
|
||||
* @param str - data to write.
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size writeSimple(const oatpp::String& str){
|
||||
v_io_size writeSimple(const oatpp::String& str){
|
||||
return writeSimple(str->getData(), str->getSize());
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as `write(&c, 1);`.
|
||||
* @param c - one char to write.
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size writeCharSimple(v_char8 c){
|
||||
v_io_size writeCharSimple(v_char8 c){
|
||||
return writeSimple(&c, 1);
|
||||
}
|
||||
|
||||
@ -303,19 +303,19 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual number of bytes written to buffer. 0 - to indicate end-of-file.
|
||||
*/
|
||||
virtual data::v_io_size read(void *buffer, v_buff_size count, async::Action& action) = 0;
|
||||
virtual v_io_size read(void *buffer, v_buff_size count, async::Action& action) = 0;
|
||||
|
||||
data::v_io_size read(data::buffer::InlineReadData& inlineData, async::Action& action);
|
||||
v_io_size read(data::buffer::InlineReadData& inlineData, async::Action& action);
|
||||
|
||||
data::v_io_size readExactSizeDataSimple(data::buffer::InlineReadData& inlineData);
|
||||
v_io_size readExactSizeDataSimple(data::buffer::InlineReadData& inlineData);
|
||||
|
||||
data::v_io_size readExactSizeDataSimple(void *data, v_buff_size count);
|
||||
v_io_size readExactSizeDataSimple(void *data, v_buff_size count);
|
||||
|
||||
async::Action readExactSizeDataAsyncInline(data::buffer::InlineReadData& inlineData, async::Action&& nextAction);
|
||||
|
||||
async::Action readSomeDataAsyncInline(data::buffer::InlineReadData& inlineData, async::Action&& nextAction);
|
||||
|
||||
data::v_io_size readSimple(void *data, v_buff_size count);
|
||||
v_io_size readSimple(void *data, v_buff_size count);
|
||||
|
||||
};
|
||||
|
||||
@ -377,37 +377,37 @@ public:
|
||||
/**
|
||||
* Convert value to string and write to stream.
|
||||
* @param value
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;. <br>
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;. <br>
|
||||
*/
|
||||
data::v_io_size writeAsString(v_int32 value);
|
||||
v_io_size writeAsString(v_int32 value);
|
||||
|
||||
/**
|
||||
* Convert value to string and write to stream.
|
||||
* @param value
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;. <br>
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;. <br>
|
||||
*/
|
||||
data::v_io_size writeAsString(v_int64 value);
|
||||
v_io_size writeAsString(v_int64 value);
|
||||
|
||||
/**
|
||||
* Convert value to string and write to stream.
|
||||
* @param value
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;. <br>
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;. <br>
|
||||
*/
|
||||
data::v_io_size writeAsString(v_float32 value);
|
||||
v_io_size writeAsString(v_float32 value);
|
||||
|
||||
/**
|
||||
* Convert value to string and write to stream.
|
||||
* @param value
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;. <br>
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;. <br>
|
||||
*/
|
||||
data::v_io_size writeAsString(v_float64 value);
|
||||
v_io_size writeAsString(v_float64 value);
|
||||
|
||||
/**
|
||||
* Convert value to string and write to stream.
|
||||
* @param value
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;. <br>
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;. <br>
|
||||
*/
|
||||
data::v_io_size writeAsString(bool value);
|
||||
v_io_size writeAsString(bool value);
|
||||
|
||||
};
|
||||
|
||||
@ -447,7 +447,7 @@ class StatelessDataTransferProcessor : public data::buffer::Processor {
|
||||
public:
|
||||
static StatelessDataTransferProcessor INSTANCE;
|
||||
public:
|
||||
data::v_io_size suggestInputStreamReadSize() override;
|
||||
v_io_size suggestInputStreamReadSize() override;
|
||||
v_int32 iterate(data::buffer::InlineReadData& dataIn, data::buffer::InlineReadData& dataOut) override;
|
||||
};
|
||||
|
||||
@ -461,9 +461,9 @@ public:
|
||||
* @param processor - data processing to be applied during the transfer.
|
||||
* @return - the actual amount of bytes read from the `readCallback`.
|
||||
*/
|
||||
data::v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
v_io_size transfer(const base::ObjectHandle<ReadCallback>& readCallback,
|
||||
const base::ObjectHandle<WriteCallback>& writeCallback,
|
||||
data::v_io_size transferSize,
|
||||
v_io_size transferSize,
|
||||
void* buffer,
|
||||
v_buff_size bufferSize,
|
||||
const base::ObjectHandle<data::buffer::Processor>& processor = &StatelessDataTransferProcessor::INSTANCE);
|
||||
|
@ -26,7 +26,7 @@
|
||||
|
||||
namespace oatpp { namespace data{ namespace stream {
|
||||
|
||||
data::v_io_size OutputStreamBufferedProxy::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size OutputStreamBufferedProxy::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
if(m_buffer.availableToWrite() > 0) {
|
||||
return m_buffer.write(data, count);
|
||||
} else {
|
||||
@ -50,7 +50,7 @@ Context& OutputStreamBufferedProxy::getOutputStreamContext() {
|
||||
return m_outputStream->getOutputStreamContext();
|
||||
}
|
||||
|
||||
data::v_io_size OutputStreamBufferedProxy::flush() {
|
||||
v_io_size OutputStreamBufferedProxy::flush() {
|
||||
return m_buffer.flushToStream(m_outputStream.get());
|
||||
}
|
||||
|
||||
@ -58,7 +58,7 @@ oatpp::async::CoroutineStarter OutputStreamBufferedProxy::flushAsync() {
|
||||
return m_buffer.flushToStreamAsync(m_outputStream);
|
||||
}
|
||||
|
||||
data::v_io_size InputStreamBufferedProxy::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size InputStreamBufferedProxy::read(void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
if(m_buffer.availableToRead() > 0) {
|
||||
return m_buffer.read(data, count);
|
||||
@ -72,7 +72,7 @@ data::v_io_size InputStreamBufferedProxy::read(void *data, v_buff_size count, as
|
||||
|
||||
}
|
||||
|
||||
data::v_io_size InputStreamBufferedProxy::peek(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size InputStreamBufferedProxy::peek(void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
if(m_buffer.availableToRead() > 0) {
|
||||
return m_buffer.peek(data, count);
|
||||
@ -86,7 +86,7 @@ data::v_io_size InputStreamBufferedProxy::peek(void *data, v_buff_size count, as
|
||||
|
||||
}
|
||||
|
||||
data::v_io_size InputStreamBufferedProxy::commitReadOffset(v_buff_size count) {
|
||||
v_io_size InputStreamBufferedProxy::commitReadOffset(v_buff_size count) {
|
||||
return m_buffer.commitReadOffset(count);
|
||||
}
|
||||
|
||||
|
@ -53,7 +53,7 @@ public:
|
||||
return Shared_OutputStreamBufferedProxy_Pool::allocateShared(outputStream, memoryLabel);
|
||||
}
|
||||
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set OutputStream I/O mode.
|
||||
@ -73,10 +73,10 @@ public:
|
||||
*/
|
||||
Context& getOutputStreamContext() override;
|
||||
|
||||
data::v_io_size flush();
|
||||
v_io_size flush();
|
||||
oatpp::async::CoroutineStarter flushAsync();
|
||||
|
||||
void setBufferPosition(data::v_io_size readPosition, data::v_io_size writePosition, bool canRead) {
|
||||
void setBufferPosition(v_io_size readPosition, v_io_size writePosition, bool canRead) {
|
||||
m_buffer.setBufferPosition(readPosition, writePosition, canRead);
|
||||
}
|
||||
|
||||
@ -93,8 +93,8 @@ protected:
|
||||
public:
|
||||
InputStreamBufferedProxy(const std::shared_ptr<InputStream>& inputStream,
|
||||
const oatpp::data::share::MemoryLabel& memoryLabel,
|
||||
data::v_io_size bufferReadPosition,
|
||||
data::v_io_size bufferWritePosition,
|
||||
v_io_size bufferReadPosition,
|
||||
v_io_size bufferWritePosition,
|
||||
bool bufferCanRead)
|
||||
: m_inputStream(inputStream)
|
||||
, m_memoryLabel(memoryLabel)
|
||||
@ -110,18 +110,18 @@ public:
|
||||
|
||||
static std::shared_ptr<InputStreamBufferedProxy> createShared(const std::shared_ptr<InputStream>& inputStream,
|
||||
const oatpp::data::share::MemoryLabel& memoryLabel,
|
||||
data::v_io_size bufferReadPosition,
|
||||
data::v_io_size bufferWritePosition,
|
||||
v_io_size bufferReadPosition,
|
||||
v_io_size bufferWritePosition,
|
||||
bool bufferCanRead)
|
||||
{
|
||||
return Shared_InputStreamBufferedProxy_Pool::allocateShared(inputStream, memoryLabel, bufferReadPosition, bufferWritePosition, bufferCanRead);
|
||||
}
|
||||
|
||||
data::v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
data::v_io_size peek(void *data, v_buff_size count, async::Action& action);
|
||||
v_io_size peek(void *data, v_buff_size count, async::Action& action);
|
||||
|
||||
data::v_io_size commitReadOffset(v_buff_size count);
|
||||
v_io_size commitReadOffset(v_buff_size count);
|
||||
|
||||
/**
|
||||
* Set InputStream I/O mode.
|
||||
@ -141,7 +141,7 @@ public:
|
||||
*/
|
||||
Context& getInputStreamContext() override;
|
||||
|
||||
void setBufferPosition(data::v_io_size readPosition, data::v_io_size writePosition, bool canRead) {
|
||||
void setBufferPosition(v_io_size readPosition, v_io_size writePosition, bool canRead) {
|
||||
m_buffer.setBufferPosition(readPosition, writePosition, canRead);
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ namespace oatpp { namespace network {
|
||||
|
||||
oatpp::data::stream::DefaultInitializedContext Connection::DEFAULT_CONTEXT(data::stream::StreamType::STREAM_INFINITE);
|
||||
|
||||
Connection::Connection(data::v_io_handle handle)
|
||||
Connection::Connection(v_io_handle handle)
|
||||
: m_handle(handle)
|
||||
{
|
||||
|
||||
@ -76,7 +76,7 @@ Connection::~Connection(){
|
||||
close();
|
||||
}
|
||||
|
||||
data::v_io_size Connection::write(const void *buff, v_buff_size count, async::Action& action){
|
||||
v_io_size Connection::write(const void *buff, v_buff_size count, async::Action& action){
|
||||
|
||||
#if defined(WIN32) || defined(_WIN32)
|
||||
|
||||
@ -90,14 +90,14 @@ data::v_io_size Connection::write(const void *buff, v_buff_size count, async::Ac
|
||||
if(m_mode == data::stream::ASYNCHRONOUS) {
|
||||
action = oatpp::async::Action::createIOWaitAction(m_handle, oatpp::async::Action::IOEventType::IO_EVENT_WRITE);
|
||||
}
|
||||
return data::IOError::RETRY_WRITE; // For async io. In case socket is non-blocking
|
||||
return IOError::RETRY_WRITE; // For async io. In case socket is non-blocking
|
||||
} else if(e == WSAEINTR) {
|
||||
return data::IOError::RETRY_WRITE;
|
||||
return IOError::RETRY_WRITE;
|
||||
} else if(e == WSAECONNRESET) {
|
||||
return data::IOError::BROKEN_PIPE;
|
||||
return IOError::BROKEN_PIPE;
|
||||
} else {
|
||||
//OATPP_LOGD("Connection", "write errno=%d", e);
|
||||
return data::IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
return IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -119,14 +119,14 @@ data::v_io_size Connection::write(const void *buff, v_buff_size count, async::Ac
|
||||
if(m_mode == data::stream::ASYNCHRONOUS) {
|
||||
action = oatpp::async::Action::createIOWaitAction(m_handle, oatpp::async::Action::IOEventType::IO_EVENT_WRITE);
|
||||
}
|
||||
return data::IOError::RETRY_WRITE; // For async io. In case socket is non-blocking
|
||||
return IOError::RETRY_WRITE; // For async io. In case socket is non-blocking
|
||||
} else if(e == EINTR) {
|
||||
return data::IOError::RETRY_WRITE;
|
||||
return IOError::RETRY_WRITE;
|
||||
} else if(e == EPIPE) {
|
||||
return data::IOError::BROKEN_PIPE;
|
||||
return IOError::BROKEN_PIPE;
|
||||
} else {
|
||||
//OATPP_LOGD("Connection", "write errno=%d", e);
|
||||
return data::IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
return IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -135,7 +135,7 @@ data::v_io_size Connection::write(const void *buff, v_buff_size count, async::Ac
|
||||
|
||||
}
|
||||
|
||||
data::v_io_size Connection::read(void *buff, v_buff_size count, async::Action& action){
|
||||
v_io_size Connection::read(void *buff, v_buff_size count, async::Action& action){
|
||||
|
||||
#if defined(WIN32) || defined(_WIN32)
|
||||
|
||||
@ -149,14 +149,14 @@ data::v_io_size Connection::read(void *buff, v_buff_size count, async::Action& a
|
||||
if(m_mode == data::stream::ASYNCHRONOUS) {
|
||||
action = oatpp::async::Action::createIOWaitAction(m_handle, oatpp::async::Action::IOEventType::IO_EVENT_READ);
|
||||
}
|
||||
return data::IOError::RETRY_READ; // For async io. In case socket is non-blocking
|
||||
return IOError::RETRY_READ; // For async io. In case socket is non-blocking
|
||||
} else if(e == WSAEINTR) {
|
||||
return data::IOError::RETRY_READ;
|
||||
return IOError::RETRY_READ;
|
||||
} else if(e == WSAECONNRESET) {
|
||||
return data::IOError::BROKEN_PIPE;
|
||||
return IOError::BROKEN_PIPE;
|
||||
} else {
|
||||
//OATPP_LOGD("Connection", "write errno=%d", e);
|
||||
return data::IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
return IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -173,14 +173,14 @@ data::v_io_size Connection::read(void *buff, v_buff_size count, async::Action& a
|
||||
if(m_mode == data::stream::ASYNCHRONOUS) {
|
||||
action = oatpp::async::Action::createIOWaitAction(m_handle, oatpp::async::Action::IOEventType::IO_EVENT_READ);
|
||||
}
|
||||
return data::IOError::RETRY_READ; // For async io. In case socket is non-blocking
|
||||
return IOError::RETRY_READ; // For async io. In case socket is non-blocking
|
||||
} else if(e == EINTR) {
|
||||
return data::IOError::RETRY_READ;
|
||||
return IOError::RETRY_READ;
|
||||
} else if(e == ECONNRESET) {
|
||||
return data::IOError::BROKEN_PIPE;
|
||||
return IOError::BROKEN_PIPE;
|
||||
} else {
|
||||
//OATPP_LOGD("Connection", "write errno=%d", e);
|
||||
return data::IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
return IOError::BROKEN_PIPE; // Consider all other errors as a broken pipe.
|
||||
}
|
||||
}
|
||||
return result;
|
||||
|
@ -37,16 +37,16 @@ class Connection : public oatpp::base::Countable, public oatpp::data::stream::IO
|
||||
private:
|
||||
static oatpp::data::stream::DefaultInitializedContext DEFAULT_CONTEXT;
|
||||
private:
|
||||
data::v_io_handle m_handle;
|
||||
v_io_handle m_handle;
|
||||
data::stream::IOMode m_mode;
|
||||
private:
|
||||
void setStreamIOMode(oatpp::data::stream::IOMode ioMode);
|
||||
public:
|
||||
/**
|
||||
* Constructor.
|
||||
* @param handle - file descriptor (socket handle). See &id:oatpp::data::v_io_handle;.
|
||||
* @param handle - file descriptor (socket handle). See &id:oatpp::v_io_handle;.
|
||||
*/
|
||||
Connection(data::v_io_handle handle);
|
||||
Connection(v_io_handle handle);
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -61,9 +61,9 @@ public:
|
||||
* @param count - bytes count you want to write.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual amount of bytes written. See &id:oatpp::data::v_io_size;.
|
||||
* @return - actual amount of bytes written. See &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size write(const void *buff, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *buff, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Implementation of &id:oatpp::data::stream::IOStream::read;.
|
||||
@ -71,9 +71,9 @@ public:
|
||||
* @param count - buffer size.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual amount of bytes read. See &id:oatpp::data::v_io_size;.
|
||||
* @return - actual amount of bytes read. See &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size read(void *buff, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *buff, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set OutputStream I/O mode.
|
||||
@ -118,9 +118,9 @@ public:
|
||||
|
||||
/**
|
||||
* Get socket handle.
|
||||
* @return - socket handle. &id:oatpp::data::v_io_handle;.
|
||||
* @return - socket handle. &id:oatpp::v_io_handle;.
|
||||
*/
|
||||
data::v_io_handle getHandle(){
|
||||
v_io_handle getHandle(){
|
||||
return m_handle;
|
||||
}
|
||||
|
||||
|
@ -64,11 +64,11 @@ ConnectionPool::ConnectionWrapper::~ConnectionWrapper() {
|
||||
}
|
||||
}
|
||||
|
||||
data::v_io_size ConnectionPool::ConnectionWrapper::write(const void *buff, v_buff_size count, async::Action& action) {
|
||||
v_io_size ConnectionPool::ConnectionWrapper::write(const void *buff, v_buff_size count, async::Action& action) {
|
||||
return m_connection->write(buff, count, action);
|
||||
}
|
||||
|
||||
data::v_io_size ConnectionPool::ConnectionWrapper::read(void *buff, v_buff_size count, async::Action& action) {
|
||||
v_io_size ConnectionPool::ConnectionWrapper::read(void *buff, v_buff_size count, async::Action& action) {
|
||||
return m_connection->read(buff, count, action);
|
||||
}
|
||||
|
||||
|
@ -89,8 +89,8 @@ public:
|
||||
ConnectionWrapper(const std::shared_ptr<IOStream>& connection, const std::shared_ptr<Pool>& pool);
|
||||
~ConnectionWrapper();
|
||||
|
||||
data::v_io_size write(const void *buff, v_buff_size count, async::Action& action) override;
|
||||
data::v_io_size read(void *buff, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *buff, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *buff, v_buff_size count, async::Action& action) override;
|
||||
|
||||
void setOutputStreamIOMode(oatpp::data::stream::IOMode ioMode) override;
|
||||
oatpp::data::stream::IOMode getOutputStreamIOMode() override;
|
||||
|
@ -75,7 +75,7 @@ std::shared_ptr<oatpp::data::stream::IOStream> SimpleTCPConnectionProvider::getC
|
||||
}
|
||||
|
||||
struct addrinfo* currResult = result;
|
||||
oatpp::data::v_io_handle clientHandle =- 1;
|
||||
oatpp::v_io_handle clientHandle =- 1;
|
||||
|
||||
while(currResult != nullptr) {
|
||||
|
||||
@ -123,7 +123,7 @@ oatpp::async::CoroutineStarterForResult<const std::shared_ptr<oatpp::data::strea
|
||||
private:
|
||||
oatpp::String m_host;
|
||||
v_int32 m_port;
|
||||
oatpp::data::v_io_handle m_clientHandle;
|
||||
oatpp::v_io_handle m_clientHandle;
|
||||
private:
|
||||
struct addrinfo* m_result;
|
||||
struct addrinfo* m_currentResult;
|
||||
|
@ -50,7 +50,7 @@ void Server::mainLoop(){
|
||||
while(getStatus() == STATUS_RUNNING) {
|
||||
|
||||
auto connection = m_connectionProvider->getConnection();
|
||||
OATPP_LOGD("AAA", "c=%d, status=%d", connection.get(), getStatus());
|
||||
|
||||
if (connection) {
|
||||
if(getStatus() == STATUS_RUNNING){
|
||||
m_connectionHandler->handleConnection(connection, params /* null params */);
|
||||
|
@ -49,7 +49,7 @@ const char* const SimpleTCPConnectionProvider::ExtendedConnection::PROPERTY_PEER
|
||||
const char* const SimpleTCPConnectionProvider::ExtendedConnection::PROPERTY_PEER_ADDRESS_FORMAT = "peer_address_format";
|
||||
const char* const SimpleTCPConnectionProvider::ExtendedConnection::PROPERTY_PEER_PORT = "peer_port";
|
||||
|
||||
SimpleTCPConnectionProvider::ExtendedConnection::ExtendedConnection(data::v_io_handle handle, data::stream::Context::Properties&& properties)
|
||||
SimpleTCPConnectionProvider::ExtendedConnection::ExtendedConnection(v_io_handle handle, data::stream::Context::Properties&& properties)
|
||||
: Connection(handle)
|
||||
, m_context(data::stream::StreamType::STREAM_INFINITE, std::forward<data::stream::Context::Properties>(properties))
|
||||
{}
|
||||
@ -92,7 +92,7 @@ void SimpleTCPConnectionProvider::close() {
|
||||
|
||||
#if defined(WIN32) || defined(_WIN32)
|
||||
|
||||
oatpp::data::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
oatpp::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
|
||||
int iResult;
|
||||
|
||||
@ -151,9 +151,9 @@ oatpp::data::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
|
||||
#else
|
||||
|
||||
oatpp::data::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
oatpp::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
|
||||
oatpp::data::v_io_handle serverHandle;
|
||||
oatpp::v_io_handle serverHandle;
|
||||
v_int32 ret;
|
||||
int yes = 1;
|
||||
|
||||
@ -195,7 +195,7 @@ oatpp::data::v_io_handle SimpleTCPConnectionProvider::instantiateServer(){
|
||||
|
||||
#endif
|
||||
|
||||
bool SimpleTCPConnectionProvider::prepareConnectionHandle(oatpp::data::v_io_handle handle) {
|
||||
bool SimpleTCPConnectionProvider::prepareConnectionHandle(oatpp::v_io_handle handle) {
|
||||
|
||||
if (handle < 0) {
|
||||
v_int32 error = errno;
|
||||
@ -223,11 +223,11 @@ bool SimpleTCPConnectionProvider::prepareConnectionHandle(oatpp::data::v_io_hand
|
||||
|
||||
std::shared_ptr<oatpp::data::stream::IOStream> SimpleTCPConnectionProvider::getDefaultConnection() {

OATPP_LOGD("AAA", "accept begin");
oatpp::v_io_handle handle = accept(m_serverHandle, nullptr, nullptr);

oatpp::data::v_io_handle handle = accept(m_serverHandle, nullptr, nullptr);

OATPP_LOGD("AAA", "accept result=%d", handle);
if(!oatpp::isValidIoHandle(handle)) {
return nullptr;
}

if(prepareConnectionHandle(handle)) {
return std::make_shared<Connection>(handle);
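The hunks above and below replace the old "handle < 0" style checks with the new oatpp::isValidIoHandle(), so the same call site works for POSIX descriptors and WinSock SOCKET handles. A minimal usage sketch, assuming the POSIX accept() variant (the helper name acceptClient is hypothetical):

    #include <sys/socket.h>                  // POSIX accept(); the real provider also has a WinSock2 branch
    #include "oatpp/core/IODefinitions.hpp"

    // Hypothetical helper: accept one client and validate the handle with
    // oatpp::isValidIoHandle(), which hides the -1 vs INVALID_SOCKET difference.
    oatpp::v_io_handle acceptClient(oatpp::v_io_handle serverHandle, bool& ok) {
      oatpp::v_io_handle clientHandle = accept(serverHandle, nullptr, nullptr);
      ok = oatpp::isValidIoHandle(clientHandle);
      return clientHandle;
    }

Centralizing the validity check in one function is what lets the provider code stay free of platform #ifdefs at the call sites.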
@ -244,7 +244,11 @@ std::shared_ptr<oatpp::data::stream::IOStream> SimpleTCPConnectionProvider::getE

data::stream::Context::Properties properties;

oatpp::data::v_io_handle handle = accept(m_serverHandle, (struct sockaddr*) &clientAddress, &clientAddressSize);
oatpp::v_io_handle handle = accept(m_serverHandle, (struct sockaddr*) &clientAddress, &clientAddressSize);

if(!oatpp::isValidIoHandle(handle)) {
return nullptr;
}

if (clientAddress.ss_family == AF_INET) {
|
||||
|
||||
@ -298,20 +302,13 @@ std::shared_ptr<oatpp::data::stream::IOStream> SimpleTCPConnectionProvider::getC
|
||||
timeout.tv_usec = 0;
|
||||
|
||||
while(!m_closed) {
|
||||
OATPP_LOGD("AAA", "select %d, %d begin", this, m_serverHandle);
|
||||
|
||||
auto res = select(m_serverHandle + 1, &set, nullptr, nullptr, &timeout);
|
||||
OATPP_LOGD("AAA", "select %d, %d end res=%d", this, m_serverHandle, res);
|
||||
|
||||
if (res >= 0) {
|
||||
break;
|
||||
}
|
||||
#if defined(WIN32) || defined(_WIN32)
|
||||
else {
|
||||
auto e = WSAGetLastError();
|
||||
|
||||
OATPP_LOGD("AAA", "select error=%d", e);
|
||||
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if(m_useExtendedConnections) {
|
||||
|
@ -54,10 +54,10 @@ public:
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param handle - &id:oatpp::data::v_io_handle;.
|
||||
* @param handle - &id:oatpp::v_io_handle;.
|
||||
* @param properties - &id:oatpp::data::stream::Context::Properties;.
|
||||
*/
|
||||
ExtendedConnection(data::v_io_handle handle, data::stream::Context::Properties&& properties);
|
||||
ExtendedConnection(v_io_handle handle, data::stream::Context::Properties&& properties);
|
||||
|
||||
/**
|
||||
* Get output stream context.
|
||||
@ -76,12 +76,12 @@ public:
|
||||
private:
|
||||
v_word16 m_port;
|
||||
std::atomic<bool> m_closed;
|
||||
oatpp::data::v_io_handle m_serverHandle;
|
||||
oatpp::v_io_handle m_serverHandle;
|
||||
bool m_useExtendedConnections;
|
||||
private:
|
||||
oatpp::data::v_io_handle instantiateServer();
|
||||
oatpp::v_io_handle instantiateServer();
|
||||
private:
|
||||
bool prepareConnectionHandle(oatpp::data::v_io_handle handle);
|
||||
bool prepareConnectionHandle(oatpp::v_io_handle handle);
|
||||
std::shared_ptr<IOStream> getDefaultConnection();
|
||||
std::shared_ptr<IOStream> getExtendedConnection();
|
||||
public:
|
||||
|
@ -36,18 +36,18 @@ oatpp::data::stream::IOMode Pipe::Reader::getInputStreamIOMode() {
|
||||
return m_ioMode;
|
||||
}
|
||||
|
||||
void Pipe::Reader::setMaxAvailableToRead(data::v_io_size maxAvailableToRead) {
|
||||
void Pipe::Reader::setMaxAvailableToRead(v_io_size maxAvailableToRead) {
|
||||
m_maxAvailableToRead = maxAvailableToRead;
|
||||
}
|
||||
|
||||
data::v_io_size Pipe::Reader::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Pipe::Reader::read(void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
if(m_maxAvailableToRead > -1 && count > m_maxAvailableToRead) {
|
||||
count = m_maxAvailableToRead;
|
||||
}
|
||||
|
||||
Pipe& pipe = *m_pipe;
|
||||
oatpp::data::v_io_size result;
|
||||
oatpp::v_io_size result;
|
||||
|
||||
if(m_ioMode == oatpp::data::stream::IOMode::ASYNCHRONOUS) {
|
||||
|
||||
@ -57,9 +57,9 @@ data::v_io_size Pipe::Reader::read(void *data, v_buff_size count, async::Action&
|
||||
result = pipe.m_fifo.read(data, count);
|
||||
} else if (pipe.m_open) {
|
||||
action = async::Action::createWaitListAction(&m_waitList);
|
||||
result = data::IOError::RETRY_READ;
|
||||
result = IOError::RETRY_READ;
|
||||
} else {
|
||||
result = data::IOError::BROKEN_PIPE;
|
||||
result = IOError::BROKEN_PIPE;
|
||||
}
|
||||
|
||||
} else {
|
||||
@ -70,7 +70,7 @@ data::v_io_size Pipe::Reader::read(void *data, v_buff_size count, async::Action&
|
||||
if (pipe.m_fifo.availableToRead() > 0) {
|
||||
result = pipe.m_fifo.read(data, count);
|
||||
} else {
|
||||
result = data::IOError::BROKEN_PIPE;
|
||||
result = IOError::BROKEN_PIPE;
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,18 +107,18 @@ oatpp::data::stream::Context& Pipe::Writer::getOutputStreamContext() {
|
||||
return DEFAULT_CONTEXT;
|
||||
}
|
||||
|
||||
void Pipe::Writer::setMaxAvailableToWrite(data::v_io_size maxAvailableToWrite) {
|
||||
void Pipe::Writer::setMaxAvailableToWrite(v_io_size maxAvailableToWrite) {
|
||||
m_maxAvailableToWrtie = maxAvailableToWrite;
|
||||
}
|
||||
|
||||
data::v_io_size Pipe::Writer::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Pipe::Writer::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
if(m_maxAvailableToWrtie > -1 && count > m_maxAvailableToWrtie) {
|
||||
count = m_maxAvailableToWrtie;
|
||||
}
|
||||
|
||||
Pipe& pipe = *m_pipe;
|
||||
oatpp::data::v_io_size result;
|
||||
oatpp::v_io_size result;
|
||||
|
||||
if(m_ioMode == oatpp::data::stream::IOMode::ASYNCHRONOUS) {
|
||||
|
||||
@ -128,9 +128,9 @@ data::v_io_size Pipe::Writer::write(const void *data, v_buff_size count, async::
|
||||
result = pipe.m_fifo.write(data, count);
|
||||
} else if (pipe.m_open) {
|
||||
action = async::Action::createWaitListAction(&m_waitList);
|
||||
result = data::IOError::RETRY_WRITE;
|
||||
result = IOError::RETRY_WRITE;
|
||||
} else {
|
||||
result = data::IOError::BROKEN_PIPE;
|
||||
result = IOError::BROKEN_PIPE;
|
||||
}
|
||||
|
||||
} else {
|
||||
@ -141,7 +141,7 @@ data::v_io_size Pipe::Writer::write(const void *data, v_buff_size count, async::
|
||||
if (pipe.m_open && pipe.m_fifo.availableToWrite() > 0) {
|
||||
result = pipe.m_fifo.write(data, count);
|
||||
} else {
|
||||
result = data::IOError::BROKEN_PIPE;
|
||||
result = IOError::BROKEN_PIPE;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,7 +81,7 @@ public:
|
||||
/*
|
||||
* this one used for testing purposes only
|
||||
*/
|
||||
data::v_io_size m_maxAvailableToRead;
|
||||
v_io_size m_maxAvailableToRead;
|
||||
|
||||
oatpp::async::CoroutineWaitList m_waitList;
|
||||
WaitListListener m_waitListListener;
|
||||
@ -104,7 +104,7 @@ public:
|
||||
* set to -1 in order to ignore this value.<br>
|
||||
* @param maxAvailableToRead - maximum available amount of bytes to read.
|
||||
*/
|
||||
void setMaxAvailableToRead(data::v_io_size maxAvailableToRead);
|
||||
void setMaxAvailableToRead(v_io_size maxAvailableToRead);
|
||||
|
||||
/**
|
||||
* Implements &id:oatpp::data::stream::InputStream::read; method.
|
||||
@ -113,9 +113,9 @@ public:
|
||||
* @param count - max count of bytes to read.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - &id:oatpp::data::v_io_size;.
|
||||
* @return - &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set InputStream I/O mode.
|
||||
@ -173,7 +173,7 @@ public:
|
||||
/*
|
||||
* this one used for testing purposes only
|
||||
*/
|
||||
data::v_io_size m_maxAvailableToWrtie;
|
||||
v_io_size m_maxAvailableToWrtie;
|
||||
|
||||
oatpp::async::CoroutineWaitList m_waitList;
|
||||
WaitListListener m_waitListListener;
|
||||
@ -196,7 +196,7 @@ public:
|
||||
* set to -1 in order to ignore this value.<br>
|
||||
* @param maxAvailableToWrite - maximum available amount of bytes to write.
|
||||
*/
|
||||
void setMaxAvailableToWrite(data::v_io_size maxAvailableToWrite);
|
||||
void setMaxAvailableToWrite(v_io_size maxAvailableToWrite);
|
||||
|
||||
/**
|
||||
* Implements &id:oatpp::data::stream::OutputStream::write; method.
|
||||
@ -205,9 +205,9 @@ public:
|
||||
* @param count - data size.
|
||||
* @param action - async specific action. If action is NOT &id:oatpp::async::Action::TYPE_NONE;, then
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - &id:oatpp::data::v_io_size;.
|
||||
* @return - &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set OutputStream I/O mode.
|
||||
|
@ -39,16 +39,16 @@ std::shared_ptr<Socket> Socket::createShared(const std::shared_ptr<Pipe>& pipeIn
|
||||
return std::make_shared<Socket>(pipeIn, pipeOut);
|
||||
}
|
||||
|
||||
void Socket::setMaxAvailableToReadWrtie(data::v_io_size maxToRead, data::v_io_size maxToWrite) {
|
||||
void Socket::setMaxAvailableToReadWrtie(v_io_size maxToRead, v_io_size maxToWrite) {
|
||||
m_pipeIn->getReader()->setMaxAvailableToRead(maxToRead);
|
||||
m_pipeOut->getWriter()->setMaxAvailableToWrite(maxToWrite);
|
||||
}
|
||||
|
||||
data::v_io_size Socket::read(void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Socket::read(void *data, v_buff_size count, async::Action& action) {
|
||||
return m_pipeIn->getReader()->read(data, count, action);
|
||||
}
|
||||
|
||||
data::v_io_size Socket::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Socket::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
return m_pipeOut->getWriter()->write(data, count, action);
|
||||
}
|
||||
|
||||
|
@ -66,7 +66,7 @@ public:
|
||||
* @param maxToRead - maximum available amount of bytes to read.
|
||||
* @param maxToWrite - maximum available amount of bytes to write.
|
||||
*/
|
||||
void setMaxAvailableToReadWrtie(data::v_io_size maxToRead, data::v_io_size maxToWrite);
|
||||
void setMaxAvailableToReadWrtie(v_io_size maxToRead, v_io_size maxToWrite);
|
||||
|
||||
/**
|
||||
* Read data from socket.
|
||||
@ -76,7 +76,7 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual amount of data read from socket.
|
||||
*/
|
||||
data::v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Write data to socket.
|
||||
@ -86,7 +86,7 @@ public:
|
||||
* caller MUST return this action on coroutine iteration.
|
||||
* @return - actual amount of data written to socket.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set OutputStream I/O mode.
|
||||
|
@ -62,14 +62,14 @@ oatpp::async::CoroutineStarterForResult<const std::shared_ptr<oatpp::data::strea
|
||||
class ConnectCoroutine : public oatpp::async::CoroutineWithResult<ConnectCoroutine, const std::shared_ptr<oatpp::data::stream::IOStream>&> {
|
||||
private:
|
||||
std::shared_ptr<virtual_::Interface> m_interface;
|
||||
data::v_io_size m_maxAvailableToRead;
|
||||
data::v_io_size m_maxAvailableToWrite;
|
||||
v_io_size m_maxAvailableToRead;
|
||||
v_io_size m_maxAvailableToWrite;
|
||||
std::shared_ptr<virtual_::Interface::ConnectionSubmission> m_submission;
|
||||
public:
|
||||
|
||||
ConnectCoroutine(const std::shared_ptr<virtual_::Interface>& interface,
|
||||
data::v_io_size maxAvailableToRead,
|
||||
data::v_io_size maxAvailableToWrite)
|
||||
v_io_size maxAvailableToRead,
|
||||
v_io_size maxAvailableToWrite)
|
||||
: m_interface(interface)
|
||||
, m_maxAvailableToRead(maxAvailableToRead)
|
||||
, m_maxAvailableToWrite(maxAvailableToWrite)
|
||||
|
@ -38,8 +38,8 @@ namespace oatpp { namespace network { namespace virtual_ { namespace client {
|
||||
class ConnectionProvider : public oatpp::network::ClientConnectionProvider {
|
||||
private:
|
||||
std::shared_ptr<virtual_::Interface> m_interface;
|
||||
data::v_io_size m_maxAvailableToRead;
|
||||
data::v_io_size m_maxAvailableToWrite;
|
||||
v_io_size m_maxAvailableToRead;
|
||||
v_io_size m_maxAvailableToWrite;
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -61,7 +61,7 @@ public:
|
||||
* @param maxToRead - maximum available amount of bytes to read.
|
||||
* @param maxToWrite - maximum available amount of bytes to write.
|
||||
*/
|
||||
void setSocketMaxAvailableToReadWrtie(data::v_io_size maxToRead, data::v_io_size maxToWrite) {
|
||||
void setSocketMaxAvailableToReadWrtie(v_io_size maxToRead, v_io_size maxToWrite) {
|
||||
m_maxAvailableToRead = maxToRead;
|
||||
m_maxAvailableToWrite = maxToWrite;
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ std::shared_ptr<ConnectionProvider> ConnectionProvider::createShared(const std::
|
||||
return std::make_shared<ConnectionProvider>(interface);
|
||||
}
|
||||
|
||||
void ConnectionProvider::setSocketMaxAvailableToReadWrtie(data::v_io_size maxToRead, data::v_io_size maxToWrite) {
|
||||
void ConnectionProvider::setSocketMaxAvailableToReadWrtie(v_io_size maxToRead, v_io_size maxToWrite) {
|
||||
m_maxAvailableToRead = maxToRead;
|
||||
m_maxAvailableToWrite = maxToWrite;
|
||||
}
|
||||
|
@ -40,8 +40,8 @@ private:
|
||||
std::shared_ptr<virtual_::Interface> m_interface;
|
||||
std::shared_ptr<virtual_::Interface::ListenerLock> m_listenerLock;
|
||||
bool m_open;
|
||||
data::v_io_size m_maxAvailableToRead;
|
||||
data::v_io_size m_maxAvailableToWrite;
|
||||
v_io_size m_maxAvailableToRead;
|
||||
v_io_size m_maxAvailableToWrite;
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -63,7 +63,7 @@ public:
|
||||
* @param maxToRead - maximum available amount of bytes to read.
|
||||
* @param maxToWrite - maximum available amount of bytes to write.
|
||||
*/
|
||||
void setSocketMaxAvailableToReadWrtie(data::v_io_size maxToRead, data::v_io_size maxToWrite);
|
||||
void setSocketMaxAvailableToReadWrtie(v_io_size maxToRead, v_io_size maxToWrite);
|
||||
|
||||
/**
|
||||
* Break accepting loop.
|
||||
|
@ -45,7 +45,7 @@ void Beautifier::writeIndent(ConsistentOutputStream* outputStream) {
|
||||
}
|
||||
}
|
||||
|
||||
data::v_io_size Beautifier::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Beautifier::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
oatpp::data::stream::BufferOutputStream buffer;
|
||||
|
||||
|
@ -63,9 +63,9 @@ public:
|
||||
* @param data - data to write.
|
||||
* @param count - number of bytes to write.
|
||||
* @param action
|
||||
* @return - actual number of bytes written. &id:oatpp::data::v_io_size;.
|
||||
* @return - actual number of bytes written. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set stream I/O mode.
|
||||
|
@ -67,13 +67,13 @@ async::CoroutineStarter AsyncFileStreamProvider::getInputStreamAsync(const std::
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Other functions
|
||||
|
||||
std::shared_ptr<PartReader> createFilePartReader(const oatpp::String& filename, data::v_io_size maxDataSize) {
|
||||
std::shared_ptr<PartReader> createFilePartReader(const oatpp::String& filename, v_io_size maxDataSize) {
|
||||
auto provider = std::make_shared<FileStreamProvider>(filename);
|
||||
auto reader = std::make_shared<StreamPartReader>(provider, maxDataSize);
|
||||
return reader;
|
||||
}
|
||||
|
||||
std::shared_ptr<AsyncPartReader> createAsyncFilePartReader(const oatpp::String& filename, data::v_io_size maxDataSize) {
|
||||
std::shared_ptr<AsyncPartReader> createAsyncFilePartReader(const oatpp::String& filename, v_io_size maxDataSize) {
|
||||
auto provider = std::make_shared<AsyncFileStreamProvider>(filename);
|
||||
auto reader = std::make_shared<AsyncStreamPartReader>(provider, maxDataSize);
|
||||
return reader;
|
||||
|
@ -125,7 +125,7 @@ public:
|
||||
* @param maxDataSize - max size of the received data. put `-1` for no-limit.
|
||||
* @return - `std::shared_ptr` to &id:oatpp::web::mime::multipart::PartReader;.
|
||||
*/
|
||||
std::shared_ptr<PartReader> createFilePartReader(const oatpp::String& filename, data::v_io_size maxDataSize = -1);
|
||||
std::shared_ptr<PartReader> createFilePartReader(const oatpp::String& filename, v_io_size maxDataSize = -1);
|
||||
|
||||
/**
|
||||
* Create async file part reader. <br>
|
||||
@ -134,7 +134,7 @@ std::shared_ptr<PartReader> createFilePartReader(const oatpp::String& filename,
|
||||
* @param maxDataSize - max size of the received data. put `-1` for no-limit.
|
||||
* @return - `std::shared_ptr` to &id:oatpp::web::mime::multipart::AsyncPartReader;.
|
||||
*/
|
||||
std::shared_ptr<AsyncPartReader> createAsyncFilePartReader(const oatpp::String& filename, data::v_io_size maxDataSize = -1);
|
||||
std::shared_ptr<AsyncPartReader> createAsyncFilePartReader(const oatpp::String& filename, v_io_size maxDataSize = -1);
|
||||
|
||||
}}}}
|
||||
|
||||
|
@ -30,7 +30,7 @@ namespace oatpp { namespace web { namespace mime { namespace multipart {
|
||||
|
||||
const char* const InMemoryPartReader::TAG_NAME = "[oatpp::web::mime::multipart::InMemoryPartReader::TAG]";
|
||||
|
||||
InMemoryPartReader::InMemoryPartReader(data::v_io_size maxDataSize)
|
||||
InMemoryPartReader::InMemoryPartReader(v_io_size maxDataSize)
|
||||
: m_maxDataSize(maxDataSize)
|
||||
{}
|
||||
|
||||
@ -48,7 +48,7 @@ void InMemoryPartReader::onNewPart(const std::shared_ptr<Part>& part) {
|
||||
|
||||
}
|
||||
|
||||
void InMemoryPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) {
|
||||
void InMemoryPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) {
|
||||
|
||||
auto tag = part->getTagObject();
|
||||
if(!tag) {
|
||||
@ -83,7 +83,7 @@ void InMemoryPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 d
|
||||
|
||||
const char* const AsyncInMemoryPartReader::TAG_NAME = "[oatpp::web::mime::multipart::AsyncInMemoryPartReader::TAG]";
|
||||
|
||||
AsyncInMemoryPartReader::AsyncInMemoryPartReader(data::v_io_size maxDataSize)
|
||||
AsyncInMemoryPartReader::AsyncInMemoryPartReader(v_io_size maxDataSize)
|
||||
: m_maxDataSize(maxDataSize)
|
||||
{}
|
||||
|
||||
@ -103,7 +103,7 @@ async::CoroutineStarter AsyncInMemoryPartReader::onNewPartAsync(const std::share
|
||||
|
||||
}
|
||||
|
||||
async::CoroutineStarter AsyncInMemoryPartReader::onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) {
|
||||
async::CoroutineStarter AsyncInMemoryPartReader::onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) {
|
||||
|
||||
auto tag = part->getTagObject();
|
||||
if(!tag) {
|
||||
@ -139,12 +139,12 @@ async::CoroutineStarter AsyncInMemoryPartReader::onPartDataAsync(const std::shar
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Other functions
|
||||
|
||||
std::shared_ptr<PartReader> createInMemoryPartReader(data::v_io_size maxDataSize) {
|
||||
std::shared_ptr<PartReader> createInMemoryPartReader(v_io_size maxDataSize) {
|
||||
return std::make_shared<InMemoryPartReader>(maxDataSize);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<AsyncPartReader> createAsyncInMemoryPartReader(data::v_io_size maxDataSize) {
|
||||
std::shared_ptr<AsyncPartReader> createAsyncInMemoryPartReader(v_io_size maxDataSize) {
|
||||
return std::make_shared<AsyncInMemoryPartReader>(maxDataSize);
|
||||
}
|
||||
|
||||
|
@ -36,14 +36,14 @@ class InMemoryPartReader : public PartReader {
|
||||
private:
|
||||
static const char* const TAG_NAME;
|
||||
private:
|
||||
oatpp::data::v_io_size m_maxDataSize;
|
||||
oatpp::v_io_size m_maxDataSize;
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param maxDataSize
|
||||
*/
|
||||
InMemoryPartReader(data::v_io_size maxDataSize = 64 * 1024);
|
||||
InMemoryPartReader(v_io_size maxDataSize = 64 * 1024);
|
||||
|
||||
/**
|
||||
* Called when new part headers are parsed and part object is created.
|
||||
@ -58,7 +58,7 @@ public:
|
||||
* @param data - pointer to buffer containing chunk data.
|
||||
* @param size - size of the buffer.
|
||||
*/
|
||||
void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) override;
|
||||
void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) override;
|
||||
|
||||
};
|
||||
|
||||
@ -69,14 +69,14 @@ class AsyncInMemoryPartReader : public AsyncPartReader {
|
||||
private:
|
||||
static const char* const TAG_NAME;
|
||||
private:
|
||||
oatpp::data::v_io_size m_maxDataSize;
|
||||
oatpp::v_io_size m_maxDataSize;
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param maxDataSize
|
||||
*/
|
||||
AsyncInMemoryPartReader(data::v_io_size maxDataSize = 64 * 1024);
|
||||
AsyncInMemoryPartReader(v_io_size maxDataSize = 64 * 1024);
|
||||
|
||||
/**
|
||||
* Called when new part headers are parsed and part object is created.
|
||||
@ -93,7 +93,7 @@ public:
|
||||
* @param size - size of the buffer.
|
||||
* @return - &id:oatpp::async::CoroutineStarter;.
|
||||
*/
|
||||
async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) override;
|
||||
async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) override;
|
||||
|
||||
};
|
||||
|
||||
@ -102,14 +102,14 @@ public:
|
||||
* @param maxDataSize - max size of the received data.
|
||||
* @return - `std::shared_ptr` to &id:oatpp::web::mime::multipart::PartReader;.
|
||||
*/
|
||||
std::shared_ptr<PartReader> createInMemoryPartReader(data::v_io_size maxDataSize = 64 * 1024);
|
||||
std::shared_ptr<PartReader> createInMemoryPartReader(v_io_size maxDataSize = 64 * 1024);
|
||||
|
||||
/**
|
||||
* Create in-memory part reader. <br>
|
||||
* @param maxDataSize - max size of the received data.
|
||||
* @return - `std::shared_ptr` to &id:oatpp::web::mime::multipart::AsyncPartReader;.
|
||||
*/
|
||||
std::shared_ptr<AsyncPartReader> createAsyncInMemoryPartReader(data::v_io_size maxDataSize = 64 * 1024);
|
||||
std::shared_ptr<AsyncPartReader> createAsyncInMemoryPartReader(v_io_size maxDataSize = 64 * 1024);
|
||||
|
||||
}}}}
|
||||
|
||||
|
@ -141,7 +141,7 @@ Reader::Reader(Multipart* multipart)
|
||||
, m_parser(multipart->getBoundary(), m_partsParser, nullptr)
|
||||
{}
|
||||
|
||||
data::v_io_size Reader::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size Reader::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
data::buffer::InlineWriteData inlineData(data, count);
|
||||
m_parser.parseNext(inlineData, action);
|
||||
return count - inlineData.bytesLeft;
|
||||
@ -164,7 +164,7 @@ AsyncReader::AsyncReader(const std::shared_ptr<Multipart>& multipart)
|
||||
, m_multipart(multipart)
|
||||
{}
|
||||
|
||||
data::v_io_size AsyncReader::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
v_io_size AsyncReader::write(const void *data, v_buff_size count, async::Action& action) {
|
||||
|
||||
data::buffer::InlineWriteData inlineData(data, count);
|
||||
while(inlineData.bytesLeft > 0 && !m_parser.finished() && action.isNone()) {
|
||||
|
@ -56,7 +56,7 @@ public:
|
||||
* @param data - pointer to buffer containing chunk data.
|
||||
* @param size - size of the buffer.
|
||||
*/
|
||||
virtual void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) = 0;
|
||||
virtual void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) = 0;
|
||||
|
||||
};
|
||||
|
||||
@ -86,7 +86,7 @@ public:
|
||||
* @param size - size of the buffer.
|
||||
* @return - &id:oatpp::async::CoroutineStarter;.
|
||||
*/
|
||||
virtual async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) = 0;
|
||||
virtual async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) = 0;
|
||||
|
||||
};
|
||||
|
||||
@ -184,7 +184,7 @@ public:
|
||||
*/
|
||||
Reader(Multipart* multipart);
|
||||
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set named part reader. <br>
|
||||
@ -220,7 +220,7 @@ public:
|
||||
*/
|
||||
AsyncReader(const std::shared_ptr<Multipart>& multipart);
|
||||
|
||||
data::v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
v_io_size write(const void *data, v_buff_size count, async::Action& action) override;
|
||||
|
||||
/**
|
||||
* Set named part reader. <br>
|
||||
|
@ -138,7 +138,7 @@ StatefulParser::ListenerCall StatefulParser::parseNext_Boundary(data::buffer::In
|
||||
auto size = inlineData.bytesLeft;
|
||||
|
||||
p_char8 sampleData = m_nextBoundarySample->getData();
|
||||
data::v_io_size sampleSize = m_nextBoundarySample->getSize();
|
||||
v_io_size sampleSize = m_nextBoundarySample->getSize();
|
||||
|
||||
if (m_currPartIndex == 0) {
|
||||
sampleData = m_firstBoundarySample->getData();
|
||||
@ -148,7 +148,7 @@ StatefulParser::ListenerCall StatefulParser::parseNext_Boundary(data::buffer::In
|
||||
sampleSize = m_nextBoundarySample->getSize();
|
||||
}
|
||||
|
||||
data::v_io_size checkSize = sampleSize - m_currBoundaryCharIndex;
|
||||
v_io_size checkSize = sampleSize - m_currBoundaryCharIndex;
|
||||
if(checkSize > size) {
|
||||
checkSize = size;
|
||||
}
|
||||
|
@ -145,7 +145,7 @@ private:
|
||||
|
||||
v_int32 callType;
|
||||
p_char8 data;
|
||||
data::v_io_size size;
|
||||
v_io_size size;
|
||||
|
||||
void setOnHeadersCall();
|
||||
void setOnDataCall(p_char8 pData, v_buff_size pSize);
|
||||
|
@ -29,7 +29,7 @@ namespace oatpp { namespace web { namespace mime { namespace multipart {
|
||||
const char* const StreamPartReader::TAG_NAME = "[oatpp::web::mime::multipart::StreamPartReader::TAG]";
|
||||
|
||||
StreamPartReader::StreamPartReader(const std::shared_ptr<PartReaderStreamProvider>& streamProvider,
|
||||
data::v_io_size maxDataSize)
|
||||
v_io_size maxDataSize)
|
||||
: m_streamProvider(streamProvider)
|
||||
, m_maxDataSize(maxDataSize)
|
||||
{}
|
||||
@ -53,7 +53,7 @@ void StreamPartReader::onNewPart(const std::shared_ptr<Part>& part) {
|
||||
|
||||
}
|
||||
|
||||
void StreamPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) {
|
||||
void StreamPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) {
|
||||
|
||||
auto tag = part->getTagObject();
|
||||
if(!tag) {
|
||||
@ -92,7 +92,7 @@ void StreamPartReader::onPartData(const std::shared_ptr<Part>& part, p_char8 dat
|
||||
const char* const AsyncStreamPartReader::TAG_NAME = "[oatpp::web::mime::multipart::AsyncStreamPartReader::TAG]";
|
||||
|
||||
AsyncStreamPartReader::AsyncStreamPartReader(const std::shared_ptr<AsyncPartReaderStreamProvider>& streamProvider,
|
||||
data::v_io_size maxDataSize)
|
||||
v_io_size maxDataSize)
|
||||
: m_streamProvider(streamProvider)
|
||||
, m_maxDataSize(maxDataSize)
|
||||
{}
|
||||
@ -142,7 +142,7 @@ async::CoroutineStarter AsyncStreamPartReader::onNewPartAsync(const std::shared_
|
||||
|
||||
}
|
||||
|
||||
async::CoroutineStarter AsyncStreamPartReader::onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) {
|
||||
async::CoroutineStarter AsyncStreamPartReader::onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) {
|
||||
|
||||
auto tag = part->getTagObject();
|
||||
if(!tag) {
|
||||
|
@ -121,13 +121,13 @@ private:
|
||||
|
||||
class TagObject : public oatpp::base::Countable {
|
||||
public:
|
||||
data::v_io_size size = 0;
|
||||
v_io_size size = 0;
|
||||
std::shared_ptr<oatpp::data::stream::OutputStream> outputStream;
|
||||
};
|
||||
|
||||
private:
|
||||
std::shared_ptr<PartReaderStreamProvider> m_streamProvider;
|
||||
data::v_io_size m_maxDataSize;
|
||||
v_io_size m_maxDataSize;
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -136,7 +136,7 @@ public:
|
||||
* @param maxDataSize - use `-1` for no limit.
|
||||
*/
|
||||
StreamPartReader(const std::shared_ptr<PartReaderStreamProvider>& streamProvider,
|
||||
data::v_io_size maxDataSize = -1);
|
||||
v_io_size maxDataSize = -1);
|
||||
|
||||
/**
|
||||
* Called when new part headers are parsed and part object is created.
|
||||
@ -151,7 +151,7 @@ public:
|
||||
* @param data - pointer to buffer containing chunk data.
|
||||
* @param size - size of the buffer.
|
||||
*/
|
||||
void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) override;
|
||||
void onPartData(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) override;
|
||||
|
||||
};
|
||||
|
||||
@ -165,7 +165,7 @@ private:
|
||||
|
||||
class TagObject : public oatpp::base::Countable {
|
||||
public:
|
||||
data::v_io_size size = 0;
|
||||
v_io_size size = 0;
|
||||
std::shared_ptr<oatpp::data::stream::OutputStream> outputStream;
|
||||
};
|
||||
|
||||
@ -173,7 +173,7 @@ private:
|
||||
async::CoroutineStarter onPartDone(const std::shared_ptr<Part>& part);
|
||||
private:
|
||||
std::shared_ptr<AsyncPartReaderStreamProvider> m_streamProvider;
|
||||
data::v_io_size m_maxDataSize;
|
||||
v_io_size m_maxDataSize;
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -182,7 +182,7 @@ public:
|
||||
* @param maxDataSize - use `-1` for no limit.
|
||||
*/
|
||||
AsyncStreamPartReader(const std::shared_ptr<AsyncPartReaderStreamProvider>& streamProvider,
|
||||
data::v_io_size maxDataSize = -1);
|
||||
v_io_size maxDataSize = -1);
|
||||
|
||||
/**
|
||||
* Called when new part headers are parsed and part object is created.
|
||||
@ -199,7 +199,7 @@ public:
|
||||
* @param size - size of the buffer.
|
||||
* @return - &id:oatpp::async::CoroutineStarter;.
|
||||
*/
|
||||
async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::data::v_io_size size) override;
|
||||
async::CoroutineStarter onPartDataAsync(const std::shared_ptr<Part>& part, p_char8 data, oatpp::v_io_size size) override;
|
||||
|
||||
};
|
||||
|
||||
|
@ -26,13 +26,13 @@
|
||||
|
||||
namespace oatpp { namespace web { namespace protocol {
|
||||
|
||||
CommunicationError::CommunicationError(oatpp::data::v_io_size ioStatus, const oatpp::String& message)
|
||||
CommunicationError::CommunicationError(oatpp::v_io_size ioStatus, const oatpp::String& message)
|
||||
:std::runtime_error(message->std_str())
|
||||
, m_ioStatus(ioStatus)
|
||||
, m_message(message)
|
||||
{}
|
||||
|
||||
oatpp::data::v_io_size CommunicationError::getIOStatus() {
|
||||
oatpp::v_io_size CommunicationError::getIOStatus() {
|
||||
return m_ioStatus;
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@
|
||||
#ifndef oatpp_web_protocol_CommunicationError_hpp
|
||||
#define oatpp_web_protocol_CommunicationError_hpp
|
||||
|
||||
#include "oatpp/core/data/IODefinitions.hpp"
|
||||
#include "oatpp/core/IODefinitions.hpp"
|
||||
|
||||
namespace oatpp { namespace web { namespace protocol {
|
||||
|
||||
@ -34,22 +34,22 @@ namespace oatpp { namespace web { namespace protocol {
|
||||
*/
|
||||
class CommunicationError : public std::runtime_error {
|
||||
private:
|
||||
oatpp::data::v_io_size m_ioStatus;
|
||||
oatpp::v_io_size m_ioStatus;
|
||||
oatpp::String m_message;
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param ioStatus - I/O error. See &id:oatpp::data::v_io_size;.
|
||||
* @param ioStatus - I/O error. See &id:oatpp::v_io_size;.
|
||||
* @param message - error message.
|
||||
*/
|
||||
CommunicationError(oatpp::data::v_io_size ioStatus, const oatpp::String& message);
|
||||
CommunicationError(oatpp::v_io_size ioStatus, const oatpp::String& message);
|
||||
|
||||
/**
|
||||
* Get I/O error. See &id:oatpp::data::v_io_size;.
|
||||
* @return &id:oatpp::data::v_io_size;.
|
||||
* Get I/O error. See &id:oatpp::v_io_size;.
|
||||
* @return &id:oatpp::v_io_size;.
|
||||
*/
|
||||
oatpp::data::v_io_size getIOStatus();
|
||||
oatpp::v_io_size getIOStatus();
|
||||
|
||||
/**
|
||||
* Get error message.
|
||||
@ -74,10 +74,10 @@ struct ProtocolErrorInfo {
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param pIOStatus - I/O level error. See &id:oatpp::data::v_io_size;.
|
||||
* @param pIOStatus - I/O level error. See &id:oatpp::v_io_size;.
|
||||
* @param pStatus - configurable arbitrary data type.
|
||||
*/
|
||||
ProtocolErrorInfo(oatpp::data::v_io_size pIOStatus, const Status& pStatus)
|
||||
ProtocolErrorInfo(oatpp::v_io_size pIOStatus, const Status& pStatus)
|
||||
: ioStatus(pIOStatus)
|
||||
, status(pStatus)
|
||||
{}
|
||||
@ -85,7 +85,7 @@ struct ProtocolErrorInfo {
|
||||
/**
|
||||
* Get I/O level error.
|
||||
*/
|
||||
oatpp::data::v_io_size ioStatus;
|
||||
oatpp::v_io_size ioStatus;
|
||||
|
||||
/**
|
||||
* Configurable arbitrary data type.
|
||||
@ -135,7 +135,7 @@ public:
|
||||
* @tparam Status - arbitrary data type.
|
||||
*/
|
||||
template<class Status>
|
||||
class AsyncProtocolError : public oatpp::data::AsyncIOError {
|
||||
class AsyncProtocolError : public oatpp::AsyncIOError {
|
||||
public:
|
||||
/**
|
||||
* Cenvenience typedef for ProtocolErrorInfo
|
||||
@ -152,7 +152,7 @@ public:
|
||||
* @param message - error message.
|
||||
*/
|
||||
AsyncProtocolError(const Info& info, const oatpp::String& message)
|
||||
: oatpp::data::AsyncIOError("AsyncProtocolError", info.ioStatus)
|
||||
: oatpp::AsyncIOError("AsyncProtocolError", info.ioStatus)
|
||||
, m_info(info)
|
||||
, m_message(message)
|
||||
{}
|
||||
|
@ -31,7 +31,7 @@ namespace oatpp { namespace web { namespace protocol { namespace http { namespac
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// EncoderChunked
|
||||
|
||||
data::v_io_size EncoderChunked::suggestInputStreamReadSize() {
|
||||
v_io_size EncoderChunked::suggestInputStreamReadSize() {
|
||||
return 32767;
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ DecoderChunked::DecoderChunked()
|
||||
, m_lastFlush(0)
|
||||
{}
|
||||
|
||||
data::v_io_size DecoderChunked::suggestInputStreamReadSize() {
|
||||
v_io_size DecoderChunked::suggestInputStreamReadSize() {
|
||||
if(m_currentChunkSize > 0) {
|
||||
return m_currentChunkSize;
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ private:
|
||||
bool m_writeChunkHeader = true;
|
||||
bool m_firstChunk = true;
|
||||
bool m_finished = false;
|
||||
data::v_io_size m_lastFlush = 0;
|
||||
v_io_size m_lastFlush = 0;
|
||||
public:
|
||||
|
||||
/**
|
||||
@ -47,7 +47,7 @@ public:
|
||||
* the client MAY ask the processor for a suggested read size.
|
||||
* @return - suggested read size.
|
||||
*/
|
||||
data::v_io_size suggestInputStreamReadSize() override;
|
||||
v_io_size suggestInputStreamReadSize() override;
|
||||
|
||||
/**
|
||||
* Process data.
|
||||
@ -68,10 +68,10 @@ public:
|
||||
static constexpr v_int32 ERROR_CHUNK_HEADER_TOO_LONG = 100;
|
||||
private:
|
||||
data::stream::BufferOutputStream m_chunkHeaderBuffer;
|
||||
data::v_io_size m_currentChunkSize;
|
||||
v_io_size m_currentChunkSize;
|
||||
bool m_firstChunk;
|
||||
bool m_finished;
|
||||
data::v_io_size m_lastFlush;
|
||||
v_io_size m_lastFlush;
|
||||
private:
|
||||
v_int32 readHeader(data::buffer::InlineReadData& dataIn);
|
||||
public:
|
||||
@ -86,7 +86,7 @@ public:
|
||||
* the client MAY ask the processor for a suggested read size.
|
||||
* @return - suggested read size.
|
||||
*/
|
||||
data::v_io_size suggestInputStreamReadSize() override;
|
||||
v_io_size suggestInputStreamReadSize() override;
|
||||
|
||||
/**
|
||||
* Process data.
|
||||
|
@ -28,7 +28,7 @@
|
||||
|
||||
namespace oatpp { namespace web { namespace protocol { namespace http { namespace incoming {
|
||||
|
||||
data::v_io_size RequestHeadersReader::readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
v_io_size RequestHeadersReader::readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
data::stream::InputStreamBufferedProxy* stream,
|
||||
async::Action& action)
|
||||
{
|
||||
@ -85,7 +85,7 @@ RequestHeadersReader::Result RequestHeadersReader::readHeaders(data::stream::Inp
|
||||
|
||||
if(error.ioStatus > 0) {
|
||||
continue;
|
||||
} else if(error.ioStatus == data::IOError::RETRY_READ || error.ioStatus == data::IOError::RETRY_WRITE) {
|
||||
} else if(error.ioStatus == IOError::RETRY_READ || error.ioStatus == IOError::RETRY_WRITE) {
|
||||
continue;
|
||||
} else {
|
||||
break;
|
||||
@ -142,7 +142,7 @@ RequestHeadersReader::readHeadersAsync(const std::shared_ptr<data::stream::Input
|
||||
|
||||
if (res > 0) {
|
||||
return repeat();
|
||||
} else if (res == data::IOError::RETRY_READ || res == data::IOError::RETRY_WRITE) {
|
||||
} else if (res == IOError::RETRY_READ || res == IOError::RETRY_WRITE) {
|
||||
return repeat();
|
||||
}
|
||||
|
||||
|
@ -69,7 +69,7 @@ private:
|
||||
};
|
||||
|
||||
private:
|
||||
data::v_io_size readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
v_io_size readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
data::stream::InputStreamBufferedProxy* stream,
|
||||
async::Action& action);
|
||||
private:
|
||||
|
@ -28,7 +28,7 @@
|
||||
|
||||
namespace oatpp { namespace web { namespace protocol { namespace http { namespace incoming {
|
||||
|
||||
data::v_io_size ResponseHeadersReader::readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
v_io_size ResponseHeadersReader::readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
const std::shared_ptr<oatpp::data::stream::IOStream>& connection,
|
||||
data::stream::ConsistentOutputStream* bufferStream,
|
||||
Result& result,
|
||||
@ -85,7 +85,7 @@ ResponseHeadersReader::Result ResponseHeadersReader::readHeaders(const std::shar
|
||||
|
||||
if(error.ioStatus > 0) {
|
||||
continue;
|
||||
} else if(error.ioStatus == data::IOError::RETRY_READ || error.ioStatus == data::IOError::RETRY_WRITE) {
|
||||
} else if(error.ioStatus == IOError::RETRY_READ || error.ioStatus == IOError::RETRY_WRITE) {
|
||||
continue;
|
||||
} else {
|
||||
break;
|
||||
@ -143,7 +143,7 @@ ResponseHeadersReader::readHeadersAsync(const std::shared_ptr<oatpp::data::strea
|
||||
|
||||
if (res > 0) {
|
||||
return repeat();
|
||||
} else if (res == data::IOError::RETRY_READ || res == data::IOError::RETRY_WRITE) {
|
||||
} else if (res == IOError::RETRY_READ || res == IOError::RETRY_WRITE) {
|
||||
return repeat();
|
||||
}
|
||||
|
||||
|
@ -77,7 +77,7 @@ private:
|
||||
|
||||
private:
|
||||
|
||||
data::v_io_size readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
v_io_size readHeadersSectionIterative(ReadHeadersIteration& iteration,
|
||||
const std::shared_ptr<oatpp::data::stream::IOStream>& connection,
|
||||
data::stream::ConsistentOutputStream* bufferStream,
|
||||
Result& result,
|
||||
|
@ -74,7 +74,7 @@ public:
|
||||
/**
|
||||
* Should return the known size of the body (if known).
|
||||
* If body size is unknown then should return -1.
|
||||
* @return - &id:oatpp::data::v_io_size;.
|
||||
* @return - &id:oatpp::v_io_size;.
|
||||
*/
|
||||
virtual v_buff_size getKnownSize() = 0;
|
||||
|
||||
|
@ -73,7 +73,7 @@ public:
|
||||
|
||||
/**
|
||||
* Body size of chunked body is unknown.
|
||||
* @return - `-1`. &id:oatpp::data::v_io_size;.
|
||||
* @return - `-1`. &id:oatpp::v_io_size;.
|
||||
*/
|
||||
v_buff_size getKnownSize() override;
|
||||
|
||||
|
@ -38,7 +38,7 @@ MultipartBody::MultipartReadCallback::MultipartReadCallback(const std::shared_pt
|
||||
, m_readStream(nullptr, nullptr, 0)
|
||||
{}
|
||||
|
||||
data::v_io_size MultipartBody::MultipartReadCallback::readBody(void *buffer, v_buff_size count, async::Action& action) {
|
||||
v_io_size MultipartBody::MultipartReadCallback::readBody(void *buffer, v_buff_size count, async::Action& action) {
|
||||
auto& part = *m_iterator;
|
||||
const auto& stream = part->getInputStream();
|
||||
if(!stream) {
|
||||
@ -53,16 +53,16 @@ data::v_io_size MultipartBody::MultipartReadCallback::readBody(void *buffer, v_b
|
||||
return res;
|
||||
}
|
||||
|
||||
data::v_io_size MultipartBody::MultipartReadCallback::read(void *buffer, v_buff_size count, async::Action& action) {
|
||||
v_io_size MultipartBody::MultipartReadCallback::read(void *buffer, v_buff_size count, async::Action& action) {
|
||||
|
||||
if(m_state == STATE_FINISHED) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
p_char8 currBufferPtr = (p_char8) buffer;
|
||||
data::v_io_size bytesLeft = count;
|
||||
v_io_size bytesLeft = count;
|
||||
|
||||
data::v_io_size res = 0;
|
||||
v_io_size res = 0;
|
||||
|
||||
while(bytesLeft > 0 && action.isNone()) {
|
||||
|
||||
@ -114,7 +114,7 @@ data::v_io_size MultipartBody::MultipartReadCallback::read(void *buffer, v_buff_
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// MultipartBody
|
||||
|
||||
data::v_io_size MultipartBody::readBoundary(const std::shared_ptr<Multipart>& multipart,
|
||||
v_io_size MultipartBody::readBoundary(const std::shared_ptr<Multipart>& multipart,
|
||||
std::list<std::shared_ptr<Part>>::const_iterator& iterator,
|
||||
data::stream::BufferInputStream& readStream,
|
||||
void *buffer,
|
||||
@ -144,7 +144,7 @@ data::v_io_size MultipartBody::readBoundary(const std::shared_ptr<Multipart>& mu
|
||||
return res;
|
||||
}
|
||||
|
||||
data::v_io_size MultipartBody::readHeaders(const std::shared_ptr<Multipart>& multipart,
|
||||
v_io_size MultipartBody::readHeaders(const std::shared_ptr<Multipart>& multipart,
|
||||
std::list<std::shared_ptr<Part>>::const_iterator& iterator,
|
||||
data::stream::BufferInputStream& readStream,
|
||||
void *buffer,
|
||||
|
@ -62,13 +62,13 @@ private:
|
||||
static constexpr v_int32 STATE_FINISHED = 4;
|
||||
|
||||
private:
|
||||
static data::v_io_size readBoundary(const std::shared_ptr<Multipart>& multipart,
|
||||
static v_io_size readBoundary(const std::shared_ptr<Multipart>& multipart,
|
||||
std::list<std::shared_ptr<Part>>::const_iterator& iterator,
|
||||
data::stream::BufferInputStream& readStream,
|
||||
void *buffer,
|
||||
v_buff_size count);
|
||||
|
||||
static data::v_io_size readHeaders(const std::shared_ptr<Multipart>& multipart,
|
||||
static v_io_size readHeaders(const std::shared_ptr<Multipart>& multipart,
|
||||
std::list<std::shared_ptr<Part>>::const_iterator& iterator,
|
||||
data::stream::BufferInputStream& readStream,
|
||||
void *buffer,
|
||||
@ -82,11 +82,11 @@ private:
|
||||
v_int32 m_state;
|
||||
oatpp::data::stream::BufferInputStream m_readStream;
|
||||
private:
|
||||
data::v_io_size readBody(void *buffer, v_buff_size count, async::Action& action);
|
||||
v_io_size readBody(void *buffer, v_buff_size count, async::Action& action);
|
||||
public:
|
||||
|
||||
MultipartReadCallback(const std::shared_ptr<Multipart>& multipart);
|
||||
data::v_io_size read(void *buffer, v_buff_size count, async::Action& action) override;
|
||||
v_io_size read(void *buffer, v_buff_size count, async::Action& action) override;
|
||||
|
||||
};
|
||||
|
||||
|
@ -272,9 +272,9 @@ HttpProcessor::Coroutine::Action HttpProcessor::Coroutine::handleError(Error* er
|
||||
|
||||
if(error) {
|
||||
|
||||
if(error->is<oatpp::data::AsyncIOError>()) {
|
||||
auto aioe = static_cast<oatpp::data::AsyncIOError*>(error);
|
||||
if(aioe->getCode() == oatpp::data::IOError::BROKEN_PIPE) {
|
||||
if(error->is<oatpp::AsyncIOError>()) {
|
||||
auto aioe = static_cast<oatpp::AsyncIOError*>(error);
|
||||
if(aioe->getCode() == oatpp::IOError::BROKEN_PIPE) {
|
||||
return aioe; // do not report BROKEN_PIPE error
|
||||
}
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ public:
|
||||
: m_bufferSize(bufferSize)
|
||||
{}
|
||||
|
||||
oatpp::data::v_io_size suggestInputStreamReadSize() override {
|
||||
oatpp::v_io_size suggestInputStreamReadSize() override {
|
||||
return m_bufferSize;
|
||||
}
|
||||
|
||||
|
@ -36,11 +36,11 @@ typedef oatpp::network::ConnectionPool ConnectionPool;
|
||||
class StubStream : public oatpp::data::stream::IOStream, public oatpp::base::Countable {
|
||||
public:
|
||||
|
||||
data::v_io_size write(const void *buff, v_buff_size count, async::Action& actions) override {
|
||||
v_io_size write(const void *buff, v_buff_size count, async::Action& actions) override {
|
||||
throw std::runtime_error("It's a stub!");
|
||||
}
|
||||
|
||||
data::v_io_size read(void *buff, v_buff_size count, async::Action& action) override {
|
||||
v_io_size read(void *buff, v_buff_size count, async::Action& action) override {
|
||||
throw std::runtime_error("It's a stub!");
|
||||
}
|
||||
|
||||
|
@ -153,8 +153,8 @@ public:
|
||||
}
|
||||
|
||||
Action handleError(Error* error) override {
|
||||
if(error->is<oatpp::data::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::data::AsyncIOError*>(error);
|
||||
if(error->is<oatpp::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::AsyncIOError*>(error);
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_getRootAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
|
||||
} else {
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_getRootAsync::handleError()]", "Error. %s", error->what());
|
||||
@ -195,8 +195,8 @@ public:
|
||||
}
|
||||
|
||||
Action handleError(Error* error) override {
|
||||
if(error->is<oatpp::data::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::data::AsyncIOError*>(error);
|
||||
if(error->is<oatpp::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::AsyncIOError*>(error);
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_postBodyAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
|
||||
} else {
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_postBodyAsync::handleError()]", "Error. %s", error->what());
|
||||
@ -242,8 +242,8 @@ public:
|
||||
|
||||
Action handleError(Error* error) override {
|
||||
if(error) {
|
||||
if(error->is<oatpp::data::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::data::AsyncIOError*>(error);
|
||||
if(error->is<oatpp::AsyncIOError>()) {
|
||||
auto e = static_cast<oatpp::AsyncIOError*>(error);
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_echoBodyAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
|
||||
} else {
|
||||
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_echoBodyAsync::handleError()]", "Error. %s", error->what());
|
||||
|
@ -177,7 +177,7 @@ public:
|
||||
, m_iterations(iterations)
|
||||
{}
|
||||
|
||||
data::v_io_size read(void *buffer, v_buff_size count, async::Action& action) override {
|
||||
v_io_size read(void *buffer, v_buff_size count, async::Action& action) override {
|
||||
(void)count;
|
||||
if(m_counter < m_iterations) {
|
||||
std::memcpy(buffer, m_text->getData(), m_text->getSize());
|
||||
|
@ -151,7 +151,7 @@ public:
|
||||
, m_iterations(iterations)
|
||||
{}
|
||||
|
||||
data::v_io_size read(void *buffer, v_buff_size count, async::Action& action) override {
|
||||
v_io_size read(void *buffer, v_buff_size count, async::Action& action) override {
|
||||
if(m_counter < m_iterations) {
|
||||
std::memcpy(buffer, m_text->getData(), m_text->getSize());
|
||||
m_counter ++;
|
||||
|
@ -65,7 +65,7 @@ namespace {

oatpp::data::stream::BufferInputStream stream(text.getPtr(), text->getData(), text->getSize());
std::unique_ptr<v_char8> buffer(new v_char8[step]);
data::v_io_size size;
v_io_size size;
while((size = stream.readSimple(buffer.get(), step)) != 0) {
oatpp::data::buffer::InlineWriteData inlineData(buffer.get(), size);
while(inlineData.bytesLeft > 0 && !parser.finished()) {