11 #include <l4/cxx/unique_ptr> 15 #include <l4/l4virtio/virtio.h> 16 #include <l4/l4virtio/virtio_block.h> 17 #include <l4/l4virtio/server/l4virtio> 18 #include <l4/sys/cxx/ipc_epiface> 27 template<
typename Ds_data>
43 Data_block() =
default;
68 rp.
start(_mem_list, _request, &data);
70 unsigned total = data.len;
76 rp.
next(_mem_list, &data);
86 if (total < Header_size + 1)
89 return total - Header_size - 1;
98 while (_data.len == 0 && _rp.has_more())
99 _rp.next(_mem_list, &_data);
102 return (_data.len > 1 || _rp.has_more());
121 "No more data blocks in virtio request");
123 if (_todo_blocks == 0)
127 _rp.next(_mem_list, &_data);
130 if (_data.len > _max_block_size)
139 _data.addr =
static_cast<char *
>(_data.addr) + out.len;
154 : _mem_list(mem_list),
156 _todo_blocks(max_blocks),
157 _max_block_size(max_block_size)
160 _rp.start(mem_list, _request, &_data);
163 if (_data.len < Header_size)
168 _data.addr =
static_cast<char *
>(_data.addr) + Header_size;
169 _data.len -= Header_size;
172 if (!_rp.has_more() && _data.len == 0)
183 while (_rp.next(_mem_list, &_data) && _todo_blocks > 0)
186 if (_todo_blocks > 0 && _data.len > 0)
187 *(
static_cast<l4_uint8_t *
>(_data.addr) + _data.len - 1) = status;
191 else if (_data.len > 0)
192 *(
static_cast<l4_uint8_t *
>(_data.addr)) = status;
216 Virtqueue::Request _request;
218 unsigned _todo_blocks;
225 Block_features() =
default;
229 CXX_BITFIELD_MEMBER( 1, 1, size_max, raw);
231 CXX_BITFIELD_MEMBER( 2, 2, seg_max, raw);
233 CXX_BITFIELD_MEMBER( 4, 4, geometry, raw);
235 CXX_BITFIELD_MEMBER( 5, 5, ro, raw);
237 CXX_BITFIELD_MEMBER( 6, 6, blk_size, raw);
239 CXX_BITFIELD_MEMBER(10, 10, topology, raw);
248 template <
typename Ds_data>
251 public L4::Epiface_t<Block_dev<Ds_data>, L4virtio::Device>
254 class Irq_object :
public L4::Irqep_t<Irq_object>
267 Irq_object _irq_handler;
273 Dev_config_t<l4virtio_block_config_t> _dev_config;
279 Block_features device_features()
const 280 {
return _dev_config.host_features(0); }
282 void set_device_features(Block_features df)
283 { _dev_config.host_features(0) = df.raw; }
296 _dev_config.priv_config()->size_max = sz;
297 Block_features df = device_features();
298 df.size_max() =
true;
299 set_device_features(df);
301 _max_block_size = sz;
310 _dev_config.priv_config()->seg_max = sz;
311 Block_features df = device_features();
313 set_device_features(df);
322 pc->geometry.cylinders = cylinders;
323 pc->geometry.heads = heads;
324 pc->geometry.sectors = sectors;
325 Block_features df = device_features();
326 df.geometry() =
true;
327 set_device_features(df);
338 _dev_config.priv_config()->blk_size = sz;
339 Block_features df = device_features();
340 df.blk_size() =
true;
341 set_device_features(df);
358 pc->topology.physical_block_exp = physical_block_exp;
359 pc->topology.alignment_offset = alignment_offset;
360 pc->topology.min_io_size = min_io_size;
361 pc->topology.opt_io_size = opt_io_size;
362 Block_features df = device_features();
363 df.topology() =
true;
364 set_device_features(df);
380 _irq_handler(this), _vq_max(queue_size),
384 this->reset_queue_config(0, queue_size);
386 Block_features df(0);
387 df.ring_indirect_desc() =
true;
389 set_device_features(df);
391 _dev_config.priv_config()->capacity = capacity;
392 _dev_config.reset_hdr();
410 virtual bool process_request(cxx::unique_ptr<Request> &&req) = 0;
415 virtual void reset_device() = 0;
420 virtual bool queue_stopped() = 0;
436 if (_dev_config.status().failed())
439 if (req->release_request(&_queue, status, sz) < 0)
440 this->device_error();
444 _kick_guest_irq->trigger();
451 if (idx == 0 && this->setup_queue(&_queue, 0, _vq_max))
468 char const *service = 0)
484 return L4::Epiface::server_iface();
492 while (!_dev_config.status().failed())
500 cxx::unique_ptr<Request>
501 cur{
new Request(r, &(this->_mem_info), _vq_max, _max_block_size)};
503 if (!process_request(cxx::move(cur)))
508 this->device_error();
521 void register_single_driver_irq()
523 _kick_guest_irq =
L4Re::chkcap(server_iface()->
template rcv_cap<L4::Irq>(0));
Abstract interface for object registries.
bool has_more() const
Are there more chained descriptors?
Descriptor in the descriptor table.
L4::Cap< void > register_obj(L4::Registry_iface *registry, char const *service=0)
Attach device to an object registry.
Virtqueue implementation for the device.
void set_size_max(l4_uint32_t sz)
Sets the maximum size of any single segment reported to client.
Server-side L4-VIRTIO device stub.
l4virtio_block_header_t const & header() const
Return the block request header.
Interface for server-loop related functions.
Request finished successfully.
unsigned short int l4_uint16_t
Unsigned 16bit value.
Block_dev(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity, bool read_only)
Create a new virtio block device.
void set_seg_max(l4_uint32_t sz)
Sets the maximum number of segments in a request that is reported to client.
Exception used by Queue to indicate descriptor errors.
void set_geometry(l4_uint16_t cylinders, l4_uint8_t heads, l4_uint8_t sectors)
Set disk geometry that is reported to the client.
int reconfig_queue(unsigned idx)
Callback for a client queue-config request.
bool ready() const
Test if this queue is in working state.
Device configuration for block devices.
void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
Start processing a new request.
unsigned data_size() const
Compute the total size of the data in the request.
void consumed(Head_desc const &r, l4_uint32_t len=0)
Put the given descriptor into the used ring.
void disable()
Completely disable the queue.
Request next_avail()
Get the next available descriptor from the available ring.
Encapsulate the state for processing a VIRTIO request.
void finalize_request(cxx::unique_ptr< Request > req, unsigned sz, l4_uint8_t status=L4VIRTIO_BLOCK_S_OK)
Releases resources related to a request and notifies the client.
Exception for an abstract runtime error.
Base class for virtio block devices.
Data_block next_block()
Return next block in scatter-gather list.
_Cap_alloc & cap_alloc
Capability allocator.
bool next(DESC_MAN *dm, ARGS... args)
Switch to the next descriptor in a descriptor chain.
Ptr< void > addr
Address stored in descriptor.
T chkcap(T &&cap, char const *extra="", long err=-L4_ENOMEM)
Check for valid capability or raise C++ exception.
A request to read or write data.
T * local(Ptr< T > p) const
Get the local address for driver address p.
long chksys(long err, char const *extra="", long ret=0)
Generate C++ exception on error.
unsigned char l4_uint8_t
Unsigned 8bit value.
Type for device feature bitmap.
virtual L4::Cap< L4::Irq > register_irq_obj(L4::Epiface *o)=0
Register o as server-side object for asynchronous IRQs.
bool has_more()
Check if the request contains more data blocks.
Invalid size of memory block.
Cap< T > cap_cast(Cap< F > const &c)
static_cast for capabilities.
unsigned long long l4_uint64_t
Unsigned 64bit value.
L4-VIRTIO Transport C++ API.
C++ interface for capabilities.
void set_blk_size(l4_uint32_t sz)
Sets block disk size to be reported to the client.
l4_uint32_t len
Length of described buffer.
virtual L4::Cap< void > register_obj(L4::Epiface *o, char const *service)=0
Register an L4::Epiface for an IPC gate available in the application's environment under the name service.
struct l4virtio_block_header_t l4virtio_block_header_t
Header structure of a request for a block device.
unsigned int l4_uint32_t
Unsigned 32bit value.
void set_topology(l4_uint8_t physical_block_exp, l4_uint8_t alignment_offset, l4_uint32_t min_io_size, l4_uint32_t opt_io_size)
Sets the I/O alignment information reported back to the client.