Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions lib/host-sp-messages/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,15 @@ pub enum HostToSp {
// We use a raw `u8` here for the same reason as in `KeyLookup` above.
key: u8,
},
// ApobWrite is followed by a binary data blob, to be written to flash
ApobWrite {
offset: u64,
},
// ApobRead returns an ApobResult followed by trailing data
ApobRead {
offset: u64,
size: u64,
},
}

/// The order of these cases is critical! We are relying on hubpack's encoding
Expand Down Expand Up @@ -185,6 +194,7 @@ pub enum SpToHost {
name: [u8; 32],
},
KeySetResult(#[count(children)] KeySetResult),
ApobResult(u8),
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, num_derive::FromPrimitive)]
Expand Down
133 changes: 133 additions & 0 deletions task/host-sp-comms/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,16 @@ enum Trace {
#[count(children)]
message: SpToHost,
},
ApobWriteError {
offset: u64,
#[count(children)]
err: ApobError,
},
ApobReadError {
offset: u64,
#[count(children)]
err: ApobError,
},
}

counted_ringbuf!(Trace, 20, Trace::None);
Expand All @@ -171,6 +181,34 @@ enum Timers {
TxPeriodicZeroByte,
}

/// Errors that can occur while servicing host APOB read / write requests
///
/// These are recorded in the ringbuf (`Trace::ApobWriteError` /
/// `Trace::ApobReadError`); the host itself only sees a generic
/// `SpToHost::ApobResult(1)` failure code.
#[derive(Copy, Clone, Debug, Eq, PartialEq, counters::Count)]
enum ApobError {
    /// A 64-bit flash offset did not fit into the `u32` expected by the
    /// host-flash driver API
    OffsetOverflow {
        page_offset: u64,
    },
    /// A 64-bit read size did not fit into a `usize`
    SizeOverflow {
        size: u64,
    },
    /// A page that was about to be programmed read back as not all-0xFF
    /// (i.e. it was not in the erased state)
    NotErased {
        page_offset: u32,
    },
    /// The host-flash driver reported an error during a sector erase
    EraseFailed {
        page_offset: u32,
        #[count(children)]
        err: drv_hf_api::HfError,
    },
    /// The host-flash driver reported an error during a page program
    WriteFailed {
        page_offset: u32,
        #[count(children)]
        err: drv_hf_api::HfError,
    },
    /// The host-flash driver reported an error during a read
    ReadFailed {
        page_offset: u32,
        #[count(children)]
        err: drv_hf_api::HfError,
    },
}

#[export_name = "main"]
fn main() -> ! {
let mut server = ServerImpl::claim_static_resources();
Expand Down Expand Up @@ -993,6 +1031,20 @@ impl ServerImpl {
}),
}
}
HostToSp::ApobWrite { offset } => {
Some(match Self::apob_write(&self.hf, offset, data) {
Ok(()) => SpToHost::ApobResult(0),
Err(err) => {
ringbuf_entry!(Trace::ApobWriteError { offset, err });
SpToHost::ApobResult(1)
}
})
}
HostToSp::ApobRead { offset, size } => {
// apob_read does serialization itself
self.apob_read(header.sequence, offset, size);
None
}
};

if let Some(response) = response {
Expand Down Expand Up @@ -1021,6 +1073,87 @@ impl ServerImpl {
Ok(())
}

/// Write data to the bonus region of flash
///
/// This does not take `&self` because we need to force a split borrow
fn apob_write(
hf: &HostFlash,
mut offset: u64,
data: &[u8],
) -> Result<(), ApobError> {
Comment on lines +1079 to +1083
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So we need something in the broader API to discover what the erase size granularity is or at least make sure that we're sending stuff that is page size aligned. This gets to what @jgallagher gets at below. But if the host sent things that wasn't page aligned then we'd erase the entire page because our API is not doing a read-modify-write.

for chunk in data.chunks(drv_hf_api::PAGE_SIZE_BYTES) {
Self::apob_write_page(
hf,
offset.try_into().map_err(|_| ApobError::OffsetOverflow {
page_offset: offset,
})?,
chunk,
)?;
offset += chunk.len() as u64;
}
Ok(())
}

/// Write a single page of data to the bonus region of flash
///
/// If `page_offset` lands on a sector boundary, the sector is erased first;
/// otherwise, the target page is read back and must already be erased
/// (all 0xFF), since this path does not do a read-modify-write.
///
/// This does not take `&self` because we need to force a split borrow
fn apob_write_page(
    hf: &HostFlash,
    page_offset: u32,
    data: &[u8],
) -> Result<(), ApobError> {
    let at_sector_start =
        (page_offset as usize).is_multiple_of(drv_hf_api::SECTOR_SIZE_BYTES);
    if at_sector_start {
        hf.bonus_sector_erase(page_offset)
            .map_err(|err| ApobError::EraseFailed { page_offset, err })?;
    } else {
        // Read back the page and confirm that it's all empty
        let mut scratch = [0u8; drv_hf_api::PAGE_SIZE_BYTES];
        let prev = &mut scratch[..data.len()];
        hf.bonus_read(page_offset, prev)
            .map_err(|err| ApobError::ReadFailed { page_offset, err })?;
        if prev.iter().any(|b| *b != 0xFF) {
            return Err(ApobError::NotErased { page_offset });
        }
    }
    hf.bonus_page_program(page_offset, data)
        .map_err(|err| ApobError::WriteFailed { page_offset, err })
}

/// Reads and encodes data from the bonus region of flash
///
/// On success the response is `ApobResult(0)` followed by the raw data; on
/// failure the error is recorded in the ringbuf and `ApobResult(1)` is sent
/// instead.
fn apob_read(&mut self, sequence: u64, offset: u64, size: u64) {
    self.tx_buf.try_encode_response(
        sequence,
        &SpToHost::ApobResult(0),
        // The trailing-data callback does the actual flash read
        |buf| {
            Self::apob_read_inner(buf, &self.hf, offset, size).map_err(
                |err| {
                    ringbuf_entry!(Trace::ApobReadError { offset, err });
                    SpToHost::ApobResult(1)
                },
            )
        },
    );
}

/// Reads `size` bytes from the bonus region of flash, starting at `offset`,
/// into the front of `buf`
///
/// Data is read one page at a time. Returns the number of bytes written into
/// `buf` on success.
///
/// # Errors
/// - `SizeOverflow` if `size` does not fit in a `usize` or exceeds `buf.len()`
/// - `OffsetOverflow` if a page offset does not fit in a `u32`
/// - `ReadFailed` if the host-flash driver reports a read error
fn apob_read_inner(
    buf: &mut [u8],
    hf: &HostFlash,
    offset: u64,
    size: u64,
) -> Result<usize, ApobError> {
    let size = usize::try_from(size)
        .map_err(|_| ApobError::SizeOverflow { size })?;
    // The host controls `size`; reject requests larger than our buffer up
    // front rather than panicking on the slice below.
    if size > buf.len() {
        return Err(ApobError::SizeOverflow { size: size as u64 });
    }
    for d in (0..size).step_by(drv_hf_api::PAGE_SIZE_BYTES) {
        let chunk_size = (size - d).min(drv_hf_api::PAGE_SIZE_BYTES);
        let page_offset = offset + d as u64;
        let page_offset = u32::try_from(page_offset)
            .map_err(|_| ApobError::OffsetOverflow { page_offset })?;

        hf.bonus_read(page_offset, &mut buf[d..][..chunk_size])
            .map_err(|err| ApobError::ReadFailed { page_offset, err })?;
    }
    Ok(size)
}

fn handle_sprot(
&mut self,
sequence: u64,
Expand Down
Loading