59 changes: 59 additions & 0 deletions service_contracts/src/FilecoinWarmStorageService.sol
@@ -2,6 +2,7 @@
pragma solidity ^0.8.20;

import {PDPListener} from "@pdp/PDPVerifier.sol";
import {IPDPVerifier} from "@pdp/interfaces/IPDPVerifier.sol";
import {Cids} from "@pdp/Cids.sol";
import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol";
import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol";
@@ -834,6 +835,13 @@ contract FilecoinWarmStorageService is
// Verify the signature
verifyAddPiecesSignature(payer, info.clientDataSetId, pieceData, nonce, metadataKeys, metadataValues, signature);

// Validate payer/operator approvals and available funds for the new pieces
// This checks the payer has sufficient available funds and operator allowances
// to cover the increased per-epoch rate and the 30-day lockup implied by the
// new total leaf count after adding these pieces.
FilecoinPayV1 payments = FilecoinPayV1(paymentsContractAddress);
validatePayerOperatorApprovalAndFundsForPieces(payments, payer, dataSetId, pieceData);

// Store metadata for each new piece
for (uint256 i = 0; i < pieceData.length; i++) {
uint256 pieceId = firstAdded + i;
@@ -873,6 +881,57 @@ contract FilecoinWarmStorageService is
}
}

/// @notice Validate payer/operator approvals and funds for adding pieces
/// @dev Computes the new storage rate and corresponding 30-day lockup after adding `pieceData`
/// and validates the payer has sufficient available funds and operator allowances.
function validatePayerOperatorApprovalAndFundsForPieces(
Collaborator:

How about, instead of making a new function here, you just extend validatePayerOperatorApprovalAndFunds and allow it to take a leaf count (zero in the case of createDataSet, the actual count for the piecesAdded function)? Then we don't have so much code duplication.

Perhaps there's some nuance I'm missing between that case and this one, but it seems to me that they are basically the same; just one has leaves and the other deals in minimums. In fact, there's some logic in validatePayerOperatorApprovalAndFunds that does a minimum-rate calculation which could be replaced with a call to _calculateStorageRate, so it may be that there's no need for special-casing inside the function: it just needs to take a leafCount, which may or may not be zero.
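
For illustration only, a rough sketch of the unified helper this comment describes (not code from the PR): the leafCount parameter and the assumption that _calculateStorageRate applied to zero bytes yields the minimum rate are hypothetical, and the body simply reuses the checks from validatePayerOperatorApprovalAndFundsForPieces below.

    /// Hypothetical sketch of an extended validatePayerOperatorApprovalAndFunds.
    /// leafCount == 0 would cover createDataSet; piecesAdded would pass the data
    /// set's new total leaf count. Assumes _calculateStorageRate(0) returns the
    /// minimum rate.
    function validatePayerOperatorApprovalAndFunds(FilecoinPayV1 payments, address payer, uint256 leafCount)
        internal
        view
    {
        uint256 storageRatePerEpoch = _calculateStorageRate(leafCount * BYTES_PER_LEAF);
        uint256 lockupRequired = storageRatePerEpoch * DEFAULT_LOCKUP_PERIOD;

        // Available funds must cover the implied lockup
        (,, uint256 availableFunds,) = payments.getAccountInfoIfSettled(usdfcTokenAddress, payer);
        require(availableFunds >= lockupRequired, Errors.InsufficientLockupFunds(payer, lockupRequired, availableFunds));

        // Operator approval and allowance checks, identical to the per-pieces variant
        (
            bool isApproved,
            uint256 rateAllowance,
            uint256 lockupAllowance,
            uint256 rateUsage,
            uint256 lockupUsage,
            uint256 maxLockupPeriod
        ) = payments.operatorApprovals(usdfcTokenAddress, payer, address(this));
        require(isApproved, Errors.OperatorNotApproved(payer, address(this)));
        require(
            rateAllowance >= rateUsage + storageRatePerEpoch,
            Errors.InsufficientRateAllowance(payer, address(this), rateAllowance, rateUsage, storageRatePerEpoch)
        );
        require(
            lockupAllowance >= lockupUsage + lockupRequired,
            Errors.InsufficientLockupAllowance(payer, address(this), lockupAllowance, lockupUsage, lockupRequired)
        );
        require(
            maxLockupPeriod >= DEFAULT_LOCKUP_PERIOD,
            Errors.InsufficientMaxLockupPeriod(payer, address(this), maxLockupPeriod, DEFAULT_LOCKUP_PERIOD)
        );
    }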

Contributor Author:

okay

FilecoinPayV1 payments,
address payer,
uint256 dataSetId,
Cids.Cid[] memory pieceData
) internal view {
// Get existing leaf count from the PDP verifier
uint256 leaves = IPDPVerifier(pdpVerifierAddress).getDataSetLeafCount(dataSetId);

uint256 totalBytes = leaves * BYTES_PER_LEAF;
uint256 storageRatePerEpoch = _calculateStorageRate(totalBytes);
uint256 lockupRequired = storageRatePerEpoch * DEFAULT_LOCKUP_PERIOD;

// Check available funds
(,, uint256 availableFunds,) = payments.getAccountInfoIfSettled(usdfcTokenAddress, payer);
require(availableFunds >= lockupRequired, Errors.InsufficientLockupFunds(payer, lockupRequired, availableFunds));

// Check operator approvals for this contract
(
bool isApproved,
uint256 rateAllowance,
uint256 lockupAllowance,
uint256 rateUsage,
uint256 lockupUsage,
uint256 maxLockupPeriod
) = payments.operatorApprovals(usdfcTokenAddress, payer, address(this));

require(isApproved, Errors.OperatorNotApproved(payer, address(this)));

// Verify rate allowance is sufficient for the new per-epoch rate
require(
rateAllowance >= rateUsage + storageRatePerEpoch,
Errors.InsufficientRateAllowance(payer, address(this), rateAllowance, rateUsage, storageRatePerEpoch)
);

// Verify lockup allowance is sufficient for the new 30-day lockup amount
require(
lockupAllowance >= lockupUsage + lockupRequired,
Errors.InsufficientLockupAllowance(payer, address(this), lockupAllowance, lockupUsage, lockupRequired)
);

// Verify max lockup period
require(
maxLockupPeriod >= DEFAULT_LOCKUP_PERIOD,
Errors.InsufficientMaxLockupPeriod(payer, address(this), maxLockupPeriod, DEFAULT_LOCKUP_PERIOD)
);
}

function piecesScheduledRemove(uint256 dataSetId, uint256[] memory pieceIds, bytes calldata extraData)
external
onlyPDPVerifier
79 changes: 70 additions & 9 deletions service_contracts/test/FilecoinWarmStorageService.t.sol
@@ -803,9 +803,9 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {

// First batch (3 pieces) with key "meta" => metadataShort
Cids.Cid[] memory pieceData1 = new Cids.Cid[](3);
pieceData1[0].data = bytes("1_0:1111");
pieceData1[1].data = bytes("1_1:111100000");
pieceData1[2].data = bytes("1_2:11110000000000");
pieceData1[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_0:1111")));
pieceData1[1] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_1:111100000")));
pieceData1[2] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_2:11110000000000")));
string[] memory keys1 = new string[](1);
string[] memory values1 = new string[](1);
keys1[0] = "meta";
@@ -817,8 +817,10 @@

// Second batch (2 pieces) with key "meta" => metadataLong
Cids.Cid[] memory pieceData2 = new Cids.Cid[](2);
pieceData2[0].data = bytes("2_0:22222222222222222222");
pieceData2[1].data = bytes("2_1:222222222222222222220000000000000000000000000000000000000000");
pieceData2[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("2_0:22222222222222222222")));
pieceData2[1] = Cids.CommPv2FromDigest(
0, 4, keccak256(abi.encodePacked("2_1:222222222222222222220000000000000000000000000000000000000000000"))
);
string[] memory keys2 = new string[](1);
string[] memory values2 = new string[](1);
keys2[0] = "meta";
@@ -1161,6 +1163,65 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {
assertEq(dataSetId, 1, "Dataset should be created with above-minimum funds");
}

// function testInsufficientFunds_AddPieces() public {
Collaborator:

What's going on here? Why is it commented out?

Contributor Author:

I made this PR for initial review; if the implementation looks good, I can work on the tests.

Collaborator:

OK, feel free to proceed. I think you have a good handle on the task at hand; just look for opportunities to minimise duplication, as I mentioned above.

// // Setup: Client with exactly the minimum funds (0.06 USDFC)
// address exactClient = makeAddr("exactClient");
// uint256 exactAmount = 6e16; // Exactly 0.06 USDFC

// // Transfer tokens from test contract to the test client
// mockUSDFC.safeTransfer(exactClient, exactAmount);

// vm.startPrank(exactClient);
// payments.setOperatorApproval(mockUSDFC, address(pdpServiceWithPayments), true, 1000e18, 1000e18, 365 days);
// mockUSDFC.approve(address(payments), exactAmount);
// payments.deposit(mockUSDFC, exactClient, exactAmount);
// vm.stopPrank();

// // Prepare dataset creation data
// (string[] memory dsKeys, string[] memory dsValues) = _getSingleMetadataKV("label", "Exact Minimum Test");
// FilecoinWarmStorageService.DataSetCreateData memory createData = FilecoinWarmStorageService.DataSetCreateData({
// payer: exactClient,
// clientDataSetId: 1000,
// metadataKeys: dsKeys,
// metadataValues: dsValues,
// signature: FAKE_SIGNATURE
// });

// bytes memory encodedCreateData = abi.encode(
// createData.payer,
// createData.clientDataSetId,
// createData.metadataKeys,
// createData.metadataValues,
// createData.signature
// );

// // Should succeed with exact minimum
// makeSignaturePass(exactClient);
// vm.prank(serviceProvider);
// uint256 dataSetId = mockPDPVerifier.createDataSet(pdpServiceWithPayments, encodedCreateData);

// vm.prank(exactClient);
// payments.withdraw(mockUSDFC, 59999999999961600); // Withdraw all funds, leaving 0 balance

// // Prepare piece addition data
// Cids.Cid[] memory pieceData = new Cids.Cid[](1);
// pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("piece:belowMinimum")));
// string[] memory keys = new string[](0);
// string[] memory values = new string[](0);
// uint256 firstAdded = 0;
// // Expect revert when adding pieces due to insufficient funds
// makeSignaturePass(exactClient);
// vm.expectRevert(
// abi.encodeWithSelector(
// Errors.InsufficientLockupFunds.selector, exactClient, 6e16, 5e16
// )
// );
// vm.prank(serviceProvider);
// mockPDPVerifier.addPieces(
// pdpServiceWithPayments, dataSetId, firstAdded, pieceData, 0, FAKE_SIGNATURE, keys, values
// );
// }

// Operator Approval Validation Tests
function testOperatorApproval_NotApproved() public {
// Setup: Client with sufficient funds but no operator approval
@@ -4558,7 +4619,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {

// Prepare piece data
Cids.Cid[] memory pieceData = new Cids.Cid[](1);
pieceData[0].data = bytes("test_piece_1");
pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1")));
string[] memory keys = new string[](0);
string[] memory values = new string[](0);

@@ -4604,7 +4665,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {

// Prepare piece data
Cids.Cid[] memory pieceData = new Cids.Cid[](1);
pieceData[0].data = bytes("test_piece");
pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1")));
string[] memory keys = new string[](0);
string[] memory values = new string[](0);

@@ -4678,7 +4739,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {

// Prepare piece data
Cids.Cid[] memory pieceData = new Cids.Cid[](1);
pieceData[0].data = bytes("test");
pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1")));
string[] memory keys = new string[](0);
string[] memory values = new string[](0);

@@ -4725,7 +4786,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest {

// Prepare piece data
Cids.Cid[] memory pieceData = new Cids.Cid[](1);
pieceData[0].data = bytes("test");
pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1")));
string[] memory keys = new string[](0);
string[] memory values = new string[](0);

28 changes: 28 additions & 0 deletions service_contracts/test/mocks/SharedMocks.sol
@@ -99,6 +99,8 @@ contract MockPDPVerifier {

// Track data set service providers for testing
mapping(uint256 => address) public dataSetServiceProviders;
    // Track leaf counts per data set for tests (computed from each piece's CommPv2 padding and height)
mapping(uint256 => uint256) public dataSetLeafCount;

event DataSetCreated(uint256 indexed setId, address indexed owner);
event DataSetServiceProviderChanged(
@@ -118,6 +120,9 @@
// Track service provider
dataSetServiceProviders[setId] = msg.sender;

// initialize leaf count to 0
dataSetLeafCount[setId] = 0;

emit DataSetCreated(setId, msg.sender);
return setId;
}
@@ -128,6 +133,7 @@
}

delete dataSetServiceProviders[setId];
delete dataSetLeafCount[setId];
emit DataSetDeleted(setId, 0);
}

@@ -150,9 +156,31 @@
}

bytes memory extraData = abi.encode(nonce, allKeys, allValues, signature);

        // Update the leaf count for each piece from its CommPv2 padding and height
uint256 leafCount = 0;
for (uint256 i = 0; i < pieceData.length; i++) {
(uint256 padding, uint8 height,) = Cids.validateCommPv2(pieceData[i]);
leafCount += Cids.leafCount(padding, height);
}
dataSetLeafCount[dataSetId] += leafCount;

listenerAddr.piecesAdded(dataSetId, firstAdded, pieceData, extraData);
}

// Expose leaf count similar to real PDPVerifier
function getDataSetLeafCount(uint256 setId) external view returns (uint256) {
return dataSetLeafCount[setId];
}

/**
* @notice Simulates service provider change for testing purposes
* @dev This function mimics the PDPVerifier's claimDataSetOwnership functionality