mirror of
https://github.com/Fishwaldo/bl_mcu_sdk.git
synced 2025-03-15 19:31:43 +00:00
[update] add missing littlefs by gitigore
This commit is contained in:
parent
e0ebebd4e9
commit
1602152550
48 changed files with 23370 additions and 0 deletions
26
components/fs/littlefs/littlefs/.github/workflows/post-release.yml
vendored
Normal file
26
components/fs/littlefs/littlefs/.github/workflows/post-release.yml
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
name: post-release
|
||||
on:
|
||||
release:
|
||||
branches: [master]
|
||||
types: [released]
|
||||
|
||||
jobs:
|
||||
post-release:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
# trigger post-release in dependency repo, this indirection allows the
|
||||
# dependency repo to be updated often without affecting this repo. At
|
||||
# the time of this comment, the dependency repo is responsible for
|
||||
# creating PRs for other dependent repos post-release.
|
||||
- name: trigger-post-release
|
||||
continue-on-error: true
|
||||
run: |
|
||||
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
|
||||
"$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
|
||||
-d "$(jq -n '{
|
||||
event_type: "post-release",
|
||||
client_payload: {
|
||||
repo: env.GITHUB_REPOSITORY,
|
||||
version: "${{github.event.release.tag_name}}"}}' \
|
||||
| tee /dev/stderr)"
|
||||
|
196
components/fs/littlefs/littlefs/.github/workflows/release.yml
vendored
Normal file
196
components/fs/littlefs/littlefs/.github/workflows/release.yml
vendored
Normal file
|
@ -0,0 +1,196 @@
|
|||
name: release
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [test]
|
||||
branches: [master]
|
||||
types: [completed]
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
# need to manually check for a couple things
|
||||
# - tests passed?
|
||||
# - we are the most recent commit on master?
|
||||
if: ${{github.event.workflow_run.conclusion == 'success' &&
|
||||
github.event.workflow_run.head_sha == github.sha}}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{github.event.workflow_run.head_sha}}
|
||||
# need workflow access since we push branches
|
||||
# containing workflows
|
||||
token: ${{secrets.BOT_TOKEN}}
|
||||
# need all tags
|
||||
fetch-depth: 0
|
||||
|
||||
# try to get results from tests
|
||||
- uses: dawidd6/action-download-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
workflow: ${{github.event.workflow_run.name}}
|
||||
run_id: ${{github.event.workflow_run.id}}
|
||||
name: results
|
||||
path: results
|
||||
|
||||
- name: find-version
|
||||
run: |
|
||||
# rip version from lfs.h
|
||||
LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
|
||||
| awk '{print $3}')"
|
||||
LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
|
||||
LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
|
||||
|
||||
# find a new patch version based on what we find in our tags
|
||||
LFS_VERSION_PATCH="$( \
|
||||
( git describe --tags --abbrev=0 \
|
||||
--match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
|
||||
|| echo 'v0.0.-1' ) \
|
||||
| awk -F '.' '{print $3+1}')"
|
||||
|
||||
# found new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR`
|
||||
`.$LFS_VERSION_MINOR`
|
||||
`.$LFS_VERSION_PATCH"
|
||||
echo "LFS_VERSION=$LFS_VERSION"
|
||||
echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
|
||||
|
||||
# try to find previous version?
|
||||
- name: find-prev-version
|
||||
continue-on-error: true
|
||||
run: |
|
||||
LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
|
||||
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
|
||||
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
|
||||
|
||||
# try to find results from tests
|
||||
- name: collect-results
|
||||
run: |
|
||||
# previous results to compare against?
|
||||
[ -n "$LFS_PREV_VERSION" ] && curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
|
||||
`status/$LFS_PREV_VERSION?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
|
||||
>> prev-results.json \
|
||||
|| true
|
||||
|
||||
# build table for GitHub
|
||||
echo "<table>" >> results.txt
|
||||
echo "<thead>" >> results.txt
|
||||
echo "<tr>" >> results.txt
|
||||
echo "<th align=left>Configuration</th>" >> results.txt
|
||||
for r in Code Stack Structs Coverage
|
||||
do
|
||||
echo "<th align=right>$r</th>" >> results.txt
|
||||
done
|
||||
echo "</tr>" >> results.txt
|
||||
echo "</thead>" >> results.txt
|
||||
|
||||
echo "<tbody>" >> results.txt
|
||||
for c in "" readonly threadsafe migrate error-asserts
|
||||
do
|
||||
echo "<tr>" >> results.txt
|
||||
c_or_default=${c:-default}
|
||||
echo "<td align=left>${c_or_default^}</td>" >> results.txt
|
||||
for r in code stack structs
|
||||
do
|
||||
# per-config results
|
||||
echo "<td align=right>" >> results.txt
|
||||
[ -e results/thumb${c:+-$c}.csv ] && ( \
|
||||
export PREV="$(jq -re '
|
||||
select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
|
||||
| capture("(?<result>[0-9∞]+)").result' \
|
||||
prev-results.json || echo 0)"
|
||||
./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
|
||||
NR==2 {printf "%s B",$2}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
|
||||
NR==2 {printf "\n"}' \
|
||||
| sed -e 's/ /\ /g' \
|
||||
>> results.txt)
|
||||
echo "</td>" >> results.txt
|
||||
done
|
||||
# coverage results
|
||||
if [ -z $c ]
|
||||
then
|
||||
echo "<td rowspan=0 align=right>" >> results.txt
|
||||
[ -e results/coverage.csv ] && ( \
|
||||
export PREV="$(jq -re '
|
||||
select(.context == "results / coverage").description
|
||||
| capture("(?<result>[0-9\\.]+)").result' \
|
||||
prev-results.json || echo 0)"
|
||||
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
|
||||
NR==2 {printf "%.1f%% of %d lines",$4,$3}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
|
||||
NR==2 {printf "\n"}' \
|
||||
| sed -e 's/ /\ /g' \
|
||||
>> results.txt)
|
||||
echo "</td>" >> results.txt
|
||||
fi
|
||||
echo "</tr>" >> results.txt
|
||||
done
|
||||
echo "</tbody>" >> results.txt
|
||||
echo "</table>" >> results.txt
|
||||
|
||||
cat results.txt
|
||||
|
||||
# find changes from history
|
||||
- name: collect-changes
|
||||
run: |
|
||||
[ -n "$LFS_PREV_VERSION" ] || exit 0
|
||||
# use explicit link to github commit so that release notes can
|
||||
# be copied elsewhere
|
||||
git log "$LFS_PREV_VERSION.." \
|
||||
--grep='^Merge' --invert-grep \
|
||||
--format="format:[\`%h\`](`
|
||||
`https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
|
||||
> changes.txt
|
||||
echo "CHANGES:"
|
||||
cat changes.txt
|
||||
|
||||
# create and update major branches (vN and vN-prefix)
|
||||
- name: create-major-branches
|
||||
run: |
|
||||
# create major branch
|
||||
git branch "v$LFS_VERSION_MAJOR" HEAD
|
||||
|
||||
# create major prefix branch
|
||||
git config user.name ${{secrets.BOT_USER}}
|
||||
git config user.email ${{secrets.BOT_EMAIL}}
|
||||
git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
|
||||
"v$LFS_VERSION_MAJOR-prefix" || true
|
||||
./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
|
||||
git branch "v$LFS_VERSION_MAJOR-prefix" $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
|
||||
# push!
|
||||
git push --atomic origin \
|
||||
"v$LFS_VERSION_MAJOR" \
|
||||
"v$LFS_VERSION_MAJOR-prefix"
|
||||
|
||||
# build release notes
|
||||
- name: create-release
|
||||
run: |
|
||||
# create release and patch version tag (vN.N.N)
|
||||
# only draft if not a patch release
|
||||
[ -e results.txt ] && export RESULTS="$(cat results.txt)"
|
||||
[ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
|
||||
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
|
||||
-d "$(jq -n '{
|
||||
tag_name: env.LFS_VERSION,
|
||||
name: env.LFS_VERSION | rtrimstr(".0"),
|
||||
target_commitish: "${{github.event.workflow_run.head_sha}}",
|
||||
draft: env.LFS_VERSION | endswith(".0"),
|
||||
body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
|
||||
| tee /dev/stderr)"
|
||||
|
55
components/fs/littlefs/littlefs/.github/workflows/status.yml
vendored
Normal file
55
components/fs/littlefs/littlefs/.github/workflows/status.yml
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
name: status
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [test]
|
||||
types: [completed]
|
||||
|
||||
jobs:
|
||||
status:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
# custom statuses?
|
||||
- uses: dawidd6/action-download-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
workflow: ${{github.event.workflow_run.name}}
|
||||
run_id: ${{github.event.workflow_run.id}}
|
||||
name: status
|
||||
path: status
|
||||
- name: update-status
|
||||
continue-on-error: true
|
||||
run: |
|
||||
ls status
|
||||
for s in $(shopt -s nullglob ; echo status/*.json)
|
||||
do
|
||||
# parse requested status
|
||||
export STATE="$(jq -er '.state' $s)"
|
||||
export CONTEXT="$(jq -er '.context' $s)"
|
||||
export DESCRIPTION="$(jq -er '.description' $s)"
|
||||
# help lookup URL for job/steps because GitHub makes
|
||||
# it VERY HARD to link to specific jobs
|
||||
export TARGET_URL="$(
|
||||
jq -er '.target_url // empty' $s || (
|
||||
export TARGET_JOB="$(jq -er '.target_job' $s)"
|
||||
export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
|
||||
curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
|
||||
`${{github.event.workflow_run.id}}/jobs" \
|
||||
| jq -er '.jobs[]
|
||||
| select(.name == env.TARGET_JOB)
|
||||
| .html_url
|
||||
+ "?check_suite_focus=true"
|
||||
+ ((.steps[]
|
||||
| select(.name == env.TARGET_STEP)
|
||||
| "#step:\(.number):0") // "")'))"
|
||||
# update status
|
||||
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
|
||||
`${{github.event.workflow_run.head_sha}}" \
|
||||
-d "$(jq -n '{
|
||||
state: env.STATE,
|
||||
context: env.CONTEXT,
|
||||
description: env.DESCRIPTION,
|
||||
target_url: env.TARGET_URL}' \
|
||||
| tee /dev/stderr)"
|
||||
done
|
493
components/fs/littlefs/littlefs/.github/workflows/test.yml
vendored
Normal file
493
components/fs/littlefs/littlefs/.github/workflows/test.yml
vendored
Normal file
|
@ -0,0 +1,493 @@
|
|||
name: test
|
||||
on: [push, pull_request]
|
||||
|
||||
env:
|
||||
CFLAGS: -Werror
|
||||
MAKEFLAGS: -j
|
||||
|
||||
jobs:
|
||||
# run tests
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
arch: [x86_64, thumb, mips, powerpc]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need a few additional tools
|
||||
#
|
||||
# note this includes gcc-10, which is required for -fcallgraph-info=su
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq gcc-10 python3 python3-pip lcov
|
||||
sudo pip3 install toml
|
||||
echo "CC=gcc-10" >> $GITHUB_ENV
|
||||
gcc-10 --version
|
||||
lcov --version
|
||||
python3 --version
|
||||
|
||||
# need newer lcov version for gcc-10
|
||||
#sudo apt-get remove lcov
|
||||
#wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
|
||||
#sudo apt install ./lcov_1.15-1_all.deb
|
||||
#lcov --version
|
||||
#which lcov
|
||||
#ls -lha /usr/bin/lcov
|
||||
wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
|
||||
tar xf lcov-1.15.tar.gz
|
||||
sudo make -C lcov-1.15 install
|
||||
|
||||
# setup a ram-backed disk to speed up reentrant tests
|
||||
mkdir disks
|
||||
sudo mount -t tmpfs -o size=100m tmpfs disks
|
||||
TESTFLAGS="$TESTFLAGS --disk=disks/disk"
|
||||
|
||||
# collect coverage
|
||||
mkdir -p coverage
|
||||
TESTFLAGS="$TESTFLAGS --coverage=`
|
||||
`coverage/${{github.job}}-${{matrix.arch}}.info"
|
||||
|
||||
echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
|
||||
|
||||
# cross-compile with ARM Thumb (32-bit, little-endian)
|
||||
- name: install-thumb
|
||||
if: ${{matrix.arch == 'thumb'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-arm-linux-gnueabi \
|
||||
libc6-dev-armel-cross \
|
||||
qemu-user
|
||||
echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-arm" >> $GITHUB_ENV
|
||||
arm-linux-gnueabi-gcc-10 --version
|
||||
qemu-arm -version
|
||||
# cross-compile with MIPS (32-bit, big-endian)
|
||||
- name: install-mips
|
||||
if: ${{matrix.arch == 'mips'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-mips-linux-gnu \
|
||||
libc6-dev-mips-cross \
|
||||
qemu-user
|
||||
echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-mips" >> $GITHUB_ENV
|
||||
mips-linux-gnu-gcc-10 --version
|
||||
qemu-mips -version
|
||||
# cross-compile with PowerPC (32-bit, big-endian)
|
||||
- name: install-powerpc
|
||||
if: ${{matrix.arch == 'powerpc'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-powerpc-linux-gnu \
|
||||
libc6-dev-powerpc-cross \
|
||||
qemu-user
|
||||
echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-ppc" >> $GITHUB_ENV
|
||||
powerpc-linux-gnu-gcc-10 --version
|
||||
qemu-ppc -version
|
||||
|
||||
# make sure example can at least compile
|
||||
- name: test-example
|
||||
run: |
|
||||
sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
|
||||
make all CFLAGS+=" \
|
||||
-Duser_provided_block_device_read=NULL \
|
||||
-Duser_provided_block_device_prog=NULL \
|
||||
-Duser_provided_block_device_erase=NULL \
|
||||
-Duser_provided_block_device_sync=NULL \
|
||||
-include stdio.h"
|
||||
rm test.c
|
||||
|
||||
# test configurations
|
||||
# normal+reentrant tests
|
||||
- name: test-default
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk"
|
||||
# NOR flash: read/prog = 1 block = 4KiB
|
||||
- name: test-nor
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
# SD/eMMC: read/prog = 512 block = 512
|
||||
- name: test-emmc
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
|
||||
# NAND flash: read/prog = 4KiB block = 32KiB
|
||||
- name: test-nand
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
|
||||
# other extreme geometries that are useful for various corner cases
|
||||
- name: test-no-intrinsics
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_NO_INTRINSICS"
|
||||
- name: test-byte-writes
|
||||
# it just takes too long to test byte-level writes when in qemu,
|
||||
# should be plenty covered by the other configurations
|
||||
if: ${{matrix.arch == 'x86_64'}}
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
|
||||
- name: test-block-cycles
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_BLOCK_CYCLES=1"
|
||||
- name: test-odd-block-count
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
- name: test-odd-block-size
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# upload coverage for later coverage
|
||||
- name: upload-coverage
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: coverage
|
||||
path: coverage
|
||||
retention-days: 1
|
||||
|
||||
# update results
|
||||
- name: results
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR"
|
||||
cp lfs.csv results/${{matrix.arch}}.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}.csv
|
||||
- name: results-readonly
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_READONLY"
|
||||
cp lfs.csv results/${{matrix.arch}}-readonly.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-readonly.csv
|
||||
- name: results-threadsafe
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_THREADSAFE"
|
||||
cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
|
||||
- name: results-migrate
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_MIGRATE"
|
||||
cp lfs.csv results/${{matrix.arch}}-migrate.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-migrate.csv
|
||||
- name: results-error-asserts
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
|
||||
cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
|
||||
- name: upload-results
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: results
|
||||
path: results
|
||||
|
||||
# create statuses with results
|
||||
- name: collect-status
|
||||
run: |
|
||||
mkdir -p status
|
||||
for f in $(shopt -s nullglob ; echo results/*.csv)
|
||||
do
|
||||
export STEP="results$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
|
||||
for r in code stack structs
|
||||
do
|
||||
export CONTEXT="results (${{matrix.arch}}$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
|
||||
export PREV="$(curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
|
||||
| select(.context == env.CONTEXT).description
|
||||
| capture("(?<result>[0-9∞]+)").result' \
|
||||
|| echo 0)"
|
||||
export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
|
||||
NR==2 {printf "%s B",$2}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
|
||||
jq -n '{
|
||||
state: "success",
|
||||
context: env.CONTEXT,
|
||||
description: env.DESCRIPTION,
|
||||
target_job: "${{github.job}} (${{matrix.arch}})",
|
||||
target_step: env.STEP}' \
|
||||
| tee status/$r-${{matrix.arch}}$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
|
||||
done
|
||||
done
|
||||
- name: upload-status
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: status
|
||||
path: status
|
||||
retention-days: 1
|
||||
|
||||
# run under Valgrind to check for memory errors
|
||||
valgrind:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip
|
||||
sudo pip3 install toml
|
||||
- name: install-valgrind
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq valgrind
|
||||
valgrind --version
|
||||
# normal tests, we don't need to test all geometries
|
||||
- name: test-valgrind
|
||||
run: make test TESTFLAGS+="-k --valgrind"
|
||||
|
||||
# test that compilation is warning free under clang
|
||||
clang:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip
|
||||
sudo pip3 install toml
|
||||
- name: install-clang
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq clang
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
clang --version
|
||||
# no reason to not test again
|
||||
- name: test-clang
|
||||
run: make test TESTFLAGS+="-k"
|
||||
|
||||
# self-host with littlefs-fuse for a fuzz-like test
|
||||
fuse:
|
||||
runs-on: ubuntu-20.04
|
||||
if: ${{!endsWith(github.ref, '-prefix')}}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip libfuse-dev
|
||||
sudo pip3 install toml
|
||||
fusermount -V
|
||||
gcc --version
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v2
|
||||
path: littlefs-fuse
|
||||
- name: setup
|
||||
run: |
|
||||
# copy our new version into littlefs-fuse
|
||||
rm -rf littlefs-fuse/littlefs/*
|
||||
cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
mkdir mount
|
||||
LOOP=$(sudo losetup -f)
|
||||
sudo chmod a+rw $LOOP
|
||||
dd if=/dev/zero bs=512 count=128K of=disk
|
||||
losetup $LOOP disk
|
||||
echo "LOOP=$LOOP" >> $GITHUB_ENV
|
||||
- name: test
|
||||
run: |
|
||||
# self-host test
|
||||
make -C littlefs-fuse
|
||||
|
||||
littlefs-fuse/lfs --format $LOOP
|
||||
littlefs-fuse/lfs $LOOP mount
|
||||
|
||||
ls mount
|
||||
mkdir mount/littlefs
|
||||
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# test migration using littlefs-fuse
|
||||
migrate:
|
||||
runs-on: ubuntu-20.04
|
||||
if: ${{!endsWith(github.ref, '-prefix')}}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip libfuse-dev
|
||||
sudo pip3 install toml
|
||||
fusermount -V
|
||||
gcc --version
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v2
|
||||
path: v2
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v1
|
||||
path: v1
|
||||
- name: setup
|
||||
run: |
|
||||
# copy our new version into littlefs-fuse
|
||||
rm -rf v2/littlefs/*
|
||||
cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
mkdir mount
|
||||
LOOP=$(sudo losetup -f)
|
||||
sudo chmod a+rw $LOOP
|
||||
dd if=/dev/zero bs=512 count=128K of=disk
|
||||
losetup $LOOP disk
|
||||
echo "LOOP=$LOOP" >> $GITHUB_ENV
|
||||
- name: test
|
||||
run: |
|
||||
# compile v1 and v2
|
||||
make -C v1
|
||||
make -C v2
|
||||
|
||||
# run self-host test with v1
|
||||
v1/lfs --format $LOOP
|
||||
v1/lfs $LOOP mount
|
||||
|
||||
ls mount
|
||||
mkdir mount/littlefs
|
||||
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# attempt to migrate
|
||||
cd ../..
|
||||
fusermount -u mount
|
||||
|
||||
v2/lfs --migrate $LOOP
|
||||
v2/lfs $LOOP mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
ls mount
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# collect coverage info
|
||||
coverage:
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [test]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip lcov
|
||||
sudo pip3 install toml
|
||||
# yes we continue-on-error nearly every step, continue-on-error
|
||||
# at job level apparently still marks a job as failed, which isn't
|
||||
# what we want
|
||||
- uses: actions/download-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: coverage
|
||||
path: coverage
|
||||
- name: results-coverage
|
||||
continue-on-error: true
|
||||
run: |
|
||||
mkdir -p results
|
||||
lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
|
||||
-o results/coverage.info
|
||||
./scripts/coverage.py results/coverage.info -o results/coverage.csv
|
||||
- name: upload-results
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: results
|
||||
path: results
|
||||
- name: collect-status
|
||||
run: |
|
||||
mkdir -p status
|
||||
[ -e results/coverage.csv ] || exit 0
|
||||
export STEP="results-coverage"
|
||||
export CONTEXT="results / coverage"
|
||||
export PREV="$(curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
|
||||
| select(.context == env.CONTEXT).description
|
||||
| capture("(?<result>[0-9\\.]+)").result' \
|
||||
|| echo 0)"
|
||||
export DESCRIPTION="$(
|
||||
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
|
||||
NR==2 {printf "%.1f%% of %d lines",$4,$3}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
|
||||
jq -n '{
|
||||
state: "success",
|
||||
context: env.CONTEXT,
|
||||
description: env.DESCRIPTION,
|
||||
target_job: "${{github.job}}",
|
||||
target_step: env.STEP}' \
|
||||
| tee status/coverage.json
|
||||
- name: upload-status
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: status
|
||||
path: status
|
||||
retention-days: 1
|
14
components/fs/littlefs/littlefs/.gitignore
vendored
Normal file
14
components/fs/littlefs/littlefs/.gitignore
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
# Compilation output
|
||||
*.o
|
||||
*.d
|
||||
*.a
|
||||
*.ci
|
||||
*.csv
|
||||
|
||||
# Testing things
|
||||
blocks/
|
||||
lfs
|
||||
test.c
|
||||
tests/*.toml.*
|
||||
scripts/__pycache__
|
||||
.gdb_history
|
2173
components/fs/littlefs/littlefs/DESIGN.md
Normal file
2173
components/fs/littlefs/littlefs/DESIGN.md
Normal file
File diff suppressed because it is too large
Load diff
25
components/fs/littlefs/littlefs/LICENSE.md
Normal file
25
components/fs/littlefs/littlefs/LICENSE.md
Normal file
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2022, The littlefs authors.
|
||||
Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
- Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
- Neither the name of ARM nor the names of its contributors may be used to
|
||||
endorse or promote products derived from this software without specific prior
|
||||
written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
172
components/fs/littlefs/littlefs/Makefile
Normal file
172
components/fs/littlefs/littlefs/Makefile
Normal file
|
@ -0,0 +1,172 @@
|
|||
ifdef BUILDDIR
|
||||
# make sure BUILDDIR ends with a slash
|
||||
override BUILDDIR := $(BUILDDIR)/
|
||||
# bit of a hack, but we want to make sure BUILDDIR directory structure
|
||||
# is correct before any commands
|
||||
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
|
||||
$(BUILDDIR) \
|
||||
$(BUILDDIR)bd \
|
||||
$(BUILDDIR)tests))
|
||||
endif
|
||||
|
||||
# overridable target/src/tools/flags/etc
|
||||
ifneq ($(wildcard test.c main.c),)
|
||||
TARGET ?= $(BUILDDIR)lfs
|
||||
else
|
||||
TARGET ?= $(BUILDDIR)lfs.a
|
||||
endif
|
||||
|
||||
|
||||
CC ?= gcc
|
||||
AR ?= ar
|
||||
SIZE ?= size
|
||||
CTAGS ?= ctags
|
||||
NM ?= nm
|
||||
OBJDUMP ?= objdump
|
||||
LCOV ?= lcov
|
||||
|
||||
SRC ?= $(wildcard *.c)
|
||||
OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
|
||||
DEP := $(SRC:%.c=$(BUILDDIR)%.d)
|
||||
ASM := $(SRC:%.c=$(BUILDDIR)%.s)
|
||||
CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
|
||||
|
||||
ifdef DEBUG
|
||||
override CFLAGS += -O0
|
||||
else
|
||||
override CFLAGS += -Os
|
||||
endif
|
||||
ifdef TRACE
|
||||
override CFLAGS += -DLFS_YES_TRACE
|
||||
endif
|
||||
override CFLAGS += -g3
|
||||
override CFLAGS += -I.
|
||||
override CFLAGS += -std=c99 -Wall -Wextra -pedantic
|
||||
|
||||
ifdef VERBOSE
|
||||
override TESTFLAGS += -v
|
||||
override CALLSFLAGS += -v
|
||||
override CODEFLAGS += -v
|
||||
override DATAFLAGS += -v
|
||||
override STACKFLAGS += -v
|
||||
override STRUCTSFLAGS += -v
|
||||
override COVERAGEFLAGS += -v
|
||||
endif
|
||||
ifdef EXEC
|
||||
override TESTFLAGS += --exec="$(EXEC)"
|
||||
endif
|
||||
ifdef COVERAGE
|
||||
override TESTFLAGS += --coverage
|
||||
endif
|
||||
ifdef BUILDDIR
|
||||
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
endif
|
||||
ifneq ($(NM),nm)
|
||||
override CODEFLAGS += --nm-tool="$(NM)"
|
||||
override DATAFLAGS += --nm-tool="$(NM)"
|
||||
endif
|
||||
ifneq ($(OBJDUMP),objdump)
|
||||
override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
|
||||
endif
|
||||
|
||||
|
||||
# commands
|
||||
.PHONY: all build
|
||||
all build: $(TARGET)
|
||||
|
||||
.PHONY: asm
|
||||
asm: $(ASM)
|
||||
|
||||
.PHONY: size
|
||||
size: $(OBJ)
|
||||
$(SIZE) -t $^
|
||||
|
||||
.PHONY: tags
|
||||
tags:
|
||||
$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
|
||||
|
||||
.PHONY: calls
|
||||
calls: $(CGI)
|
||||
./scripts/calls.py $^ $(CALLSFLAGS)
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
./scripts/test.py $(TESTFLAGS)
|
||||
.SECONDEXPANSION:
|
||||
test%: tests/test$$(firstword $$(subst \#, ,%)).toml
|
||||
./scripts/test.py $@ $(TESTFLAGS)
|
||||
|
||||
.PHONY: code
|
||||
code: $(OBJ)
|
||||
./scripts/code.py $^ -S $(CODEFLAGS)
|
||||
|
||||
.PHONY: data
|
||||
data: $(OBJ)
|
||||
./scripts/data.py $^ -S $(DATAFLAGS)
|
||||
|
||||
.PHONY: stack
|
||||
stack: $(CGI)
|
||||
./scripts/stack.py $^ -S $(STACKFLAGS)
|
||||
|
||||
.PHONY: structs
|
||||
structs: $(OBJ)
|
||||
./scripts/structs.py $^ -S $(STRUCTSFLAGS)
|
||||
|
||||
.PHONY: coverage
|
||||
coverage:
|
||||
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
|
||||
|
||||
.PHONY: summary
|
||||
summary: $(BUILDDIR)lfs.csv
|
||||
./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
|
||||
|
||||
|
||||
# rules
|
||||
-include $(DEP)
|
||||
.SUFFIXES:
|
||||
|
||||
$(BUILDDIR)lfs: $(OBJ)
|
||||
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
|
||||
|
||||
$(BUILDDIR)lfs.a: $(OBJ)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
|
||||
./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
|
||||
./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
|
||||
./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
|
||||
./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
|
||||
$(if $(COVERAGE),\
|
||||
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
|
||||
-q -m $@ $(COVERAGEFLAGS) -o $@)
|
||||
|
||||
$(BUILDDIR)%.o: %.c
|
||||
$(CC) -c -MMD $(CFLAGS) $< -o $@
|
||||
|
||||
$(BUILDDIR)%.s: %.c
|
||||
$(CC) -S $(CFLAGS) $< -o $@
|
||||
|
||||
# gcc depends on the output file for intermediate file names, so
|
||||
# we can't omit to .o output. We also need to serialize with the
|
||||
# normal .o rule because otherwise we can end up with multiprocess
|
||||
# problems with two instances of gcc modifying the same .o
|
||||
$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
|
||||
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
|
||||
|
||||
# clean everything
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -f $(BUILDDIR)lfs
|
||||
rm -f $(BUILDDIR)lfs.a
|
||||
rm -f $(BUILDDIR)lfs.csv
|
||||
rm -f $(OBJ)
|
||||
rm -f $(CGI)
|
||||
rm -f $(DEP)
|
||||
rm -f $(ASM)
|
||||
rm -f $(BUILDDIR)tests/*.toml.*
|
258
components/fs/littlefs/littlefs/README.md
Normal file
258
components/fs/littlefs/littlefs/README.md
Normal file
|
@ -0,0 +1,258 @@
|
|||
## littlefs
|
||||
|
||||
A little fail-safe filesystem designed for microcontrollers.
|
||||
|
||||
```
|
||||
| | | .---._____
|
||||
.-----. | |
|
||||
--|o |---| littlefs |
|
||||
--| |---| |
|
||||
'-----' '----------'
|
||||
| | |
|
||||
```
|
||||
|
||||
**Power-loss resilience** - littlefs is designed to handle random power
|
||||
failures. All file operations have strong copy-on-write guarantees and if
|
||||
power is lost the filesystem will fall back to the last known good state.
|
||||
|
||||
**Dynamic wear leveling** - littlefs is designed with flash in mind, and
|
||||
provides wear leveling over dynamic blocks. Additionally, littlefs can
|
||||
detect bad blocks and work around them.
|
||||
|
||||
**Bounded RAM/ROM** - littlefs is designed to work with a small amount of
|
||||
memory. RAM usage is strictly bounded, which means RAM consumption does not
|
||||
change as the filesystem grows. The filesystem contains no unbounded
|
||||
recursion and dynamic memory is limited to configurable buffers that can be
|
||||
provided statically.
|
||||
|
||||
## Example
|
||||
|
||||
Here's a simple example that updates a file named `boot_count` every time
|
||||
main runs. The program can be interrupted at any time without losing track
|
||||
of how many times it has been booted and without corrupting the filesystem:
|
||||
|
||||
``` c
|
||||
#include "lfs.h"
|
||||
|
||||
// variables used by the filesystem
|
||||
lfs_t lfs;
|
||||
lfs_file_t file;
|
||||
|
||||
// configuration of the filesystem is provided by this struct
|
||||
const struct lfs_config cfg = {
|
||||
// block device operations
|
||||
.read = user_provided_block_device_read,
|
||||
.prog = user_provided_block_device_prog,
|
||||
.erase = user_provided_block_device_erase,
|
||||
.sync = user_provided_block_device_sync,
|
||||
|
||||
// block device configuration
|
||||
.read_size = 16,
|
||||
.prog_size = 16,
|
||||
.block_size = 4096,
|
||||
.block_count = 128,
|
||||
.cache_size = 16,
|
||||
.lookahead_size = 16,
|
||||
.block_cycles = 500,
|
||||
};
|
||||
|
||||
// entry point
|
||||
int main(void) {
|
||||
// mount the filesystem
|
||||
int err = lfs_mount(&lfs, &cfg);
|
||||
|
||||
// reformat if we can't mount the filesystem
|
||||
// this should only happen on the first boot
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg);
|
||||
lfs_mount(&lfs, &cfg);
|
||||
}
|
||||
|
||||
// read current count
|
||||
uint32_t boot_count = 0;
|
||||
lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
|
||||
lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));
|
||||
|
||||
// update boot count
|
||||
boot_count += 1;
|
||||
lfs_file_rewind(&lfs, &file);
|
||||
lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
|
||||
|
||||
// remember the storage is not updated until the file is closed successfully
|
||||
lfs_file_close(&lfs, &file);
|
||||
|
||||
// release any resources we were using
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
// print the boot count
|
||||
printf("boot_count: %d\n", boot_count);
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Detailed documentation (or at least as much detail as is currently available)
|
||||
can be found in the comments in [lfs.h](lfs.h).
|
||||
|
||||
littlefs takes in a configuration structure that defines how the filesystem
|
||||
operates. The configuration struct provides the filesystem with the block
|
||||
device operations and dimensions, tweakable parameters that tradeoff memory
|
||||
usage for performance, and optional static buffers if the user wants to avoid
|
||||
dynamic memory.
|
||||
|
||||
The state of the littlefs is stored in the `lfs_t` type which is left up
|
||||
to the user to allocate, allowing multiple filesystems to be in use
|
||||
simultaneously. With the `lfs_t` and configuration struct, a user can
|
||||
format a block device or mount the filesystem.
|
||||
|
||||
Once mounted, the littlefs provides a full set of POSIX-like file and
|
||||
directory functions, with the deviation that the allocation of filesystem
|
||||
structures must be provided by the user.
|
||||
|
||||
All POSIX operations, such as remove and rename, are atomic, even in event
|
||||
of power-loss. Additionally, file updates are not actually committed to
|
||||
the filesystem until sync or close is called on the file.
|
||||
|
||||
## Other notes
|
||||
|
||||
Littlefs is written in C, and specifically should compile with any compiler
|
||||
that conforms to the `C99` standard.
|
||||
|
||||
All littlefs calls have the potential to return a negative error code. The
|
||||
errors can be either one of those found in the `enum lfs_error` in
|
||||
[lfs.h](lfs.h), or an error returned by the user's block device operations.
|
||||
|
||||
In the configuration struct, the `prog` and `erase` function provided by the
|
||||
user may return a `LFS_ERR_CORRUPT` error if the implementation already can
|
||||
detect corrupt blocks. However, the wear leveling does not depend on the return
|
||||
code of these functions, instead all data is read back and checked for
|
||||
integrity.
|
||||
|
||||
If your storage caches writes, make sure that the provided `sync` function
|
||||
flushes all the data to memory and ensures that the next read fetches the data
|
||||
from memory, otherwise data integrity can not be guaranteed. If the `write`
|
||||
function does not perform caching, and therefore each `read` or `write` call
|
||||
hits the memory, the `sync` function can simply return 0.
|
||||
|
||||
## Design
|
||||
|
||||
At a high level, littlefs is a block based filesystem that uses small logs to
|
||||
store metadata and larger copy-on-write (COW) structures to store file data.
|
||||
|
||||
In littlefs, these ingredients form a sort of two-layered cake, with the small
|
||||
logs (called metadata pairs) providing fast updates to metadata anywhere on
|
||||
storage, while the COW structures store file data compactly and without any
|
||||
wear amplification cost.
|
||||
|
||||
Both of these data structures are built out of blocks, which are fed by a
|
||||
common block allocator. By limiting the number of erases allowed on a block
|
||||
per allocation, the allocator provides dynamic wear leveling over the entire
|
||||
filesystem.
|
||||
|
||||
```
|
||||
root
|
||||
.--------.--------.
|
||||
| A'| B'| |
|
||||
| | |-> |
|
||||
| | | |
|
||||
'--------'--------'
|
||||
.----' '--------------.
|
||||
A v B v
|
||||
.--------.--------. .--------.--------.
|
||||
| C'| D'| | | E'|new| |
|
||||
| | |-> | | | E'|-> |
|
||||
| | | | | | | |
|
||||
'--------'--------' '--------'--------'
|
||||
.-' '--. | '------------------.
|
||||
v v .-' v
|
||||
.--------. .--------. v .--------.
|
||||
| C | | D | .--------. write | new E |
|
||||
| | | | | E | ==> | |
|
||||
| | | | | | | |
|
||||
'--------' '--------' | | '--------'
|
||||
'--------' .-' |
|
||||
.-' '-. .-------------|------'
|
||||
v v v v
|
||||
.--------. .--------. .--------.
|
||||
| F | | G | | new F |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
'--------' '--------' '--------'
|
||||
```
|
||||
|
||||
More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
|
||||
[SPEC.md](SPEC.md).
|
||||
|
||||
- [DESIGN.md](DESIGN.md) - A fully detailed dive into how littlefs works.
|
||||
I would suggest reading it as the tradeoffs at work are quite interesting.
|
||||
|
||||
- [SPEC.md](SPEC.md) - The on-disk specification of littlefs with all the
|
||||
nitty-gritty details. May be useful for tooling development.
|
||||
|
||||
## Testing
|
||||
|
||||
The littlefs comes with a test suite designed to run on a PC using the
|
||||
[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
|
||||
The tests assume a Linux environment and can be started with make:
|
||||
|
||||
``` bash
|
||||
make test
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
The littlefs is provided under the [BSD-3-Clause] license. See
|
||||
[LICENSE.md](LICENSE.md) for more information. Contributions to this project
|
||||
are accepted under the same license.
|
||||
|
||||
Individual files contain the following tag instead of the full license text.
|
||||
|
||||
SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
This enables machine processing of license information based on the SPDX
|
||||
License Identifiers that are here available: http://spdx.org/licenses/
|
||||
|
||||
## Related projects
|
||||
|
||||
- [littlefs-fuse] - A [FUSE] wrapper for littlefs. The project allows you to
|
||||
mount littlefs directly on a Linux machine. Can be useful for debugging
|
||||
littlefs if you have an SD card handy.
|
||||
|
||||
- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would
|
||||
want this, but it is handy for demos. You can see it in action
|
||||
[here][littlefs-js-demo].
|
||||
|
||||
- [littlefs-python] - A Python wrapper for littlefs. The project allows you
|
||||
to create images of the filesystem on your PC. Check if littlefs will fit
|
||||
your needs, create images for a later download to the target memory or
|
||||
inspect the content of a binary image of the target memory.
|
||||
|
||||
- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
|
||||
littlefs images from a host PC. Supports Windows, Mac OS, and Linux.
|
||||
|
||||
- [Mbed OS] - The easiest way to get started with littlefs is to jump into Mbed
|
||||
which already has block device drivers for most forms of embedded storage.
|
||||
littlefs is available in Mbed OS as the [LittleFileSystem] class.
|
||||
|
||||
- [SPIFFS] - Another excellent embedded filesystem for NOR flash. As a more
|
||||
traditional logging filesystem with full static wear-leveling, SPIFFS will
|
||||
likely outperform littlefs on small memories such as the internal flash on
|
||||
microcontrollers.
|
||||
|
||||
- [Dhara] - An interesting NAND flash translation layer designed for small
|
||||
MCUs. It offers static wear-leveling and power-resilience with only a fixed
|
||||
_O(|address|)_ pointer structure stored on each block and in RAM.
|
||||
|
||||
|
||||
[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
|
||||
[littlefs-fuse]: https://github.com/geky/littlefs-fuse
|
||||
[FUSE]: https://github.com/libfuse/libfuse
|
||||
[littlefs-js]: https://github.com/geky/littlefs-js
|
||||
[littlefs-js-demo]:http://littlefs.geky.net/demo.html
|
||||
[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src
|
||||
[Lua RTOS]: https://github.com/whitecatboard/Lua-RTOS-ESP32
|
||||
[Mbed OS]: https://github.com/armmbed/mbed-os
|
||||
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html
|
||||
[SPIFFS]: https://github.com/pellepl/spiffs
|
||||
[Dhara]: https://github.com/dlbeer/dhara
|
||||
[littlefs-python]: https://pypi.org/project/littlefs-python/
|
787
components/fs/littlefs/littlefs/SPEC.md
Normal file
787
components/fs/littlefs/littlefs/SPEC.md
Normal file
|
@ -0,0 +1,787 @@
|
|||
## littlefs technical specification
|
||||
|
||||
This is the technical specification of the little filesystem. This document
|
||||
covers the technical details of how the littlefs is stored on disk for
|
||||
introspection and tooling. This document assumes you are familiar with the
|
||||
design of the littlefs, for more info on how littlefs works check
|
||||
out [DESIGN.md](DESIGN.md).
|
||||
|
||||
```
|
||||
| | | .---._____
|
||||
.-----. | |
|
||||
--|o |---| littlefs |
|
||||
--| |---| |
|
||||
'-----' '----------'
|
||||
| | |
|
||||
```
|
||||
|
||||
## Some quick notes
|
||||
|
||||
- littlefs is a block-based filesystem. The disk is divided into an array of
|
||||
evenly sized blocks that are used as the logical unit of storage.
|
||||
|
||||
- Block pointers are stored in 32 bits, with the special value `0xffffffff`
|
||||
representing a null block address.
|
||||
|
||||
- In addition to the logical block size (which usually matches the erase
|
||||
block size), littlefs also uses a program block size and read block size.
|
||||
These determine the alignment of block device operations, but don't need
|
||||
to be consistent for portability.
|
||||
|
||||
- By default, all values in littlefs are stored in little-endian byte order.
|
||||
|
||||
## Directories / Metadata pairs
|
||||
|
||||
Metadata pairs form the backbone of littlefs and provide a system for
|
||||
distributed atomic updates. Even the superblock is stored in a metadata pair.
|
||||
|
||||
As their name suggests, a metadata pair is stored in two blocks, with one block
|
||||
providing a backup during erase cycles in case power is lost. These two blocks
|
||||
are not necessarily sequential and may be anywhere on disk, so a "pointer" to a
|
||||
metadata pair is stored as two block pointers.
|
||||
|
||||
On top of this, each metadata block behaves as an appendable log, containing a
|
||||
variable number of commits. Commits can be appended to the metadata log in
|
||||
order to update the metadata without requiring an erase cycles. Note that
|
||||
successive commits may supersede the metadata in previous commits. Only the
|
||||
most recent metadata should be considered valid.
|
||||
|
||||
The high-level layout of a metadata block is fairly simple:
|
||||
|
||||
```
|
||||
.---------------------------------------.
|
||||
.-| revision count | entries | \
|
||||
| |-------------------+ | |
|
||||
| | | |
|
||||
| | | +-- 1st commit
|
||||
| | | |
|
||||
| | +-------------------| |
|
||||
| | | CRC | /
|
||||
| |-------------------+-------------------|
|
||||
| | entries | \
|
||||
| | | |
|
||||
| | | +-- 2nd commit
|
||||
| | +-------------------+--------------| |
|
||||
| | | CRC | padding | /
|
||||
| |----+-------------------+--------------|
|
||||
| | entries | \
|
||||
| | | |
|
||||
| | | +-- 3rd commit
|
||||
| | +-------------------+---------| |
|
||||
| | | CRC | | /
|
||||
| |---------+-------------------+ |
|
||||
| | unwritten storage | more commits
|
||||
| | | |
|
||||
| | | v
|
||||
| | |
|
||||
| | |
|
||||
| '---------------------------------------'
|
||||
'---------------------------------------'
|
||||
```
|
||||
|
||||
Each metadata block contains a 32-bit revision count followed by a number of
|
||||
commits. Each commit contains a variable number of metadata entries followed
|
||||
by a 32-bit CRC.
|
||||
|
||||
Note also that entries aren't necessarily word-aligned. This allows us to
|
||||
store metadata more compactly, however we can only write to addresses that are
|
||||
aligned to our program block size. This means each commit may have padding for
|
||||
alignment.
|
||||
|
||||
Metadata block fields:
|
||||
|
||||
1. **Revision count (32-bits)** - Incremented every erase cycle. If both blocks
|
||||
contain valid commits, only the block with the most recent revision count
|
||||
should be used. Sequence comparison must be used to avoid issues with
|
||||
integer overflow.
|
||||
|
||||
2. **CRC (32-bits)** - Detects corruption from power-loss or other write
|
||||
issues. Uses a CRC-32 with a polynomial of `0x04c11db7` initialized
|
||||
with `0xffffffff`.
|
||||
|
||||
Entries themselves are stored as a 32-bit tag followed by a variable length
|
||||
blob of data. But exactly how these tags are stored is a little bit tricky.
|
||||
|
||||
Metadata blocks support both forward and backward iteration. In order to do
|
||||
this without duplicating the space for each tag, neighboring entries have their
|
||||
tags XORed together, starting with `0xffffffff`.
|
||||
|
||||
```
|
||||
Forward iteration Backward iteration
|
||||
|
||||
.-------------------. 0xffffffff .-------------------.
|
||||
| revision count | | | revision count |
|
||||
|-------------------| v |-------------------|
|
||||
| tag ~A |---> xor -> tag A | tag ~A |---> xor -> 0xffffffff
|
||||
|-------------------| | |-------------------| ^
|
||||
| data A | | | data A | |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
|-------------------| v |-------------------| |
|
||||
| tag AxB |---> xor -> tag B | tag AxB |---> xor -> tag A
|
||||
|-------------------| | |-------------------| ^
|
||||
| data B | | | data B | |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
|-------------------| v |-------------------| |
|
||||
| tag BxC |---> xor -> tag C | tag BxC |---> xor -> tag B
|
||||
|-------------------| |-------------------| ^
|
||||
| data C | | data C | |
|
||||
| | | | tag C
|
||||
| | | |
|
||||
| | | |
|
||||
'-------------------' '-------------------'
|
||||
```
|
||||
|
||||
One last thing to note before we get into the details around tag encoding. Each
|
||||
tag contains a valid bit used to indicate if the tag and containing commit is
|
||||
valid. This valid bit is the first bit found in the tag and the commit and can
|
||||
be used to tell if we've attempted to write to the remaining space in the
|
||||
block.
|
||||
|
||||
Here's a more complete example of metadata block containing 4 entries:
|
||||
|
||||
```
|
||||
.---------------------------------------.
|
||||
.-| revision count | tag ~A | \
|
||||
| |-------------------+-------------------| |
|
||||
| | data A | |
|
||||
| | | |
|
||||
| |-------------------+-------------------| |
|
||||
| | tag AxB | data B | <--. |
|
||||
| |-------------------+ | | |
|
||||
| | | | +-- 1st commit
|
||||
| | +-------------------+---------| | |
|
||||
| | | tag BxC | | <-.| |
|
||||
| |---------+-------------------+ | || |
|
||||
| | data C | || |
|
||||
| | | || |
|
||||
| |-------------------+-------------------| || |
|
||||
| | tag CxCRC | CRC | || /
|
||||
| |-------------------+-------------------| ||
|
||||
| | tag CRCxA' | data A' | || \
|
||||
| |-------------------+ | || |
|
||||
| | | || |
|
||||
| | +-------------------+----| || +-- 2nd commit
|
||||
| | | tag CRCxA' | | || |
|
||||
| |--------------+-------------------+----| || |
|
||||
| | CRC | padding | || /
|
||||
| |--------------+----+-------------------| ||
|
||||
| | tag CRCxA'' | data A'' | <---. \
|
||||
| |-------------------+ | ||| |
|
||||
| | | ||| |
|
||||
| | +-------------------+---------| ||| |
|
||||
| | | tag A''xD | | < ||| |
|
||||
| |---------+-------------------+ | |||| +-- 3rd commit
|
||||
| | data D | |||| |
|
||||
| | +---------| |||| |
|
||||
| | | tag Dx| |||| |
|
||||
| |---------+-------------------+---------| |||| |
|
||||
| |CRC | CRC | | |||| /
|
||||
| |---------+-------------------+ | ||||
|
||||
| | unwritten storage | |||| more commits
|
||||
| | | |||| |
|
||||
| | | |||| v
|
||||
| | | ||||
|
||||
| | | ||||
|
||||
| '---------------------------------------' ||||
|
||||
'---------------------------------------' |||'- most recent A
|
||||
||'-- most recent B
|
||||
|'--- most recent C
|
||||
'---- most recent D
|
||||
```
|
||||
|
||||
## Metadata tags
|
||||
|
||||
So in littlefs, 32-bit tags describe every type of metadata. And this means
|
||||
_every_ type of metadata, including file entries, directory fields, and
|
||||
global state. Even the CRCs used to mark the end of commits get their own tag.
|
||||
|
||||
Because of this, the tag format contains some densely packed information. Note
|
||||
that there are multiple levels of types which break down into more info:
|
||||
|
||||
```
|
||||
[---- 32 ----]
|
||||
[1|-- 11 --|-- 10 --|-- 10 --]
|
||||
^. ^ . ^ ^- length
|
||||
|. | . '------------ id
|
||||
|. '-----.------------------ type (type3)
|
||||
'.-----------.------------------ valid bit
|
||||
[-3-|-- 8 --]
|
||||
^ ^- chunk
|
||||
'------- type (type1)
|
||||
```
|
||||
|
||||
|
||||
Before we go further, there's one important thing to note. These tags are
|
||||
**not** stored in little-endian. Tags stored in commits are actually stored
|
||||
in big-endian (and is the only thing in littlefs stored in big-endian). This
|
||||
little bit of craziness comes from the fact that the valid bit must be the
|
||||
first bit in a commit, and when converted to little-endian, the valid bit finds
|
||||
itself in byte 4. We could restructure the tag to store the valid bit lower,
|
||||
but, because none of the fields are byte-aligned, this would be more
|
||||
complicated than just storing the tag in big-endian.
|
||||
|
||||
Another thing to note is that both the tags `0x00000000` and `0xffffffff` are
|
||||
invalid and can be used for null values.
|
||||
|
||||
Metadata tag fields:
|
||||
|
||||
1. **Valid bit (1-bit)** - Indicates if the tag is valid.
|
||||
|
||||
2. **Type3 (11-bits)** - Type of the tag. This field is broken down further
|
||||
into a 3-bit abstract type and an 8-bit chunk field. Note that the value
|
||||
`0x000` is invalid and not assigned a type.
|
||||
|
||||
1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
|
||||
8 categories that facilitate bitmasked lookups.
|
||||
|
||||
2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
|
||||
abstract types. type1+chunk+id form a unique identifier for each tag in the
|
||||
metadata block.
|
||||
|
||||
3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
|
||||
block gets a unique id which is used to associate tags with that file. The
|
||||
special value `0x3ff` is used for any tags that are not associated with a
|
||||
file, such as directory and global metadata.
|
||||
|
||||
4. **Length (10-bits)** - Length of the data in bytes. The special value
|
||||
`0x3ff` indicates that this tag has been deleted.
|
||||
|
||||
## Metadata types
|
||||
|
||||
What follows is an exhaustive list of metadata in littlefs.
|
||||
|
||||
---
|
||||
#### `0x401` LFS_TYPE_CREATE
|
||||
|
||||
Creates a new file with this id. Note that files in a metadata block
|
||||
don't necessarily need a create tag. All a create does is move over any
|
||||
files using this id. In this sense a create is similar to insertion into
|
||||
an imaginary array of files.
|
||||
|
||||
The create and delete tags allow littlefs to keep files in a directory
|
||||
ordered alphabetically by filename.
|
||||
|
||||
---
|
||||
#### `0x4ff` LFS_TYPE_DELETE
|
||||
|
||||
Deletes the file with this id. An inverse to create, this tag moves over
|
||||
any files neighboring this id similar to a deletion from an imaginary
|
||||
array of files.
|
||||
|
||||
---
|
||||
#### `0x0xx` LFS_TYPE_NAME
|
||||
|
||||
Associates the id with a file name and file type.
|
||||
|
||||
The data contains the file name stored as an ASCII string (may be expanded to
|
||||
UTF8 in the future).
|
||||
|
||||
The chunk field in this tag indicates an 8-bit file type which can be one of
|
||||
the following.
|
||||
|
||||
Currently, the name tag must precede any other tags associated with the id and
|
||||
can not be reassigned without deleting the file.
|
||||
|
||||
Layout of the name tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][--- variable length ---]
|
||||
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
|
||||
^ ^ ^ ^ ^- size ^- file name
|
||||
| | | '------ id
|
||||
| | '----------- file type
|
||||
| '-------------- type1 (0x0)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Name fields:
|
||||
|
||||
1. **file type (8-bits)** - Type of the file.
|
||||
|
||||
2. **file name** - File name stored as an ASCII string.
|
||||
|
||||
---
|
||||
#### `0x001` LFS_TYPE_REG
|
||||
|
||||
Initializes the id + name as a regular file.
|
||||
|
||||
How each file is stored depends on its struct tag, which is described below.
|
||||
|
||||
---
|
||||
#### `0x002` LFS_TYPE_DIR
|
||||
|
||||
Initializes the id + name as a directory.
|
||||
|
||||
Directories in littlefs are stored on disk as a linked-list of metadata pairs,
|
||||
each pair containing any number of files in alphabetical order. A pointer to
|
||||
the directory is stored in the struct tag, which is described below.
|
||||
|
||||
---
|
||||
#### `0x0ff` LFS_TYPE_SUPERBLOCK
|
||||
|
||||
Initializes the id as a superblock entry.
|
||||
|
||||
The superblock entry is a special entry used to store format-time configuration
|
||||
and identify the filesystem.
|
||||
|
||||
The name is a bit of a misnomer. While the superblock entry serves the same
|
||||
purpose as a superblock found in other filesystems, in littlefs the superblock
|
||||
does not get a dedicated block. Instead, the superblock entry is duplicated
|
||||
across a linked-list of metadata pairs rooted on the blocks 0 and 1. The last
|
||||
metadata pair doubles as the root directory of the filesystem.
|
||||
|
||||
```
|
||||
.--------. .--------. .--------. .--------. .--------.
|
||||
.| super |->| super |->| super |->| super |->| file B |
|
||||
|| block | || block | || block | || block | || file C |
|
||||
|| | || | || | || file A | || file D |
|
||||
|'--------' |'--------' |'--------' |'--------' |'--------'
|
||||
'--------' '--------' '--------' '--------' '--------'
|
||||
|
||||
\----------------+----------------/ \----------+----------/
|
||||
superblock pairs root directory
|
||||
```
|
||||
|
||||
The filesystem starts with only the root directory. The superblock metadata
|
||||
pairs grow every time the root pair is compacted in order to prolong the
|
||||
life of the device exponentially.
|
||||
|
||||
The contents of the superblock entry are stored in a name tag with the
|
||||
superblock type and an inline-struct tag. The name tag contains the magic
|
||||
string "littlefs", while the inline-struct tag contains version and
|
||||
configuration information.
|
||||
|
||||
Layout of the superblock name tag and inline-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][--- 64 ---]
|
||||
^ ^ ^ ^- size (8) ^- magic string ("littlefs")
|
||||
| | '------ id (0)
|
||||
| '------------ type (0x0ff)
|
||||
'----------------- valid bit
|
||||
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --|-- 32 --]
|
||||
^ ^ ^ ^ ^- version ^- block size ^- block count
|
||||
| | | | [-- 32 --|-- 32 --|-- 32 --]
|
||||
| | | | [-- 32 --|-- 32 --|-- 32 --]
|
||||
| | | | ^- name max ^- file max ^- attr max
|
||||
| | | '- size (24)
|
||||
| | '------ id (0)
|
||||
| '------------ type (0x201)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Superblock fields:
|
||||
|
||||
1. **Magic string (8-bytes)** - Magic string indicating the presence of
|
||||
littlefs on the device. Must be the string "littlefs".
|
||||
|
||||
2. **Version (32-bits)** - The version of littlefs at format time. The version
|
||||
is encoded in a 32-bit value with the upper 16-bits containing the major
|
||||
version, and the lower 16-bits containing the minor version.
|
||||
|
||||
This specification describes version 2.0 (`0x00020000`).
|
||||
|
||||
3. **Block size (32-bits)** - Size of the logical block size used by the
|
||||
filesystem in bytes.
|
||||
|
||||
4. **Block count (32-bits)** - Number of blocks in the filesystem.
|
||||
|
||||
5. **Name max (32-bits)** - Maximum size of file names in bytes.
|
||||
|
||||
6. **File max (32-bits)** - Maximum size of files in bytes.
|
||||
|
||||
7. **Attr max (32-bits)** - Maximum size of file attributes in bytes.
|
||||
|
||||
The superblock must always be the first entry (id 0) in a metadata pair as well
|
||||
as be the first entry written to the block. This means that the superblock
|
||||
entry can be read from a device using offsets alone.
|
||||
|
||||
---
|
||||
#### `0x2xx` LFS_TYPE_STRUCT
|
||||
|
||||
Associates the id with an on-disk data structure.
|
||||
|
||||
The exact layout of the data depends on the data structure type stored in the
|
||||
chunk field and can be one of the following.
|
||||
|
||||
Any type of struct supersedes all other structs associated with the id. For
|
||||
example, appending a ctz-struct replaces an inline-struct on the same file.
|
||||
|
||||
---
|
||||
#### `0x200` LFS_TYPE_DIRSTRUCT
|
||||
|
||||
Gives the id a directory data structure.
|
||||
|
||||
Directories in littlefs are stored on disk as a linked-list of metadata pairs,
|
||||
each pair containing any number of files in alphabetical order.
|
||||
|
||||
```
|
||||
|
|
||||
v
|
||||
.--------. .--------. .--------. .--------. .--------. .--------.
|
||||
.| file A |->| file D |->| file G |->| file I |->| file J |->| file M |
|
||||
|| file B | || file E | || file H | || | || file K | || file N |
|
||||
|| file C | || file F | || | || | || file L | || |
|
||||
|'--------' |'--------' |'--------' |'--------' |'--------' |'--------'
|
||||
'--------' '--------' '--------' '--------' '--------' '--------'
|
||||
```
|
||||
|
||||
The dir-struct tag contains only the pointer to the first metadata-pair in the
|
||||
directory. The directory size is not known without traversing the directory.
|
||||
|
||||
The pointer to the next metadata-pair in the directory is stored in a tail tag,
|
||||
which is described below.
|
||||
|
||||
Layout of the dir-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][--- 64 ---]
|
||||
^ ^ ^ ^- size (8) ^- metadata pair
|
||||
| | '------ id
|
||||
| '------------ type (0x200)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Dir-struct fields:
|
||||
|
||||
1. **Metadata pair (8-bytes)** - Pointer to the first metadata-pair
|
||||
in the directory.
|
||||
|
||||
---
|
||||
#### `0x201` LFS_TYPE_INLINESTRUCT
|
||||
|
||||
Gives the id an inline data structure.
|
||||
|
||||
Inline structs store small files that can fit in the metadata pair. In this
|
||||
case, the file data is stored directly in the tag's data area.
|
||||
|
||||
Layout of the inline-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][--- variable length ---]
|
||||
[1|- 11 -| 10 | 10 ][--- (size * 8) ---]
|
||||
^ ^ ^ ^- size ^- inline data
|
||||
| | '------ id
|
||||
| '------------ type (0x201)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Inline-struct fields:
|
||||
|
||||
1. **Inline data** - File data stored directly in the metadata-pair.
|
||||
|
||||
---
|
||||
#### `0x202` LFS_TYPE_CTZSTRUCT
|
||||
|
||||
Gives the id a CTZ skip-list data structure.
|
||||
|
||||
CTZ skip-lists store files that can not fit in the metadata pair. These files
|
||||
are stored in a skip-list in reverse, with a pointer to the head of the
|
||||
skip-list. Note that the head of the skip-list and the file size is enough
|
||||
information to read the file.
|
||||
|
||||
How exactly CTZ skip-lists work is a bit complicated. A full explanation can be
|
||||
found in the [DESIGN.md](DESIGN.md#ctz-skip-lists).
|
||||
|
||||
A quick summary: For every _n_‍th block where _n_ is divisible by
|
||||
2‍_ˣ_, that block contains a pointer to block _n_-2‍_ˣ_.
|
||||
These pointers are stored in increasing order of _x_ in each block of the file
|
||||
before the actual data.
|
||||
|
||||
```
|
||||
|
|
||||
v
|
||||
.--------. .--------. .--------. .--------. .--------. .--------.
|
||||
| A |<-| D |<-| G |<-| J |<-| M |<-| P |
|
||||
| B |<-| E |--| H |<-| K |--| N | | Q |
|
||||
| C |<-| F |--| I |--| L |--| O | | |
|
||||
'--------' '--------' '--------' '--------' '--------' '--------'
|
||||
block 0 block 1 block 2 block 3 block 4 block 5
|
||||
1 skip 2 skips 1 skip 3 skips 1 skip
|
||||
```
|
||||
|
||||
Note that the maximum number of pointers in a block is bounded by the maximum
|
||||
file size divided by the block size. With 32 bits for file size, this results
|
||||
in a minimum block size of 104 bytes.
|
||||
|
||||
Layout of the CTZ-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
|
||||
^ ^ ^ ^ ^ ^- file size
|
||||
| | | | '-------------------- file head
|
||||
| | | '- size (8)
|
||||
| | '------ id
|
||||
| '------------ type (0x202)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
CTZ-struct fields:
|
||||
|
||||
1. **File head (32-bits)** - Pointer to the block that is the head of the
|
||||
file's CTZ skip-list.
|
||||
|
||||
2. **File size (32-bits)** - Size of the file in bytes.
|
||||
|
||||
---
|
||||
#### `0x3xx` LFS_TYPE_USERATTR
|
||||
|
||||
Attaches a user attribute to an id.
|
||||
|
||||
littlefs has a concept of "user attributes". These are small user-provided
|
||||
attributes that can be used to store things like timestamps, hashes,
|
||||
permissions, etc.
|
||||
|
||||
Each user attribute is uniquely identified by an 8-bit type which is stored in
|
||||
the chunk field, and the user attribute itself can be found in the tag's data.
|
||||
|
||||
There are currently no standard user attributes and a portable littlefs
|
||||
implementation should work with any user attributes missing.
|
||||
|
||||
Layout of the user-attr tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][--- variable length ---]
|
||||
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
|
||||
^ ^ ^ ^ ^- size ^- attr data
|
||||
| | | '------ id
|
||||
| | '----------- attr type
|
||||
| '-------------- type1 (0x3)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
User-attr fields:
|
||||
|
||||
1. **Attr type (8-bits)** - Type of the user attributes.
|
||||
|
||||
2. **Attr data** - The data associated with the user attribute.
|
||||
|
||||
---
|
||||
#### `0x6xx` LFS_TYPE_TAIL
|
||||
|
||||
Provides the tail pointer for the metadata pair itself.
|
||||
|
||||
The metadata pair's tail pointer is used in littlefs for a linked-list
|
||||
containing all metadata pairs. The chunk field contains the type of the tail,
|
||||
which indicates if the following metadata pair is a part of the directory
|
||||
(hard-tail) or only used to traverse the filesystem (soft-tail).
|
||||
|
||||
```
|
||||
.--------.
|
||||
.| dir A |-.
|
||||
||softtail| |
|
||||
.--------| |-'
|
||||
| |'--------'
|
||||
| '---|--|-'
|
||||
| .-' '-------------.
|
||||
| v v
|
||||
| .--------. .--------. .--------.
|
||||
'->| dir B |->| dir B |->| dir C |
|
||||
||hardtail| ||softtail| || |
|
||||
|| | || | || |
|
||||
|'--------' |'--------' |'--------'
|
||||
'--------' '--------' '--------'
|
||||
```
|
||||
|
||||
Currently any type supersedes any other preceding tails in the metadata pair,
|
||||
but this may change if additional metadata pair state is added.
|
||||
|
||||
A note about the metadata pair linked-list: Normally, this linked-list contains
|
||||
every metadata pair in the filesystem. However, there are some operations that
|
||||
can cause this linked-list to become out of sync if a power-loss were to occur.
|
||||
When this happens, littlefs sets the "sync" flag in the global state. How
|
||||
exactly this flag is stored is described below.
|
||||
|
||||
When the sync flag is set:
|
||||
|
||||
1. The linked-list may contain an orphaned directory that has been removed in
|
||||
the filesystem.
|
||||
2. The linked-list may contain a metadata pair with a bad block that has been
|
||||
replaced in the filesystem.
|
||||
|
||||
If the sync flag is set, the threaded linked-list must be checked for these
|
||||
errors before it can be used reliably. Note that the threaded linked-list can
|
||||
be ignored if littlefs is mounted read-only.
|
||||
|
||||
Layout of the tail tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1| 3| 8 | 10 | 10 ][--- 64 ---]
|
||||
^ ^ ^ ^ ^- size (8) ^- metadata pair
|
||||
| | | '------ id
|
||||
| | '---------- tail type
|
||||
| '------------- type1 (0x6)
|
||||
'---------------- valid bit
|
||||
```
|
||||
|
||||
Tail fields:
|
||||
|
||||
1. **Tail type (8-bits)** - Type of the tail pointer.
|
||||
|
||||
2. **Metadata pair (8-bytes)** - Pointer to the next metadata-pair.
|
||||
|
||||
---
|
||||
#### `0x600` LFS_TYPE_SOFTTAIL
|
||||
|
||||
Provides a tail pointer that points to the next metadata pair in the
|
||||
filesystem.
|
||||
|
||||
In this case, the next metadata pair is not a part of our current directory
|
||||
and should only be followed when traversing the entire filesystem.
|
||||
|
||||
---
|
||||
#### `0x601` LFS_TYPE_HARDTAIL
|
||||
|
||||
Provides a tail pointer that points to the next metadata pair in the
|
||||
directory.
|
||||
|
||||
In this case, the next metadata pair belongs to the current directory. Note
|
||||
that because directories in littlefs are sorted alphabetically, the next
|
||||
metadata pair should only contain filenames greater than any filename in the
|
||||
current pair.
|
||||
|
||||
---
|
||||
#### `0x7xx` LFS_TYPE_GSTATE
|
||||
|
||||
Provides delta bits for global state entries.
|
||||
|
||||
littlefs has a concept of "global state". This is a small set of state that
|
||||
can be updated by a commit to _any_ metadata pair in the filesystem.
|
||||
|
||||
The way this works is that the global state is stored as a set of deltas
|
||||
distributed across the filesystem such that the global state can be found by
|
||||
the xor-sum of these deltas.
|
||||
|
||||
```
|
||||
.--------. .--------. .--------. .--------. .--------.
|
||||
.| |->| gdelta |->| |->| gdelta |->| gdelta |
|
||||
|| | || 0x23 | || | || 0xff | || 0xce |
|
||||
|| | || | || | || | || |
|
||||
|'--------' |'--------' |'--------' |'--------' |'--------'
|
||||
'--------' '----|---' '--------' '----|---' '----|---'
|
||||
v v v
|
||||
0x00 --> xor ------------------> xor ------> xor --> gstate = 0x12
|
||||
```
|
||||
|
||||
Note that storing globals this way is very expensive in terms of storage usage,
|
||||
so any global state should be kept very small.
|
||||
|
||||
The size and format of each piece of global state depends on the type, which
|
||||
is stored in the chunk field. Currently, the only global state is move state,
|
||||
which is outlined below.
|
||||
|
||||
---
|
||||
#### `0x7ff` LFS_TYPE_MOVESTATE
|
||||
|
||||
Provides delta bits for the global move state.
|
||||
|
||||
The move state in littlefs is used to store info about operations that could
|
||||
cause the filesystem to go out of sync if the power is lost. The operations
|
||||
where this could occur is moves of files between metadata pairs and any
|
||||
operation that changes metadata pairs on the threaded linked-list.
|
||||
|
||||
In the case of moves, the move state contains a tag + metadata pair describing
|
||||
the source of the ongoing move. If this tag is non-zero, that means that power
|
||||
was lost during a move, and the file exists in two different locations. If this
|
||||
happens, the source of the move should be considered deleted, and the move
|
||||
should be completed (the source should be deleted) before any other write
|
||||
operations to the filesystem.
|
||||
|
||||
In the case of operations to the threaded linked-list, a single "sync" bit is
|
||||
used to indicate that a modification is ongoing. If this sync flag is set, the
|
||||
threaded linked-list will need to be checked for errors before it can be used
|
||||
reliably. The exact cases to check for are described above in the tail tag.
|
||||
|
||||
Layout of the move state:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][1|- 11 -| 10 | 10 |--- 64 ---]
|
||||
^ ^ ^ ^ ^ ^ ^ ^- padding (0) ^- metadata pair
|
||||
| | | | | | '------ move id
|
||||
| | | | | '------------ move type
|
||||
| | | | '----------------- sync bit
|
||||
| | | |
|
||||
| | | '- size (12)
|
||||
| | '------ id (0x3ff)
|
||||
| '------------ type (0x7ff)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Move state fields:
|
||||
|
||||
1. **Sync bit (1-bit)** - Indicates if the metadata pair threaded linked-list
|
||||
is in-sync. If set, the threaded linked-list should be checked for errors.
|
||||
|
||||
2. **Move type (11-bits)** - Type of move being performed. Must be either
|
||||
`0x000`, indicating no move, or `0x4ff` indicating the source file should
|
||||
be deleted.
|
||||
|
||||
3. **Move id (10-bits)** - The file id being moved.
|
||||
|
||||
4. **Metadata pair (8-bytes)** - Pointer to the metadata-pair containing
|
||||
the move.
|
||||
|
||||
---
|
||||
#### `0x5xx` LFS_TYPE_CRC
|
||||
|
||||
Last but not least, the CRC tag marks the end of a commit and provides a
|
||||
checksum for any commits to the metadata block.
|
||||
|
||||
The first 32-bits of the data contain a CRC-32 with a polynomial of
|
||||
`0x04c11db7` initialized with `0xffffffff`. This CRC provides a checksum for
|
||||
all metadata since the previous CRC tag, including the CRC tag itself. For
|
||||
the first commit, this includes the revision count for the metadata block.
|
||||
|
||||
However, the size of the data is not limited to 32-bits. The data field may
|
||||
be larger to pad the commit to the next program-aligned boundary.
|
||||
|
||||
In addition, the CRC tag's chunk field contains a set of flags which can
|
||||
change the behaviour of commits. Currently the only flag in use is the lowest
|
||||
bit, which determines the expected state of the valid bit for any following
|
||||
tags. This is used to guarantee that unwritten storage in a metadata block
|
||||
will be detected as invalid.
|
||||
|
||||
Layout of the CRC tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|--- variable length ---]
|
||||
[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size * 8 - 32) ---]
|
||||
^ ^ ^ ^ ^ ^- crc ^- padding
|
||||
| | | | '- size
|
||||
| | | '------ id (0x3ff)
|
||||
| | '----------- valid state
|
||||
| '-------------- type1 (0x5)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
CRC fields:
|
||||
|
||||
1. **Valid state (1-bit)** - Indicates the expected value of the valid bit for
|
||||
any tags in the next commit.
|
||||
|
||||
2. **CRC (32-bits)** - CRC-32 with a polynomial of `0x04c11db7` initialized
|
||||
with `0xffffffff`.
|
||||
|
||||
3. **Padding** - Padding to the next program-aligned boundary. No guarantees
|
||||
are made about the contents.
|
||||
|
||||
---
|
219
components/fs/littlefs/littlefs/bd/lfs_filebd.c
Normal file
219
components/fs/littlefs/littlefs/bd/lfs_filebd.c
Normal file
|
@ -0,0 +1,219 @@
|
|||
/*
|
||||
* Block device emulated in a file
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include "bd/lfs_filebd.h"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_filebd_config *bdcfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\", "
|
||||
"%p {.erase_value=%"PRId32"})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path, (void*)bdcfg, bdcfg->erase_value);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
|
||||
// open file
|
||||
#ifdef _WIN32
|
||||
bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666);
|
||||
#else
|
||||
bd->fd = open(path, O_RDWR | O_CREAT, 0666);
|
||||
#endif
|
||||
|
||||
if (bd->fd < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\")",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path);
|
||||
static const struct lfs_filebd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_filebd_createcfg(cfg, path, &defaults);
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_filebd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
int err = close(bd->fd);
|
||||
if (err < 0) {
|
||||
err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err);
|
||||
return err;
|
||||
}
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(off % cfg->read_size == 0);
|
||||
LFS_ASSERT(size % cfg->read_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// zero for reproducibility (in case file is truncated)
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(buffer, bd->cfg->erase_value, size);
|
||||
}
|
||||
|
||||
// read
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = read(bd->fd, buffer, size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(off % cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// check that data was erased? only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
for (lfs_off_t i = 0; i < size; i++) {
|
||||
uint8_t c;
|
||||
ssize_t res2 = read(bd->fd, &c, 1);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_ASSERT(c == bd->cfg->erase_value);
|
||||
}
|
||||
}
|
||||
|
||||
// program data
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->fd, buffer, size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// erase, only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i++) {
|
||||
ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_sync(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
|
||||
// file sync
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
#ifdef _WIN32
|
||||
int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1;
|
||||
#else
|
||||
int err = fsync(bd->fd);
|
||||
#endif
|
||||
if (err) {
|
||||
err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
74
components/fs/littlefs/littlefs/bd/lfs_filebd.h
Normal file
74
components/fs/littlefs/littlefs/bd/lfs_filebd.h
Normal file
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Block device emulated in a file
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_FILEBD_H
|
||||
#define LFS_FILEBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
// Block device specific tracing
|
||||
#ifdef LFS_FILEBD_YES_TRACE
|
||||
#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
|
||||
#else
|
||||
#define LFS_FILEBD_TRACE(...)
|
||||
#endif
|
||||
|
||||
// filebd config (optional)
|
||||
struct lfs_filebd_config {
|
||||
// 8-bit erase value to use for simulating erases. -1 does not simulate
|
||||
// erases, which can speed up testing by avoiding all the extra block-device
|
||||
// operations to store the erase value.
|
||||
int32_t erase_value;
|
||||
};
|
||||
|
||||
// filebd state
|
||||
typedef struct lfs_filebd {
|
||||
int fd;
|
||||
const struct lfs_filebd_config *cfg;
|
||||
} lfs_filebd_t;
|
||||
|
||||
|
||||
// Create a file block device using the geometry in lfs_config
|
||||
int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
|
||||
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_filebd_config *bdcfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_filebd_destroy(const struct lfs_config *cfg);
|
||||
|
||||
// Read a block
|
||||
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_filebd_sync(const struct lfs_config *cfg);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
143
components/fs/littlefs/littlefs/bd/lfs_rambd.c
Normal file
143
components/fs/littlefs/littlefs/bd/lfs_rambd.c
Normal file
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Block device emulated in RAM
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include "bd/lfs_rambd.h"
|
||||
|
||||
int lfs_rambd_createcfg(const struct lfs_config *cfg,
|
||||
const struct lfs_rambd_config *bdcfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"%p {.erase_value=%"PRId32", .buffer=%p})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
(void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
|
||||
// allocate buffer?
|
||||
if (bd->cfg->buffer) {
|
||||
bd->buffer = bd->cfg->buffer;
|
||||
} else {
|
||||
bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count);
|
||||
if (!bd->buffer) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
// zero for reproducibility?
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(bd->buffer, bd->cfg->erase_value,
|
||||
cfg->block_size * cfg->block_count);
|
||||
} else {
|
||||
memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
|
||||
}
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_create(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
|
||||
static const struct lfs_rambd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_rambd_createcfg(cfg, &defaults);
|
||||
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_rambd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
|
||||
// clean up memory
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
if (!bd->cfg->buffer) {
|
||||
lfs_free(bd->buffer);
|
||||
}
|
||||
LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(off % cfg->read_size == 0);
|
||||
LFS_ASSERT(size % cfg->read_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// read data
|
||||
memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(off % cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// check that data was erased? only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
for (lfs_off_t i = 0; i < size; i++) {
|
||||
LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
|
||||
bd->cfg->erase_value);
|
||||
}
|
||||
}
|
||||
|
||||
// program data
|
||||
memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// erase, only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(&bd->buffer[block*cfg->block_size],
|
||||
bd->cfg->erase_value, cfg->block_size);
|
||||
}
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_sync(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
|
||||
// sync does nothing because we aren't backed by anything real
|
||||
(void)cfg;
|
||||
LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
76
components/fs/littlefs/littlefs/bd/lfs_rambd.h
Normal file
76
components/fs/littlefs/littlefs/bd/lfs_rambd.h
Normal file
|
@ -0,0 +1,76 @@
|
|||
/*
|
||||
* Block device emulated in RAM
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_RAMBD_H
|
||||
#define LFS_RAMBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
// Block device specific tracing
|
||||
#ifdef LFS_RAMBD_YES_TRACE
|
||||
#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
|
||||
#else
|
||||
#define LFS_RAMBD_TRACE(...)
|
||||
#endif
|
||||
|
||||
// rambd config (optional)
|
||||
struct lfs_rambd_config {
|
||||
// 8-bit erase value to simulate erasing with. -1 indicates no erase
|
||||
// occurs, which is still a valid block device
|
||||
int32_t erase_value;
|
||||
|
||||
// Optional statically allocated buffer for the block device.
|
||||
void *buffer;
|
||||
};
|
||||
|
||||
// rambd state
|
||||
typedef struct lfs_rambd {
|
||||
uint8_t *buffer;
|
||||
const struct lfs_rambd_config *cfg;
|
||||
} lfs_rambd_t;
|
||||
|
||||
|
||||
// Create a RAM block device using the geometry in lfs_config
|
||||
int lfs_rambd_create(const struct lfs_config *cfg);
|
||||
int lfs_rambd_createcfg(const struct lfs_config *cfg,
|
||||
const struct lfs_rambd_config *bdcfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_rambd_destroy(const struct lfs_config *cfg);
|
||||
|
||||
// Read a block
|
||||
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_rambd_sync(const struct lfs_config *cfg);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
303
components/fs/littlefs/littlefs/bd/lfs_testbd.c
Normal file
303
components/fs/littlefs/littlefs/bd/lfs_testbd.c
Normal file
|
@ -0,0 +1,303 @@
|
|||
/*
|
||||
* Testing block device, wraps filebd and rambd while providing a bunch
|
||||
* of hooks for testing littlefs in various conditions.
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include "bd/lfs_testbd.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_testbd_config *bdcfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\", "
|
||||
"%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
|
||||
".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
|
||||
".buffer=%p, .wear_buffer=%p})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
|
||||
bdcfg->badblock_behavior, bdcfg->power_cycles,
|
||||
bdcfg->buffer, bdcfg->wear_buffer);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
|
||||
// setup testing things
|
||||
bd->persist = path;
|
||||
bd->power_cycles = bd->cfg->power_cycles;
|
||||
|
||||
if (bd->cfg->erase_cycles) {
|
||||
if (bd->cfg->wear_buffer) {
|
||||
bd->wear = bd->cfg->wear_buffer;
|
||||
} else {
|
||||
bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count);
|
||||
if (!bd->wear) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count);
|
||||
}
|
||||
|
||||
// create underlying block device
|
||||
if (bd->persist) {
|
||||
bd->u.file.cfg = (struct lfs_filebd_config){
|
||||
.erase_value = bd->cfg->erase_value,
|
||||
};
|
||||
int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
|
||||
return err;
|
||||
} else {
|
||||
bd->u.ram.cfg = (struct lfs_rambd_config){
|
||||
.erase_value = bd->cfg->erase_value,
|
||||
.buffer = bd->cfg->buffer,
|
||||
};
|
||||
int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
int lfs_testbd_create(const struct lfs_config *cfg, const char *path) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\")",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path);
|
||||
static const struct lfs_testbd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_testbd_createcfg(cfg, path, &defaults);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_testbd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) {
|
||||
lfs_free(bd->wear);
|
||||
}
|
||||
|
||||
if (bd->persist) {
|
||||
int err = lfs_filebd_destroy(cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
|
||||
return err;
|
||||
} else {
|
||||
int err = lfs_rambd_destroy(cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal mapping to block devices ///
|
||||
static int lfs_testbd_rawread(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_read(cfg, block, off, buffer, size);
|
||||
} else {
|
||||
return lfs_rambd_read(cfg, block, off, buffer, size);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawprog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_prog(cfg, block, off, buffer, size);
|
||||
} else {
|
||||
return lfs_rambd_prog(cfg, block, off, buffer, size);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawerase(const struct lfs_config *cfg,
|
||||
lfs_block_t block) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_erase(cfg, block);
|
||||
} else {
|
||||
return lfs_rambd_erase(cfg, block);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawsync(const struct lfs_config *cfg) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_sync(cfg);
|
||||
} else {
|
||||
return lfs_rambd_sync(cfg);
|
||||
}
|
||||
}
|
||||
|
||||
/// block device API ///
|
||||
/// block device API ///
// Read a region of a block, simulating read failures on bad blocks when
// badblock_behavior is LFS_TESTBD_BADBLOCK_READERROR.
//
// Returns 0 on success, LFS_ERR_CORRUPT for a simulated bad block, or the
// underlying backend's error code.
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    LFS_TESTBD_TRACE("lfs_testbd_read(%p, "
                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
            (void*)cfg, block, off, buffer, size);
    lfs_testbd_t *bd = cfg->context;

    // check if read is valid: aligned to read_size and in range
    LFS_ASSERT(off % cfg->read_size == 0);
    LFS_ASSERT(size % cfg->read_size == 0);
    LFS_ASSERT(block < cfg->block_count);

    // block bad? only possible when wear simulation (erase_cycles) is on,
    // and only read-error behavior makes reads fail
    if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles &&
            bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_READERROR) {
        LFS_TESTBD_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT);
        return LFS_ERR_CORRUPT;
    }

    // read through to the backing block device
    int err = lfs_testbd_rawread(cfg, block, off, buffer, size);
    LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err);
    return err;
}
|
||||
|
||||
// Program a region of a block, simulating bad blocks and power-loss.
//
// The block must have previously been erased. Returns 0 on success,
// LFS_ERR_CORRUPT for a simulated prog failure, or the backend's error code.
// May not return at all: when the configured power_cycles count reaches
// zero, the process exits with code 33 to simulate power loss.
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    LFS_TESTBD_TRACE("lfs_testbd_prog(%p, "
                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
            (void*)cfg, block, off, buffer, size);
    lfs_testbd_t *bd = cfg->context;

    // check if write is valid: aligned to prog_size and in range
    LFS_ASSERT(off % cfg->prog_size == 0);
    LFS_ASSERT(size % cfg->prog_size == 0);
    LFS_ASSERT(block < cfg->block_count);

    // block bad? only possible when wear simulation (erase_cycles) is on
    if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) {
        if (bd->cfg->badblock_behavior ==
                LFS_TESTBD_BADBLOCK_PROGERROR) {
            // report the failure to the caller
            LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT);
            return LFS_ERR_CORRUPT;
        } else if (bd->cfg->badblock_behavior ==
                    LFS_TESTBD_BADBLOCK_PROGNOOP ||
                bd->cfg->badblock_behavior ==
                    LFS_TESTBD_BADBLOCK_ERASENOOP) {
            // silently drop the write, leaving the block unchanged
            LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
            return 0;
        }
        // other behaviors (erase-error, read-error) let the prog through
    }

    // prog through to the backing block device
    int err = lfs_testbd_rawprog(cfg, block, off, buffer, size);
    if (err) {
        LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
        return err;
    }

    // lose power? counts down one per successful write operation
    if (bd->power_cycles > 0) {
        bd->power_cycles -= 1;
        if (bd->power_cycles == 0) {
            // sync to make sure we persist the last changes
            LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
            // simulate power loss; NOTE(review): exit code 33 appears to be
            // a sentinel recognized by the test harness — confirm
            exit(33);
        }
    }

    LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
    return 0;
}
|
||||
|
||||
// Erase a block, tracking simulated wear and simulating bad blocks and
// power-loss.
//
// A block must be erased before being programmed; the state of an erased
// block is undefined. Returns 0 on success, LFS_ERR_CORRUPT for a simulated
// erase failure, or the backend's error code. May not return at all: when
// the configured power_cycles count reaches zero, the process exits with
// code 33 to simulate power loss.
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
    LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
    lfs_testbd_t *bd = cfg->context;

    // check if erase is valid
    LFS_ASSERT(block < cfg->block_count);

    // block bad? only possible when wear simulation (erase_cycles) is on
    if (bd->cfg->erase_cycles) {
        if (bd->wear[block] >= bd->cfg->erase_cycles) {
            if (bd->cfg->badblock_behavior ==
                    LFS_TESTBD_BADBLOCK_ERASEERROR) {
                // report the failure to the caller
                LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT);
                return LFS_ERR_CORRUPT;
            } else if (bd->cfg->badblock_behavior ==
                    LFS_TESTBD_BADBLOCK_ERASENOOP) {
                // silently drop the erase, leaving the block unchanged
                LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0);
                return 0;
            }
        } else {
            // block still good, mark wear
            bd->wear[block] += 1;
        }
    }

    // erase through to the backing block device
    int err = lfs_testbd_rawerase(cfg, block);
    if (err) {
        LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err);
        return err;
    }

    // lose power? counts down one per successful write operation
    if (bd->power_cycles > 0) {
        bd->power_cycles -= 1;
        if (bd->power_cycles == 0) {
            // sync to make sure we persist the last changes
            LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
            // simulate power loss
            exit(33);
        }
    }

    // fixed: this trace previously said "lfs_testbd_prog", mislabeling
    // erase results in trace logs (copy-paste from lfs_testbd_prog)
    LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0);
    return 0;
}
|
||||
|
||||
// Sync the block device, flushing the backing backend
int lfs_testbd_sync(const struct lfs_config *cfg) {
    LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg);
    int res = lfs_testbd_rawsync(cfg);
    LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", res);
    return res;
}
|
||||
|
||||
|
||||
/// simulated wear operations ///
|
||||
/// simulated wear operations ///
// Get the simulated wear (erase count) of a block.
//
// Only valid when wear simulation is enabled (cfg->erase_cycles != 0);
// asserted below rather than returning an error.
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
        lfs_block_t block) {
    LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block);
    lfs_testbd_t *bd = cfg->context;

    // check if block is valid and wear simulation is enabled
    LFS_ASSERT(bd->cfg->erase_cycles);
    LFS_ASSERT(block < cfg->block_count);

    LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]);
    return bd->wear[block];
}
|
||||
|
||||
// Manually set the simulated wear (erase count) of a block, e.g. to force
// a block bad (wear >= erase_cycles) from a test.
//
// Only valid when wear simulation is enabled (cfg->erase_cycles != 0);
// asserted below rather than returning an error. Always returns 0.
int lfs_testbd_setwear(const struct lfs_config *cfg,
        lfs_block_t block, lfs_testbd_wear_t wear) {
    LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block);
    lfs_testbd_t *bd = cfg->context;

    // check if block is valid and wear simulation is enabled
    LFS_ASSERT(bd->cfg->erase_cycles);
    LFS_ASSERT(block < cfg->block_count);

    bd->wear[block] = wear;

    LFS_TESTBD_TRACE("lfs_testbd_setwear -> %d", 0);
    return 0;
}
|
142
components/fs/littlefs/littlefs/bd/lfs_testbd.h
Normal file
142
components/fs/littlefs/littlefs/bd/lfs_testbd.h
Normal file
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* Testing block device, wraps filebd and rambd while providing a bunch
|
||||
* of hooks for testing littlefs in various conditions.
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_TESTBD_H
|
||||
#define LFS_TESTBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
#include "bd/lfs_rambd.h"
|
||||
#include "bd/lfs_filebd.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
// Block device specific tracing
|
||||
#ifdef LFS_TESTBD_YES_TRACE
|
||||
#define LFS_TESTBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
|
||||
#else
|
||||
#define LFS_TESTBD_TRACE(...)
|
||||
#endif
|
||||
|
||||
// Mode determining how "bad blocks" behave during testing. This simulates
// some real-world circumstances such as progs not sticking (prog-noop),
// a readonly disk (erase-noop), and ECC failures (read-error).
//
// Note that read-noop is not allowed. Read _must_ return a consistent (but
// may be arbitrary) value on every read.
enum lfs_testbd_badblock_behavior {
    LFS_TESTBD_BADBLOCK_PROGERROR,  // progs to bad blocks fail with LFS_ERR_CORRUPT
    LFS_TESTBD_BADBLOCK_ERASEERROR, // erases of bad blocks fail with LFS_ERR_CORRUPT
    LFS_TESTBD_BADBLOCK_READERROR,  // reads of bad blocks fail with LFS_ERR_CORRUPT
    LFS_TESTBD_BADBLOCK_PROGNOOP,   // progs to bad blocks silently do nothing
    LFS_TESTBD_BADBLOCK_ERASENOOP,  // erases (and progs) of bad blocks silently do nothing
};
|
||||
|
||||
// Type for measuring wear
|
||||
typedef uint32_t lfs_testbd_wear_t;
|
||||
typedef int32_t lfs_testbd_swear_t;
|
||||
|
||||
// testbd config, this is required for testing
struct lfs_testbd_config {
    // 8-bit erase value to use for simulating erases. -1 does not simulate
    // erases, which can speed up testing by avoiding all the extra block-device
    // operations to store the erase value.
    int32_t erase_value;

    // Number of erase cycles before a block becomes "bad". The exact behavior
    // of bad blocks is controlled by badblock_behavior. 0 disables wear
    // simulation.
    uint32_t erase_cycles;

    // The mode determining how bad blocks fail, one of
    // enum lfs_testbd_badblock_behavior.
    uint8_t badblock_behavior;

    // Number of write operations (erase/prog) before forcefully killing
    // the program with exit. Simulates power-loss. 0 disables.
    uint32_t power_cycles;

    // Optional buffer for RAM block device.
    void *buffer;

    // Optional buffer for wear tracking; presumably one lfs_testbd_wear_t
    // per block — confirm against lfs_testbd_createcfg. If NULL and wear
    // simulation is enabled, the array is heap-allocated and freed by
    // lfs_testbd_destroy.
    void *wear_buffer;
};
|
||||
|
||||
// testbd state
typedef struct lfs_testbd {
    // underlying backend; exactly one member of the union is active,
    // selected by the persist flag below
    union {
        struct {
            lfs_filebd_t bd;
            struct lfs_filebd_config cfg;
        } file;
        struct {
            lfs_rambd_t bd;
            struct lfs_rambd_config cfg;
        } ram;
    } u;

    // true if file-backed (u.file), false if RAM-backed (u.ram)
    bool persist;
    // remaining write operations until simulated power-loss; 0 disables
    uint32_t power_cycles;
    // per-block erase counts, only valid when cfg->erase_cycles != 0
    lfs_testbd_wear_t *wear;

    // configuration provided at creation, dereferenced by every operation
    // so it must remain valid for the device's lifetime
    const struct lfs_testbd_config *cfg;
} lfs_testbd_t;
|
||||
|
||||
|
||||
/// Block device API ///
|
||||
|
||||
// Create a test block device using the geometry in lfs_config
|
||||
//
|
||||
// Note that filebd is used if a path is provided, if path is NULL
|
||||
// testbd will use rambd which can be much faster.
|
||||
int lfs_testbd_create(const struct lfs_config *cfg, const char *path);
|
||||
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_testbd_config *bdcfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_testbd_destroy(const struct lfs_config *cfg);
|
||||
|
||||
// Read a block
|
||||
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_testbd_sync(const struct lfs_config *cfg);
|
||||
|
||||
|
||||
/// Additional extended API for driving test features ///
|
||||
|
||||
// Get simulated wear on a given block
|
||||
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
|
||||
lfs_block_t block);
|
||||
|
||||
// Manually set simulated wear on a given block
|
||||
int lfs_testbd_setwear(const struct lfs_config *cfg,
|
||||
lfs_block_t block, lfs_testbd_wear_t wear);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
5817
components/fs/littlefs/littlefs/lfs.c
Normal file
5817
components/fs/littlefs/littlefs/lfs.c
Normal file
File diff suppressed because it is too large
Load diff
701
components/fs/littlefs/littlefs/lfs.h
Normal file
701
components/fs/littlefs/littlefs/lfs.h
Normal file
|
@ -0,0 +1,701 @@
|
|||
/*
|
||||
* The little filesystem
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_H
|
||||
#define LFS_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
/// Version info ///
|
||||
|
||||
// Software library version
|
||||
// Major (top-nibble), incremented on backwards incompatible changes
|
||||
// Minor (bottom-nibble), incremented on feature additions
|
||||
#define LFS_VERSION 0x00020005
|
||||
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
|
||||
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
|
||||
|
||||
// Version of On-disk data structures
|
||||
// Major (top-nibble), incremented on backwards incompatible changes
|
||||
// Minor (bottom-nibble), incremented on feature additions
|
||||
#define LFS_DISK_VERSION 0x00020000
|
||||
#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
|
||||
#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
|
||||
|
||||
|
||||
/// Definitions ///
|
||||
|
||||
// Type definitions
|
||||
typedef uint32_t lfs_size_t;
|
||||
typedef uint32_t lfs_off_t;
|
||||
|
||||
typedef int32_t lfs_ssize_t;
|
||||
typedef int32_t lfs_soff_t;
|
||||
|
||||
typedef uint32_t lfs_block_t;
|
||||
|
||||
// Maximum name size in bytes, may be redefined to reduce the size of the
|
||||
// info struct. Limited to <= 1022. Stored in superblock and must be
|
||||
// respected by other littlefs drivers.
|
||||
#ifndef LFS_NAME_MAX
|
||||
#define LFS_NAME_MAX 255
|
||||
#endif
|
||||
|
||||
// Maximum size of a file in bytes, may be redefined to limit to support other
|
||||
// drivers. Limited on disk to <= 4294967296. However, above 2147483647 the
|
||||
// functions lfs_file_seek, lfs_file_size, and lfs_file_tell will return
|
||||
// incorrect values due to using signed integers. Stored in superblock and
|
||||
// must be respected by other littlefs drivers.
|
||||
#ifndef LFS_FILE_MAX
|
||||
#define LFS_FILE_MAX 2147483647
|
||||
#endif
|
||||
|
||||
// Maximum size of custom attributes in bytes, may be redefined, but there is
|
||||
// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022.
|
||||
#ifndef LFS_ATTR_MAX
|
||||
#define LFS_ATTR_MAX 1022
|
||||
#endif
|
||||
|
||||
// Possible error codes, these are negative to allow
// valid positive return values
// (values mirror negated POSIX errno codes — NOTE(review): presumed from
// the numeric values; littlefs only guarantees the codes themselves)
enum lfs_error {
    LFS_ERR_OK          = 0,    // No error
    LFS_ERR_IO          = -5,   // Error during device operation
    LFS_ERR_CORRUPT     = -84,  // Corrupted
    LFS_ERR_NOENT       = -2,   // No directory entry
    LFS_ERR_EXIST       = -17,  // Entry already exists
    LFS_ERR_NOTDIR      = -20,  // Entry is not a dir
    LFS_ERR_ISDIR       = -21,  // Entry is a dir
    LFS_ERR_NOTEMPTY    = -39,  // Dir is not empty
    LFS_ERR_BADF        = -9,   // Bad file number
    LFS_ERR_FBIG        = -27,  // File too large
    LFS_ERR_INVAL       = -22,  // Invalid parameter
    LFS_ERR_NOSPC       = -28,  // No space left on device
    LFS_ERR_NOMEM       = -12,  // No more memory available
    LFS_ERR_NOATTR      = -61,  // No data/attr available
    LFS_ERR_NAMETOOLONG = -36,  // File name too long
};
|
||||
|
||||
// File types
enum lfs_type {
    // file types
    LFS_TYPE_REG            = 0x001,
    LFS_TYPE_DIR            = 0x002,

    // internally used types
    LFS_TYPE_SPLICE         = 0x400,
    LFS_TYPE_NAME           = 0x000,
    LFS_TYPE_STRUCT         = 0x200,
    LFS_TYPE_USERATTR       = 0x300,
    LFS_TYPE_FROM           = 0x100,
    LFS_TYPE_TAIL           = 0x600,
    LFS_TYPE_GLOBALS        = 0x700,
    LFS_TYPE_CRC            = 0x500,

    // internally used type specializations
    // (note some values alias the base types above, e.g. DIRSTRUCT==STRUCT
    // and SOFTTAIL==TAIL)
    LFS_TYPE_CREATE         = 0x401,
    LFS_TYPE_DELETE         = 0x4ff,
    LFS_TYPE_SUPERBLOCK     = 0x0ff,
    LFS_TYPE_DIRSTRUCT      = 0x200,
    LFS_TYPE_CTZSTRUCT      = 0x202,
    LFS_TYPE_INLINESTRUCT   = 0x201,
    LFS_TYPE_SOFTTAIL       = 0x600,
    LFS_TYPE_HARDTAIL       = 0x601,
    LFS_TYPE_MOVESTATE      = 0x7ff,

    // internal chip sources
    LFS_FROM_NOOP           = 0x000,
    LFS_FROM_MOVE           = 0x101,
    LFS_FROM_USERATTRS      = 0x102,
};
|
||||
|
||||
// File open flags
enum lfs_open_flags {
    // open flags
    LFS_O_RDONLY = 1,         // Open a file as read only
#ifndef LFS_READONLY
    LFS_O_WRONLY = 2,         // Open a file as write only
    LFS_O_RDWR   = 3,         // Open a file as read and write
    LFS_O_CREAT  = 0x0100,    // Create a file if it does not exist
    LFS_O_EXCL   = 0x0200,    // Fail if a file already exists
    LFS_O_TRUNC  = 0x0400,    // Truncate the existing file to zero size
    LFS_O_APPEND = 0x0800,    // Move to end of file on every write
#endif

    // internally used flags
#ifndef LFS_READONLY
    LFS_F_DIRTY   = 0x010000, // File does not match storage
    LFS_F_WRITING = 0x020000, // File has been written since last flush
#endif
    LFS_F_READING = 0x040000, // File has been read since last flush
#ifndef LFS_READONLY
    LFS_F_ERRED   = 0x080000, // An error occurred during write
#endif
    LFS_F_INLINE  = 0x100000, // Currently inlined in directory entry
};
|
||||
|
||||
// File seek flags
enum lfs_whence_flags {
    LFS_SEEK_SET = 0,   // Seek relative to an absolute position
    LFS_SEEK_CUR = 1,   // Seek relative to the current file position
    LFS_SEEK_END = 2,   // Seek relative to the end of the file
};
|
||||
|
||||
|
||||
// Configuration provided during initialization of the littlefs
struct lfs_config {
    // Opaque user provided context that can be used to pass
    // information to the block device operations
    void *context;

    // Read a region in a block. Negative error codes are propagated
    // to the user.
    int (*read)(const struct lfs_config *c, lfs_block_t block,
            lfs_off_t off, void *buffer, lfs_size_t size);

    // Program a region in a block. The block must have previously
    // been erased. Negative error codes are propagated to the user.
    // May return LFS_ERR_CORRUPT if the block should be considered bad.
    int (*prog)(const struct lfs_config *c, lfs_block_t block,
            lfs_off_t off, const void *buffer, lfs_size_t size);

    // Erase a block. A block must be erased before being programmed.
    // The state of an erased block is undefined. Negative error codes
    // are propagated to the user.
    // May return LFS_ERR_CORRUPT if the block should be considered bad.
    int (*erase)(const struct lfs_config *c, lfs_block_t block);

    // Sync the state of the underlying block device. Negative error codes
    // are propagated to the user.
    int (*sync)(const struct lfs_config *c);

#ifdef LFS_THREADSAFE
    // Lock the underlying block device. Negative error codes
    // are propagated to the user.
    int (*lock)(const struct lfs_config *c);

    // Unlock the underlying block device. Negative error codes
    // are propagated to the user.
    int (*unlock)(const struct lfs_config *c);
#endif

    // Minimum size of a block read in bytes. All read operations will be a
    // multiple of this value.
    lfs_size_t read_size;

    // Minimum size of a block program in bytes. All program operations will be
    // a multiple of this value.
    lfs_size_t prog_size;

    // Size of an erasable block in bytes. This does not impact ram consumption
    // and may be larger than the physical erase size. However, non-inlined
    // files take up at minimum one block. Must be a multiple of the read and
    // program sizes.
    lfs_size_t block_size;

    // Number of erasable blocks on the device.
    lfs_size_t block_count;

    // Number of erase cycles before littlefs evicts metadata logs and moves
    // the metadata to another block. Suggested values are in the
    // range 100-1000, with large values having better performance at the cost
    // of less consistent wear distribution.
    //
    // Set to -1 to disable block-level wear-leveling.
    int32_t block_cycles;

    // Size of block caches in bytes. Each cache buffers a portion of a block in
    // RAM. The littlefs needs a read cache, a program cache, and one additional
    // cache per file. Larger caches can improve performance by storing more
    // data and reducing the number of disk accesses. Must be a multiple of the
    // read and program sizes, and a factor of the block size.
    lfs_size_t cache_size;

    // Size of the lookahead buffer in bytes. A larger lookahead buffer
    // increases the number of blocks found during an allocation pass. The
    // lookahead buffer is stored as a compact bitmap, so each byte of RAM
    // can track 8 blocks. Must be a multiple of 8.
    lfs_size_t lookahead_size;

    // Optional statically allocated read buffer. Must be cache_size.
    // By default lfs_malloc is used to allocate this buffer.
    void *read_buffer;

    // Optional statically allocated program buffer. Must be cache_size.
    // By default lfs_malloc is used to allocate this buffer.
    void *prog_buffer;

    // Optional statically allocated lookahead buffer. Must be lookahead_size
    // and aligned to a 32-bit boundary. By default lfs_malloc is used to
    // allocate this buffer.
    void *lookahead_buffer;

    // Optional upper limit on length of file names in bytes. No downside for
    // larger names except the size of the info struct which is controlled by
    // the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX when zero. Stored in
    // superblock and must be respected by other littlefs drivers.
    lfs_size_t name_max;

    // Optional upper limit on files in bytes. No downside for larger files
    // but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX when zero. Stored
    // in superblock and must be respected by other littlefs drivers.
    lfs_size_t file_max;

    // Optional upper limit on custom attributes in bytes. No downside for
    // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
    // LFS_ATTR_MAX when zero.
    lfs_size_t attr_max;

    // Optional upper limit on total space given to metadata pairs in bytes. On
    // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
    // can help bound the metadata compaction time. Must be <= block_size.
    // Defaults to block_size when zero.
    lfs_size_t metadata_max;
};
|
||||
|
||||
// File info structure, filled out by lfs_stat/lfs_dir_read
struct lfs_info {
    // Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR
    uint8_t type;

    // Size of the file, only valid for REG files. Limited to 32-bits.
    lfs_size_t size;

    // Name of the file stored as a null-terminated string. Limited to
    // LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to
    // reduce RAM. LFS_NAME_MAX is stored in superblock and must be
    // respected by other littlefs drivers.
    char name[LFS_NAME_MAX+1];
};
|
||||
|
||||
// Custom attribute structure, used to describe custom attributes
// committed atomically during file writes.
struct lfs_attr {
    // 8-bit type of attribute, provided by user and used to
    // identify the attribute
    uint8_t type;

    // Pointer to buffer containing the attribute
    void *buffer;

    // Size of attribute in bytes, limited to LFS_ATTR_MAX
    lfs_size_t size;
};
|
||||
|
||||
// Optional configuration provided during lfs_file_opencfg
struct lfs_file_config {
    // Optional statically allocated file buffer. Must be cache_size.
    // By default lfs_malloc is used to allocate this buffer.
    void *buffer;

    // Optional list of custom attributes related to the file. If the file
    // is opened with read access, these attributes will be read from disk
    // during the open call. If the file is opened with write access, the
    // attributes will be written to disk every file sync or close. This
    // write occurs atomically with update to the file's contents.
    //
    // Custom attributes are uniquely identified by an 8-bit type and limited
    // to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
    // than the buffer, it will be padded with zeros. If the stored attribute
    // is larger, then it will be silently truncated. If the attribute is not
    // found, it will be created implicitly.
    struct lfs_attr *attrs;

    // Number of custom attributes in the list
    lfs_size_t attr_count;
};
|
||||
|
||||
|
||||
/// internal littlefs data structures ///
|
||||
// A single block cache (used for the read, prog, and per-file caches).
// NOTE(review): field meanings inferred from names and the lfs_config
// cache documentation above — confirm against lfs.c.
typedef struct lfs_cache {
    lfs_block_t block;  // block currently held in this cache
    lfs_off_t off;      // offset of the cached data within the block
    lfs_size_t size;    // number of valid bytes in buffer
    uint8_t *buffer;    // backing storage, cache_size bytes
} lfs_cache_t;
|
||||
|
||||
// In-RAM state of a metadata directory (a two-block metadata log pair).
// NOTE(review): field meanings inferred from names — confirm against lfs.c.
typedef struct lfs_mdir {
    lfs_block_t pair[2];    // the block pair backing this metadata log
    uint32_t rev;           // revision count of the log
    lfs_off_t off;          // current end-of-log offset
    uint32_t etag;          // tag state of the last commit
    uint16_t count;         // number of ids stored in this mdir
    bool erased;            // log can accept appends without compaction?
    bool split;             // does this dir continue in the tail pair?
    lfs_block_t tail[2];    // pair of the next metadata dir in the list
} lfs_mdir_t;
|
||||
|
||||
// littlefs directory type
// NOTE(review): the leading next/id/type/m fields match struct lfs_mlist in
// lfs_t, presumably so open dirs can be linked into that list — confirm.
typedef struct lfs_dir {
    struct lfs_dir *next;   // next entry in the filesystem's open-handle list
    uint16_t id;            // id of this dir within its mdir
    uint8_t type;           // handle type (LFS_TYPE_DIR)
    lfs_mdir_t m;           // metadata dir currently being traversed

    lfs_off_t pos;          // logical position for seek/tell
    lfs_block_t head[2];    // first metadata pair of the directory
} lfs_dir_t;
|
||||
|
||||
// littlefs file type
// NOTE(review): the leading next/id/type/m fields match struct lfs_mlist in
// lfs_t, presumably so open files can be linked into that list — confirm.
typedef struct lfs_file {
    struct lfs_file *next;  // next entry in the filesystem's open-handle list
    uint16_t id;            // id of this file within its mdir
    uint8_t type;           // handle type (LFS_TYPE_REG)
    lfs_mdir_t m;           // metadata dir containing this file's entry

    // on-disk data structure: head block and size of the file's CTZ
    // skip-list (see littlefs DESIGN docs)
    struct lfs_ctz {
        lfs_block_t head;
        lfs_size_t size;
    } ctz;

    uint32_t flags;         // open flags plus internal LFS_F_* state
    lfs_off_t pos;          // current logical file position
    lfs_block_t block;      // block currently being accessed
    lfs_off_t off;          // offset within the current block
    lfs_cache_t cache;      // per-file cache, cache_size bytes

    // per-file configuration from lfs_file_opencfg, must remain valid
    // while the file is open
    const struct lfs_file_config *cfg;
} lfs_file_t;
|
||||
|
||||
// Superblock contents: the on-disk version plus the geometry/limits that
// are "stored in superblock and must be respected by other drivers"
// (see the matching fields in struct lfs_config above).
typedef struct lfs_superblock {
    uint32_t version;       // on-disk format version (LFS_DISK_VERSION)
    lfs_size_t block_size;
    lfs_size_t block_count;
    lfs_size_t name_max;
    lfs_size_t file_max;
    lfs_size_t attr_max;
} lfs_superblock_t;
|
||||
|
||||
// Global-state record (tag plus a metadata pair).
// NOTE(review): semantics inferred from the gstate/gdisk/gdelta fields of
// lfs_t below — confirm against lfs.c.
typedef struct lfs_gstate {
    uint32_t tag;
    lfs_block_t pair[2];
} lfs_gstate_t;
|
||||
|
||||
// The littlefs filesystem type
// NOTE(review): comments on internal fields are inferred from names and the
// public documentation above — confirm against lfs.c before relying on them.
typedef struct lfs {
    lfs_cache_t rcache;     // read cache
    lfs_cache_t pcache;     // program cache

    lfs_block_t root[2];    // metadata pair of the root directory
    // list of currently open handles; lfs_dir_t and lfs_file_t share this
    // leading layout so they can be linked here
    struct lfs_mlist {
        struct lfs_mlist *next;
        uint16_t id;
        uint8_t type;
        lfs_mdir_t m;
    } *mlist;
    uint32_t seed;          // pseudo-random seed (presumed; confirm use)

    lfs_gstate_t gstate;    // current in-RAM global state
    lfs_gstate_t gdisk;     // global state as committed on disk (presumed)
    lfs_gstate_t gdelta;    // pending global-state delta (presumed)

    // block allocator state backed by the lookahead bitmap
    struct lfs_free {
        lfs_block_t off;
        lfs_block_t size;
        lfs_block_t i;
        lfs_block_t ack;
        uint32_t *buffer;   // lookahead bitmap, lookahead_size bytes
    } free;

    // user configuration, must remain valid while mounted
    const struct lfs_config *cfg;
    // effective limits, resolved from cfg and/or the superblock
    lfs_size_t name_max;
    lfs_size_t file_max;
    lfs_size_t attr_max;

#ifdef LFS_MIGRATE
    struct lfs1 *lfs1;      // v1 filesystem state used during lfs_migrate
#endif
} lfs_t;
|
||||
|
||||
|
||||
/// Filesystem functions ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Format a block device with the littlefs
|
||||
//
|
||||
// Requires a littlefs object and config struct. This clobbers the littlefs
|
||||
// object, and does not leave the filesystem mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_format(lfs_t *lfs, const struct lfs_config *config);
|
||||
#endif
|
||||
|
||||
// Mounts a littlefs
|
||||
//
|
||||
// Requires a littlefs object and config struct. Multiple filesystems
|
||||
// may be mounted simultaneously with multiple littlefs objects. Both
|
||||
// lfs and config must be allocated while mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mount(lfs_t *lfs, const struct lfs_config *config);
|
||||
|
||||
// Unmounts a littlefs
|
||||
//
|
||||
// Does nothing besides releasing any allocated resources.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_unmount(lfs_t *lfs);
|
||||
|
||||
/// General operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a file or directory
|
||||
//
|
||||
// If removing a directory, the directory must be empty.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_remove(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Rename or move a file or directory
|
||||
//
|
||||
// If the destination exists, it must match the source in type.
|
||||
// If the destination is a directory, the directory must be empty.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
|
||||
#endif
|
||||
|
||||
// Find info about a file or directory
|
||||
//
|
||||
// Fills out the info structure, based on the specified file or directory.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
|
||||
|
||||
// Get a custom attribute
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than
|
||||
// the buffer, it will be padded with zeros. If the stored attribute is larger,
|
||||
// then it will be silently truncated. If no attribute is found, the error
|
||||
// LFS_ERR_NOATTR is returned and the buffer is filled with zeros.
|
||||
//
|
||||
// Returns the size of the attribute, or a negative error code on failure.
|
||||
// Note, the returned size is the size of the attribute on disk, irrespective
|
||||
// of the size of the buffer. This can be used to dynamically allocate a buffer
|
||||
// or check for existence.
|
||||
lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Set custom attributes
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be
|
||||
// implicitly created.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_setattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a custom attribute
|
||||
//
|
||||
// If an attribute is not found, nothing happens.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
|
||||
#endif
|
||||
|
||||
|
||||
/// File operations ///
|
||||
|
||||
#ifndef LFS_NO_MALLOC
|
||||
// Open a file
|
||||
//
|
||||
// The mode that the file is opened in is determined by the flags, which
|
||||
// are values from the enum lfs_open_flags that are bitwise-ored together.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags);
|
||||
|
||||
// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM
|
||||
// thus use lfs_file_opencfg() with config.buffer set.
|
||||
#endif
|
||||
|
||||
// Open a file with extra configuration
|
||||
//
|
||||
// The mode that the file is opened in is determined by the flags, which
|
||||
// are values from the enum lfs_open_flags that are bitwise-ored together.
|
||||
//
|
||||
// The config struct provides additional config options per file as described
|
||||
// above. The config struct must remain allocated while the file is open, and
|
||||
// the config struct must be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags,
|
||||
const struct lfs_file_config *config);
|
||||
|
||||
// Close a file
|
||||
//
|
||||
// Any pending writes are written out to storage as though
|
||||
// sync had been called and releases any allocated resources.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_close(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Synchronize a file on storage
|
||||
//
|
||||
// Any pending writes are written out to storage.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Read data from file
|
||||
//
|
||||
// Takes a buffer and size indicating where to store the read data.
|
||||
// Returns the number of bytes read, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
|
||||
void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Write data to file
|
||||
//
|
||||
// Takes a buffer and size indicating the data to write. The file will not
|
||||
// actually be updated on the storage until either sync or close is called.
|
||||
//
|
||||
// Returns the number of bytes written, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
|
||||
const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
// Change the position of the file
|
||||
//
|
||||
// The change in position is determined by the offset and whence flag.
|
||||
// Returns the new position of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
|
||||
lfs_soff_t off, int whence);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Truncates the size of the file to the specified size
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
|
||||
#endif
|
||||
|
||||
// Return the position of the file
|
||||
//
|
||||
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR)
|
||||
// Returns the position of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Change the position of the file to the beginning of the file
|
||||
//
|
||||
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET)
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Return the size of the file
|
||||
//
|
||||
// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END)
|
||||
// Returns the size of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
|
||||
/// Directory operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Create a directory
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mkdir(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
// Open a directory
|
||||
//
|
||||
// Once open a directory can be used with read to iterate over files.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path);
|
||||
|
||||
// Close a directory
|
||||
//
|
||||
// Releases any allocated resources.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
// Read an entry in the directory
|
||||
//
|
||||
// Fills out the info structure, based on the specified file or directory.
|
||||
// Returns a positive value on success, 0 at the end of directory,
|
||||
// or a negative error code on failure.
|
||||
int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info);
|
||||
|
||||
// Change the position of the directory
|
||||
//
|
||||
// The new off must be a value previous returned from tell and specifies
|
||||
// an absolute offset in the directory seek.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off);
|
||||
|
||||
// Return the position of the directory
|
||||
//
|
||||
// The returned offset is only meant to be consumed by seek and may not make
|
||||
// sense, but does indicate the current position in the directory iteration.
|
||||
//
|
||||
// Returns the position of the directory, or a negative error code on failure.
|
||||
lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
// Change the position of the directory to the beginning of the directory
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
|
||||
/// Filesystem-level filesystem operations
|
||||
|
||||
// Finds the current size of the filesystem
|
||||
//
|
||||
// Note: Result is best effort. If files share COW structures, the returned
|
||||
// size may be larger than the filesystem actually is.
|
||||
//
|
||||
// Returns the number of allocated blocks, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_fs_size(lfs_t *lfs);
|
||||
|
||||
// Traverse through all blocks in use by the filesystem
|
||||
//
|
||||
// The provided callback will be called with each block address that is
|
||||
// currently in use by the filesystem. This can be used to determine which
|
||||
// blocks are in use or how much of the storage is available.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
#ifdef LFS_MIGRATE
|
||||
// Attempts to migrate a previous version of littlefs
|
||||
//
|
||||
// Behaves similarly to the lfs_format function. Attempts to mount
|
||||
// the previous version of littlefs and update the filesystem so it can be
|
||||
// mounted with the current version of littlefs.
|
||||
//
|
||||
// Requires a littlefs object and config struct. This clobbers the littlefs
|
||||
// object, and does not leave the filesystem mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
34
components/fs/littlefs/littlefs/lfs_util.c
Normal file
34
components/fs/littlefs/littlefs/lfs_util.c
Normal file
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* lfs util functions
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include "lfs_util.h"
|
||||
|
||||
// Only compile if user does not provide custom config
|
||||
#ifndef LFS_CONFIG
|
||||
|
||||
|
||||
// Software CRC implementation with small lookup table
|
||||
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
|
||||
static const uint32_t rtable[16] = {
|
||||
0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
|
||||
0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
|
||||
0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
|
||||
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
|
||||
};
|
||||
|
||||
const uint8_t *data = buffer;
|
||||
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
|
||||
crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
|
||||
}
|
||||
|
||||
return crc;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
243
components/fs/littlefs/littlefs/lfs_util.h
Normal file
243
components/fs/littlefs/littlefs/lfs_util.h
Normal file
|
@ -0,0 +1,243 @@
|
|||
/*
|
||||
* lfs utility functions
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_UTIL_H
|
||||
#define LFS_UTIL_H
|
||||
|
||||
// Users can override lfs_util.h with their own configuration by defining
|
||||
// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
|
||||
//
|
||||
// If LFS_CONFIG is used, none of the default utils will be emitted and must be
|
||||
// provided by the config file. To start, I would suggest copying lfs_util.h
|
||||
// and modifying as needed.
|
||||
#ifdef LFS_CONFIG
|
||||
#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
|
||||
#define LFS_STRINGIZE2(x) #x
|
||||
#include LFS_STRINGIZE(LFS_CONFIG)
|
||||
#else
|
||||
|
||||
// System includes
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
#ifndef LFS_NO_MALLOC
|
||||
#include <stdlib.h>
|
||||
#endif
|
||||
#ifndef LFS_NO_ASSERT
|
||||
#include <assert.h>
|
||||
#endif
|
||||
#if !defined(LFS_NO_DEBUG) || \
|
||||
!defined(LFS_NO_WARN) || \
|
||||
!defined(LFS_NO_ERROR) || \
|
||||
defined(LFS_YES_TRACE)
|
||||
#include <stdio.h>
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
// Macros, may be replaced by system specific wrappers. Arguments to these
|
||||
// macros must not have side-effects as the macros can be removed for a smaller
|
||||
// code footprint
|
||||
|
||||
// Logging functions
|
||||
#ifndef LFS_TRACE
|
||||
#ifdef LFS_YES_TRACE
|
||||
#define LFS_TRACE_(fmt, ...) \
|
||||
printf("%s:%d:trace: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
|
||||
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
|
||||
#else
|
||||
#define LFS_TRACE(...)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef LFS_DEBUG
|
||||
#ifndef LFS_NO_DEBUG
|
||||
#define LFS_DEBUG_(fmt, ...) \
|
||||
printf("%s:%d:debug: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
|
||||
#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
|
||||
#else
|
||||
#define LFS_DEBUG(...)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef LFS_WARN
|
||||
#ifndef LFS_NO_WARN
|
||||
#define LFS_WARN_(fmt, ...) \
|
||||
printf("%s:%d:warn: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
|
||||
#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
|
||||
#else
|
||||
#define LFS_WARN(...)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef LFS_ERROR
|
||||
#ifndef LFS_NO_ERROR
|
||||
#define LFS_ERROR_(fmt, ...) \
|
||||
printf("%s:%d:error: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
|
||||
#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
|
||||
#else
|
||||
#define LFS_ERROR(...)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Runtime assertions
|
||||
#ifndef LFS_ASSERT
|
||||
#ifndef LFS_NO_ASSERT
|
||||
#define LFS_ASSERT(test) assert(test)
|
||||
#else
|
||||
#define LFS_ASSERT(test)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
// Builtin functions, these may be replaced by more efficient
|
||||
// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
|
||||
// expensive basic C implementation for debugging purposes
|
||||
|
||||
// Min/max functions for unsigned 32-bit numbers
|
||||
static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
|
||||
return (a > b) ? a : b;
|
||||
}
|
||||
|
||||
static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
|
||||
return (a < b) ? a : b;
|
||||
}
|
||||
|
||||
// Align to nearest multiple of a size
|
||||
static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
|
||||
return a - (a % alignment);
|
||||
}
|
||||
|
||||
static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
|
||||
return lfs_aligndown(a + alignment-1, alignment);
|
||||
}
|
||||
|
||||
// Find the smallest power of 2 greater than or equal to a
|
||||
static inline uint32_t lfs_npw2(uint32_t a) {
|
||||
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
|
||||
return 32 - __builtin_clz(a-1);
|
||||
#else
|
||||
uint32_t r = 0;
|
||||
uint32_t s;
|
||||
a -= 1;
|
||||
s = (a > 0xffff) << 4; a >>= s; r |= s;
|
||||
s = (a > 0xff ) << 3; a >>= s; r |= s;
|
||||
s = (a > 0xf ) << 2; a >>= s; r |= s;
|
||||
s = (a > 0x3 ) << 1; a >>= s; r |= s;
|
||||
return (r | (a >> 1)) + 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Count the number of trailing binary zeros in a
|
||||
// lfs_ctz(0) may be undefined
|
||||
static inline uint32_t lfs_ctz(uint32_t a) {
|
||||
#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
|
||||
return __builtin_ctz(a);
|
||||
#else
|
||||
return lfs_npw2((a & -a) + 1) - 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Count the number of binary ones in a
|
||||
static inline uint32_t lfs_popc(uint32_t a) {
|
||||
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
|
||||
return __builtin_popcount(a);
|
||||
#else
|
||||
a = a - ((a >> 1) & 0x55555555);
|
||||
a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
|
||||
return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Find the sequence comparison of a and b, this is the distance
|
||||
// between a and b ignoring overflow
|
||||
static inline int lfs_scmp(uint32_t a, uint32_t b) {
|
||||
return (int)(unsigned)(a - b);
|
||||
}
|
||||
|
||||
// Convert between 32-bit little-endian and native order
|
||||
static inline uint32_t lfs_fromle32(uint32_t a) {
|
||||
#if (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
|
||||
return a;
|
||||
#elif !defined(LFS_NO_INTRINSICS) && ( \
|
||||
(defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
|
||||
return __builtin_bswap32(a);
|
||||
#else
|
||||
return (((uint8_t*)&a)[0] << 0) |
|
||||
(((uint8_t*)&a)[1] << 8) |
|
||||
(((uint8_t*)&a)[2] << 16) |
|
||||
(((uint8_t*)&a)[3] << 24);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint32_t lfs_tole32(uint32_t a) {
|
||||
return lfs_fromle32(a);
|
||||
}
|
||||
|
||||
// Convert between 32-bit big-endian and native order
|
||||
static inline uint32_t lfs_frombe32(uint32_t a) {
|
||||
#if !defined(LFS_NO_INTRINSICS) && ( \
|
||||
(defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
|
||||
return __builtin_bswap32(a);
|
||||
#elif (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
|
||||
return a;
|
||||
#else
|
||||
return (((uint8_t*)&a)[0] << 24) |
|
||||
(((uint8_t*)&a)[1] << 16) |
|
||||
(((uint8_t*)&a)[2] << 8) |
|
||||
(((uint8_t*)&a)[3] << 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint32_t lfs_tobe32(uint32_t a) {
|
||||
return lfs_frombe32(a);
|
||||
}
|
||||
|
||||
// Calculate CRC-32 with polynomial = 0x04c11db7
|
||||
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
|
||||
|
||||
// Allocate memory, only used if buffers are not provided to littlefs
|
||||
// Note, memory must be 64-bit aligned
|
||||
static inline void *lfs_malloc(size_t size) {
|
||||
#ifndef LFS_NO_MALLOC
|
||||
return malloc(size);
|
||||
#else
|
||||
(void)size;
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Deallocate memory, only used if buffers are not provided to littlefs
|
||||
static inline void lfs_free(void *p) {
|
||||
#ifndef LFS_NO_MALLOC
|
||||
free(p);
|
||||
#else
|
||||
(void)p;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
#endif
|
284
components/fs/littlefs/littlefs/scripts/code.py
Executable file
284
components/fs/littlefs/littlefs/scripts/code.py
Executable file
|
@ -0,0 +1,284 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find code size at the function level. Basically just a bit wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
|
||||
results = co.defaultdict(lambda: 0)
|
||||
pattern = re.compile(
|
||||
'^(?P<size>[0-9a-fA-F]+)' +
|
||||
' (?P<type>[%s])' % re.escape(args['type']) +
|
||||
' (?P<func>.+?)$')
|
||||
for path in paths:
|
||||
# note nm-tool may contain extra args
|
||||
cmd = args['nm_tool'] + ['--size-sort', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace')
|
||||
for line in proc.stdout:
|
||||
m = pattern.match(line)
|
||||
if m:
|
||||
results[(path, m.group('func'))] += int(m.group('size'), 16)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
flat_results = []
|
||||
for (file, func), size in results.items():
|
||||
# map to source files
|
||||
if args.get('build_dir'):
|
||||
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
|
||||
# replace .o with .c, different scripts report .o/.c, we need to
|
||||
# choose one if we want to deduplicate csv files
|
||||
file = re.sub('\.o$', '.c', file)
|
||||
# discard internal functions
|
||||
if not args.get('everything'):
|
||||
if func.startswith('__'):
|
||||
continue
|
||||
# discard .8449 suffixes created by optimizer
|
||||
func = re.sub('\.[0-9]+', '', func)
|
||||
|
||||
flat_results.append((file, func, size))
|
||||
|
||||
return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .o files
|
||||
paths = []
|
||||
for path in args['obj_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.o'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .obj files found in %r?' % args['obj_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['code_size']))
|
||||
for result in r
|
||||
if result.get('code_size') not in {None, ''}]
|
||||
|
||||
total = 0
|
||||
for _, _, size in results:
|
||||
total += size
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['code_size']))
|
||||
for result in r
|
||||
if result.get('code_size') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total = 0
|
||||
for _, _, size in prev_results:
|
||||
prev_total += size
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('code_size', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, size in results:
|
||||
merged_results[(file, func)]['code_size'] = size
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: 0)
|
||||
for file, func, size in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entries[entry] += size
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0))
|
||||
for name, new in news.items():
|
||||
diff[name] = (0, new, new, 1.0)
|
||||
for name, old in olds.items():
|
||||
_, new, _, _ = diff[name]
|
||||
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1], x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][1], x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][3], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s' % (by, 'size'))
|
||||
else:
|
||||
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, size):
|
||||
print("%-36s %7d" % (name, size))
|
||||
|
||||
def print_diff_entry(name, old, new, diff, ratio):
|
||||
print("%-36s %7s %7s %+7d%s" % (name,
|
||||
old or "-",
|
||||
new or "-",
|
||||
diff,
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, size in sorted_entries(entries.items()):
|
||||
print_entry(name, size)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for old, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, new, _, _ in diff.values() if not new)))
|
||||
for name, (old, new, diff, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name, old, new, diff, ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
ratio = (0.0 if not prev_total and not total
|
||||
else 1.0 if not prev_total
|
||||
else (total-prev_total)/prev_total)
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total, total,
|
||||
total-prev_total,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find code size at the function level.")
|
||||
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
|
||||
help="Description of where to find *.o files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % OBJ_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't compile and find code sizes, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff code size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--size-sort', action='store_true',
|
||||
help="Sort by size.")
|
||||
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
|
||||
help="Sort by size, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level code sizes. Note this does not include padding! "
|
||||
"So sizes may differ from other tools.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total code size.")
|
||||
parser.add_argument('--type', default='tTrRdD',
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %(default)r.")
|
||||
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
|
||||
help="Path to the nm tool to use.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
323
components/fs/littlefs/littlefs/scripts/coverage.py
Executable file
323
components/fs/littlefs/littlefs/scripts/coverage.py
Executable file
|
@ -0,0 +1,323 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Parse and report coverage info from .info files generated by lcov
|
||||
#
|
||||
import os
|
||||
import glob
|
||||
import csv
|
||||
import re
|
||||
import collections as co
|
||||
import bisect as b
|
||||
|
||||
|
||||
INFO_PATHS = ['tests/*.toml.info']
|
||||
|
||||
def collect(paths, **args):
|
||||
file = None
|
||||
funcs = []
|
||||
lines = co.defaultdict(lambda: 0)
|
||||
pattern = re.compile(
|
||||
'^(?P<file>SF:/?(?P<file_name>.*))$'
|
||||
'|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
|
||||
'|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
|
||||
for path in paths:
|
||||
with open(path) as f:
|
||||
for line in f:
|
||||
m = pattern.match(line)
|
||||
if m and m.group('file'):
|
||||
file = m.group('file_name')
|
||||
elif m and file and m.group('func'):
|
||||
funcs.append((file, int(m.group('func_lineno')),
|
||||
m.group('func_name')))
|
||||
elif m and file and m.group('line'):
|
||||
lines[(file, int(m.group('line_lineno')))] += (
|
||||
int(m.group('line_hits')))
|
||||
|
||||
# map line numbers to functions
|
||||
funcs.sort()
|
||||
def func_from_lineno(file, lineno):
|
||||
i = b.bisect(funcs, (file, lineno))
|
||||
if i and funcs[i-1][0] == file:
|
||||
return funcs[i-1][2]
|
||||
else:
|
||||
return None
|
||||
|
||||
# reduce to function info
|
||||
reduced_funcs = co.defaultdict(lambda: (0, 0))
|
||||
for (file, line_lineno), line_hits in lines.items():
|
||||
func = func_from_lineno(file, line_lineno)
|
||||
if not func:
|
||||
continue
|
||||
hits, count = reduced_funcs[(file, func)]
|
||||
reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
|
||||
|
||||
results = []
|
||||
for (file, func), (hits, count) in reduced_funcs.items():
|
||||
# discard internal/testing functions (test_* injected with
|
||||
# internal testing)
|
||||
if not args.get('everything'):
|
||||
if func.startswith('__') or func.startswith('test_'):
|
||||
continue
|
||||
# discard .8449 suffixes created by optimizer
|
||||
func = re.sub('\.[0-9]+', '', func)
|
||||
results.append((file, func, hits, count))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find coverage
|
||||
if not args.get('use'):
|
||||
# find *.info files
|
||||
paths = []
|
||||
for path in args['info_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.gcov'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .info files found in %r?' % args['info_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['coverage_hits']),
|
||||
int(result['coverage_count']))
|
||||
for result in r
|
||||
if result.get('coverage_hits') not in {None, ''}
|
||||
if result.get('coverage_count') not in {None, ''}]
|
||||
|
||||
total_hits, total_count = 0, 0
|
||||
for _, _, hits, count in results:
|
||||
total_hits += hits
|
||||
total_count += count
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['coverage_hits']),
|
||||
int(result['coverage_count']))
|
||||
for result in r
|
||||
if result.get('coverage_hits') not in {None, ''}
|
||||
if result.get('coverage_count') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total_hits, prev_total_count = 0, 0
|
||||
for _, _, hits, count in prev_results:
|
||||
prev_total_hits += hits
|
||||
prev_total_count += count
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('coverage_hits', None)
|
||||
result.pop('coverage_count', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, hits, count in results:
|
||||
merged_results[(file, func)]['coverage_hits'] = hits
|
||||
merged_results[(file, func)]['coverage_count'] = count
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: (0, 0))
|
||||
for file, func, hits, count in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entry_hits, entry_count = entries[entry]
|
||||
entries[entry] = (entry_hits + hits, entry_count + count)
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
|
||||
for name, (new_hits, new_count) in news.items():
|
||||
diff[name] = (
|
||||
0, 0,
|
||||
new_hits, new_count,
|
||||
new_hits, new_count,
|
||||
(new_hits/new_count if new_count else 1.0) - 1.0)
|
||||
for name, (old_hits, old_count) in olds.items():
|
||||
_, _, new_hits, new_count, _, _, _ = diff[name]
|
||||
diff[name] = (
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
new_hits-old_hits, new_count-old_count,
|
||||
((new_hits/new_count if new_count else 1.0)
|
||||
- (old_hits/old_count if old_count else 1.0)))
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
|
||||
elif args.get('reverse_coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
|
||||
elif args.get('reverse_coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][6], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %19s' % (by, 'hits/line'))
|
||||
else:
|
||||
print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, hits, count):
|
||||
print("%-36s %11s %7s" % (name,
|
||||
'%d/%d' % (hits, count)
|
||||
if count else '-',
|
||||
'%.1f%%' % (100*hits/count)
|
||||
if count else '-'))
|
||||
|
||||
def print_diff_entry(name,
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count,
|
||||
ratio):
|
||||
print("%-36s %11s %7s %11s %7s %11s%s" % (name,
|
||||
'%d/%d' % (old_hits, old_count)
|
||||
if old_count else '-',
|
||||
'%.1f%%' % (100*old_hits/old_count)
|
||||
if old_count else '-',
|
||||
'%d/%d' % (new_hits, new_count)
|
||||
if new_count else '-',
|
||||
'%.1f%%' % (100*new_hits/new_count)
|
||||
if new_count else '-',
|
||||
'%+d/%+d' % (diff_hits, diff_count),
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, (hits, count) in sorted_entries(entries.items()):
|
||||
print_entry(name, hits, count)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
|
||||
for name, (
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name,
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count,
|
||||
ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total_hits, total_count)
|
||||
else:
|
||||
ratio = ((total_hits/total_count
|
||||
if total_count else 1.0)
|
||||
- (prev_total_hits/prev_total_count
|
||||
if prev_total_count else 1.0))
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total_hits, prev_total_count,
|
||||
total_hits, total_count,
|
||||
total_hits-prev_total_hits, total_count-prev_total_count,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Parse and report coverage info from .info files \
|
||||
generated by lcov")
|
||||
parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
|
||||
help="Description of where to find *.info files. May be a directory \
|
||||
or list of paths. *.info files will be merged to show the total \
|
||||
coverage. Defaults to %r." % INFO_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't do any work, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff code size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--coverage-sort', action='store_true',
|
||||
help="Sort by coverage.")
|
||||
parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
|
||||
help="Sort by coverage, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level coverage.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total coverage.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
283
components/fs/littlefs/littlefs/scripts/data.py
Executable file
283
components/fs/littlefs/littlefs/scripts/data.py
Executable file
|
@ -0,0 +1,283 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find data size at the function level. Basically just a bit wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
    """Run nm over each object file and sum reported symbol sizes.

    Args:
        paths: iterable of object-file paths to inspect.
        **args: options; reads 'type' (nm single-character symbol types
            to match), 'nm_tool' (argv prefix for the nm tool),
            'verbose', 'build_dir', and 'everything'.

    Returns:
        List of (file, func, size) tuples with sizes accumulated per
        (file, symbol) pair; exits the process if nm fails.
    """
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                # nm reports sizes in hex
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            # surface nm's stderr before bailing out
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        # (raw strings: '\.' in a plain literal is an invalid escape
        # sequence and warns on newer Pythons)
        file = re.sub(r'\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub(r'\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .o files
|
||||
paths = []
|
||||
for path in args['obj_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.o'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .obj files found in %r?' % args['obj_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['data_size']))
|
||||
for result in r
|
||||
if result.get('data_size') not in {None, ''}]
|
||||
|
||||
total = 0
|
||||
for _, _, size in results:
|
||||
total += size
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['data_size']))
|
||||
for result in r
|
||||
if result.get('data_size') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total = 0
|
||||
for _, _, size in prev_results:
|
||||
prev_total += size
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('data_size', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, size in results:
|
||||
merged_results[(file, func)]['data_size'] = size
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
    """Collapse (file, func, size) rows into a {key: total_size} map,
    keyed by file or by function name."""
    totals = co.defaultdict(lambda: 0)
    for file, func, size in results:
        key = file if by == 'file' else func
        totals[key] += size
    return totals
|
||||
|
||||
def diff_entries(olds, news):
    """Join old and new size maps into {name: (old, new, diff, ratio)}."""
    diff = co.defaultdict(lambda: (0, 0, 0, 0))
    # seed with new sizes; names only in news count as fully added
    for name, new in news.items():
        diff[name] = (0, new, new, 1.0)
    # fold in old sizes, computing deltas and relative change
    for name, old in olds.items():
        new = diff[name][1]
        ratio = (new-old)/old if old else 1.0
        diff[name] = (old, new, new-old, ratio)
    return diff
|
||||
|
||||
def sorted_entries(entries):
    """Sort (name, size) pairs: by size when requested via args,
    otherwise alphabetically."""
    if args.get('size_sort'):
        # largest first, full pair as tie-breaker
        return sorted(entries, key=lambda e: (-e[1], e))
    if args.get('reverse_size_sort'):
        # smallest first, full pair as tie-breaker
        return sorted(entries, key=lambda e: (+e[1], e))
    return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
    """Sort (name, (old, new, diff, ratio)) pairs: by new size when
    requested via args, otherwise by change ratio (largest first)."""
    if args.get('size_sort'):
        return sorted(entries, key=lambda e: (-e[1][1], e))
    if args.get('reverse_size_sort'):
        return sorted(entries, key=lambda e: (+e[1][1], e))
    return sorted(entries, key=lambda e: (-e[1][3], e))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s' % (by, 'size'))
|
||||
else:
|
||||
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, size):
|
||||
print("%-36s %7d" % (name, size))
|
||||
|
||||
def print_diff_entry(name, old, new, diff, ratio):
|
||||
print("%-36s %7s %7s %+7d%s" % (name,
|
||||
old or "-",
|
||||
new or "-",
|
||||
diff,
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, size in sorted_entries(entries.items()):
|
||||
print_entry(name, size)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for old, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, new, _, _ in diff.values() if not new)))
|
||||
for name, (old, new, diff, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name, old, new, diff, ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
ratio = (0.0 if not prev_total and not total
|
||||
else 1.0 if not prev_total
|
||||
else (total-prev_total)/prev_total)
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total, total,
|
||||
total-prev_total,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find data size at the function level.")
|
||||
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
|
||||
help="Description of where to find *.o files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % OBJ_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't compile and find data sizes, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff data size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--size-sort', action='store_true',
|
||||
help="Sort by size.")
|
||||
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
|
||||
help="Sort by size, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level data sizes. Note this does not include padding! "
|
||||
"So sizes may differ from other tools.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total data size.")
|
||||
parser.add_argument('--type', default='dDbB',
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %(default)r.")
|
||||
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
|
||||
help="Path to the nm tool to use.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
383
components/fs/littlefs/littlefs/scripts/explode_asserts.py
Executable file
383
components/fs/littlefs/littlefs/scripts/explode_asserts.py
Executable file
|
@ -0,0 +1,383 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
PATTERN = ['LFS_ASSERT', 'assert']
|
||||
PREFIX = 'LFS'
|
||||
MAXWIDTH = 16
|
||||
|
||||
ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
|
||||
FAIL = """
|
||||
__attribute__((unused))
|
||||
static void __{prefix}_assert_fail_{type}(
|
||||
const char *file, int line, const char *comp,
|
||||
{ctype} lh, size_t lsize,
|
||||
{ctype} rh, size_t rsize) {{
|
||||
printf("%s:%d:assert: assert failed with ", file, line);
|
||||
__{prefix}_assert_print_{type}(lh, lsize);
|
||||
printf(", expected %s ", comp);
|
||||
__{prefix}_assert_print_{type}(rh, rsize);
|
||||
printf("\\n");
|
||||
fflush(NULL);
|
||||
raise(SIGABRT);
|
||||
}}
|
||||
"""
|
||||
|
||||
COMP = {
|
||||
'==': 'eq',
|
||||
'!=': 'ne',
|
||||
'<=': 'le',
|
||||
'>=': 'ge',
|
||||
'<': 'lt',
|
||||
'>': 'gt',
|
||||
}
|
||||
|
||||
TYPE = {
|
||||
'int': {
|
||||
'ctype': 'intmax_t',
|
||||
'fail': FAIL,
|
||||
'print': """
|
||||
__attribute__((unused))
|
||||
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
|
||||
(void)size;
|
||||
printf("%"PRIiMAX, v);
|
||||
}}
|
||||
""",
|
||||
'assert': """
|
||||
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
|
||||
do {{
|
||||
__typeof__(lh) _lh = lh;
|
||||
__typeof__(lh) _rh = (__typeof__(lh))rh;
|
||||
if (!(_lh {op} _rh)) {{
|
||||
__{prefix}_assert_fail_{type}(file, line, "{comp}",
|
||||
(intmax_t)_lh, 0, (intmax_t)_rh, 0);
|
||||
}}
|
||||
}} while (0)
|
||||
"""
|
||||
},
|
||||
'bool': {
|
||||
'ctype': 'bool',
|
||||
'fail': FAIL,
|
||||
'print': """
|
||||
__attribute__((unused))
|
||||
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
|
||||
(void)size;
|
||||
printf("%s", v ? "true" : "false");
|
||||
}}
|
||||
""",
|
||||
'assert': """
|
||||
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
|
||||
do {{
|
||||
bool _lh = !!(lh);
|
||||
bool _rh = !!(rh);
|
||||
if (!(_lh {op} _rh)) {{
|
||||
__{prefix}_assert_fail_{type}(file, line, "{comp}",
|
||||
_lh, 0, _rh, 0);
|
||||
}}
|
||||
}} while (0)
|
||||
"""
|
||||
},
|
||||
'mem': {
|
||||
'ctype': 'const void *',
|
||||
'fail': FAIL,
|
||||
'print': """
|
||||
__attribute__((unused))
|
||||
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
|
||||
const uint8_t *s = v;
|
||||
printf("\\\"");
|
||||
for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
|
||||
if (s[i] >= ' ' && s[i] <= '~') {{
|
||||
printf("%c", s[i]);
|
||||
}} else {{
|
||||
printf("\\\\x%02x", s[i]);
|
||||
}}
|
||||
}}
|
||||
if (size > {maxwidth}) {{
|
||||
printf("...");
|
||||
}}
|
||||
printf("\\\"");
|
||||
}}
|
||||
""",
|
||||
'assert': """
|
||||
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
|
||||
do {{
|
||||
const void *_lh = lh;
|
||||
const void *_rh = rh;
|
||||
if (!(memcmp(_lh, _rh, size) {op} 0)) {{
|
||||
__{prefix}_assert_fail_{type}(file, line, "{comp}",
|
||||
_lh, size, _rh, size);
|
||||
}}
|
||||
}} while (0)
|
||||
"""
|
||||
},
|
||||
'str': {
|
||||
'ctype': 'const char *',
|
||||
'fail': FAIL,
|
||||
'print': """
|
||||
__attribute__((unused))
|
||||
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
|
||||
__{prefix}_assert_print_mem(v, size);
|
||||
}}
|
||||
""",
|
||||
'assert': """
|
||||
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
|
||||
do {{
|
||||
const char *_lh = lh;
|
||||
const char *_rh = rh;
|
||||
if (!(strcmp(_lh, _rh) {op} 0)) {{
|
||||
__{prefix}_assert_fail_{type}(file, line, "{comp}",
|
||||
_lh, strlen(_lh), _rh, strlen(_rh));
|
||||
}}
|
||||
}} while (0)
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
def mkdecls(outf, maxwidth=16):
    """Write the generated assert helpers to outf: required includes,
    then a print function, fail handler, and comparison macro for every
    TYPE/COMP combination.

    Args:
        outf: writable text file object.
        maxwidth: max characters shown when printing mem/str operands.
    """
    outf.write("#include <stdio.h>\n")
    outf.write("#include <stdbool.h>\n")
    outf.write("#include <stdint.h>\n")
    outf.write("#include <inttypes.h>\n")
    outf.write("#include <signal.h>\n")

    for type, desc in sorted(TYPE.items()):
        format = {
            'type': type.lower(), 'TYPE': type.upper(),
            'ctype': desc['ctype'],
            'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
            'maxwidth': maxwidth,
        }
        # collapse each template onto one line so emitted line numbers
        # stay stable (raw strings: '\s' in a plain literal is an
        # invalid escape sequence and warns on newer Pythons)
        outf.write(re.sub(r'\s+', ' ',
            desc['print'].strip().format(**format))+'\n')
        outf.write(re.sub(r'\s+', ' ',
            desc['fail'].strip().format(**format))+'\n')

        for op, comp in sorted(COMP.items()):
            format.update({
                'comp': comp.lower(), 'COMP': comp.upper(),
                'op': op,
            })
            outf.write(re.sub(r'\s+', ' ',
                desc['assert'].strip().format(**format))+'\n')
|
||||
|
||||
def mkassert(type, comp, lh, rh, size=None):
    """Render a call to the generated assert macro for the given type,
    comparison name, and operand expressions (size only for mem
    asserts)."""
    # 'fields' rather than 'format' to avoid shadowing the builtin
    fields = {
        'type': type.lower(), 'TYPE': type.upper(),
        'comp': comp.lower(), 'COMP': comp.upper(),
        'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
        'lh': lh.strip(' '),
        'rh': rh.strip(' '),
        'size': size,
    }
    if size:
        template = ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})'
    else:
        template = ASSERT + '(__FILE__, __LINE__, {lh}, {rh})'
    return template.format(**fields)
|
||||
|
||||
|
||||
# simple recursive descent parser
|
||||
LEX = {
|
||||
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
|
||||
'assert': PATTERN,
|
||||
'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
|
||||
'arrow': ['=>'],
|
||||
'paren': ['\(', '\)'],
|
||||
'op': ['strcmp', 'memcmp', '->'],
|
||||
'comp': ['==', '!=', '<=', '>=', '<', '>'],
|
||||
'logic': ['\&\&', '\|\|'],
|
||||
'sep': [':', ';', '\{', '\}', ','],
|
||||
}
|
||||
|
||||
class ParseFailure(Exception):
    """Raised when the parser cannot match an expected token."""
    def __init__(self, expected, found):
        self.expected = expected
        self.found = found

    def __str__(self):
        # truncate the unmatched remainder so messages stay readable
        preview = repr(self.found)[:70]
        return "expected %r, found %s..." % (self.expected, preview)
|
||||
|
||||
class Parse:
    """Tokenizer plus cursor with backtracking support.

    Splits the input into (kind, text) tokens using the given lexeme
    regexes; text between matches is kept as (None, text) tokens so the
    original input can be reproduced verbatim.
    """
    def __init__(self, inf, lexemes):
        alternation = '|'.join(
            '(?P<%s>%s)' % (kind, '|'.join(pats))
            for kind, pats in lexemes.items())
        lexer = re.compile(alternation, re.DOTALL)
        rest = inf.read()
        toks = []
        while True:
            match = lexer.search(rest)
            if not match:
                # trailing unlexed text, then done
                toks.append((None, rest))
                break
            if match.start() > 0:
                # keep the unmatched prefix verbatim
                toks.append((None, rest[:match.start()]))
            toks.append((match.lastgroup, match.group()))
            rest = rest[match.end():]
        self.tokens = toks
        self.off = 0

    def lookahead(self, *patterns):
        """Peek at the current token; match by kind or exact text."""
        if self.off < len(self.tokens):
            kind, text = self.tokens[self.off]
            if kind in patterns or text in patterns:
                self.m = text
                return self.m
        self.m = None
        return self.m

    def accept(self, *patterns):
        """Consume and return the current token if it matches."""
        matched = self.lookahead(*patterns)
        if matched is not None:
            self.off += 1
        return matched

    def expect(self, *patterns):
        """Like accept, but raise ParseFailure on a mismatch."""
        matched = self.accept(*patterns)
        if not matched:
            raise ParseFailure(patterns, self.tokens[self.off:])
        return matched

    def push(self):
        """Save the cursor position for later backtracking."""
        return self.off

    def pop(self, state):
        """Restore a cursor position saved by push."""
        self.off = state
|
||||
|
||||
def passert(p):
|
||||
def pastr(p):
|
||||
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
lh = pexpr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = pexpr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
comp = p.expect('comp') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('str', COMP[comp], lh, rh)
|
||||
|
||||
def pamem(p):
|
||||
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
lh = pexpr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = pexpr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
size = pexpr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
comp = p.expect('comp') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('mem', COMP[comp], lh, rh, size)
|
||||
|
||||
def paint(p):
|
||||
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
lh = pexpr(p) ; p.accept('ws')
|
||||
comp = p.expect('comp') ; p.accept('ws')
|
||||
rh = pexpr(p) ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('int', COMP[comp], lh, rh)
|
||||
|
||||
def pabool(p):
|
||||
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
lh = pexprs(p) ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('bool', 'eq', lh, 'true')
|
||||
|
||||
def pa(p):
|
||||
return p.expect('assert')
|
||||
|
||||
state = p.push()
|
||||
lastf = None
|
||||
for pa in [pastr, pamem, paint, pabool, pa]:
|
||||
try:
|
||||
return pa(p)
|
||||
except ParseFailure as f:
|
||||
p.pop(state)
|
||||
lastf = f
|
||||
else:
|
||||
raise lastf
|
||||
|
||||
def pexpr(p):
|
||||
res = []
|
||||
while True:
|
||||
if p.accept('('):
|
||||
res.append(p.m)
|
||||
while True:
|
||||
res.append(pexprs(p))
|
||||
if p.accept('sep'):
|
||||
res.append(p.m)
|
||||
else:
|
||||
break
|
||||
res.append(p.expect(')'))
|
||||
elif p.lookahead('assert'):
|
||||
res.append(passert(p))
|
||||
elif p.accept('assert', 'ws', 'string', 'op', None):
|
||||
res.append(p.m)
|
||||
else:
|
||||
return ''.join(res)
|
||||
|
||||
def pexprs(p):
|
||||
res = []
|
||||
while True:
|
||||
res.append(pexpr(p))
|
||||
if p.accept('comp', 'logic', ','):
|
||||
res.append(p.m)
|
||||
else:
|
||||
return ''.join(res)
|
||||
|
||||
def pstmt(p):
|
||||
ws = p.accept('ws') or ''
|
||||
lh = pexprs(p)
|
||||
if p.accept('=>'):
|
||||
rh = pexprs(p)
|
||||
return ws + mkassert('int', 'eq', lh, rh)
|
||||
else:
|
||||
return ws + lh
|
||||
|
||||
|
||||
def main(args):
|
||||
inf = open(args.input, 'r') if args.input else sys.stdin
|
||||
outf = open(args.output, 'w') if args.output else sys.stdout
|
||||
|
||||
lexemes = LEX.copy()
|
||||
if args.pattern:
|
||||
lexemes['assert'] = args.pattern
|
||||
p = Parse(inf, lexemes)
|
||||
|
||||
# write extra verbose asserts
|
||||
mkdecls(outf, maxwidth=args.maxwidth)
|
||||
if args.input:
|
||||
outf.write("#line %d \"%s\"\n" % (1, args.input))
|
||||
|
||||
# parse and write out stmt at a time
|
||||
try:
|
||||
while True:
|
||||
outf.write(pstmt(p))
|
||||
if p.accept('sep'):
|
||||
outf.write(p.m)
|
||||
else:
|
||||
break
|
||||
except ParseFailure as f:
|
||||
pass
|
||||
|
||||
for i in range(p.off, len(p.tokens)):
|
||||
outf.write(p.tokens[i][1])
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Cpp step that increases assert verbosity")
|
||||
parser.add_argument('input', nargs='?',
|
||||
help="Input C file after cpp.")
|
||||
parser.add_argument('-o', '--output', required=True,
|
||||
help="Output C file.")
|
||||
parser.add_argument('-p', '--pattern', action='append',
|
||||
help="Patterns to search for starting an assert statement.")
|
||||
parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
|
||||
help="Maximum number of characters to display for strcmp and memcmp.")
|
||||
main(parser.parse_args())
|
61
components/fs/littlefs/littlefs/scripts/prefix.py
Executable file
61
components/fs/littlefs/littlefs/scripts/prefix.py
Executable file
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env python2
|
||||
|
||||
# This script replaces prefixes of files, and symbols in that file.
|
||||
# Useful for creating different versions of the codebase that don't
|
||||
# conflict at compile time.
|
||||
#
|
||||
# example:
|
||||
# $ ./scripts/prefix.py lfs2
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import glob
|
||||
import itertools
|
||||
import tempfile
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
DEFAULT_PREFIX = "lfs"
|
||||
|
||||
def subn(from_prefix, to_prefix, name):
    """Replace every occurrence of from_prefix in name with to_prefix,
    covering the lower-case, upper-case, and -DUPPER define forms.
    Returns (new_name, replacement_count)."""
    total = 0
    replacements = [
        ('\\b' + from_prefix, to_prefix),
        ('\\b' + from_prefix.upper(), to_prefix.upper()),
        ('\\B-D' + from_prefix.upper(), '-D' + to_prefix.upper()),
    ]
    for pat, rep in replacements:
        name, n = re.subn(pat, rep, name)
        total += n
    return name, total
|
||||
|
||||
def main(from_prefix, to_prefix=None, files=None):
    """Rename file and symbol prefixes across the repo.

    With one argument, renames DEFAULT_PREFIX -> from_prefix; with two,
    renames from_prefix -> to_prefix. Operates on the given files, or on
    everything tracked by git, staging the changes via git mv/add.
    """
    if not to_prefix:
        from_prefix, to_prefix = DEFAULT_PREFIX, from_prefix

    if not files:
        files = subprocess.check_output([
            'git', 'ls-tree', '-r', '--name-only', 'HEAD']).split()

    for oldname in files:
        # Rename any matching file names
        newname, namecount = subn(from_prefix, to_prefix, oldname)
        if namecount:
            subprocess.check_call(['git', 'mv', oldname, newname])

        # Rename any prefixes in file, writing through a temp file so a
        # crash mid-rewrite can't corrupt the original
        count = 0
        with open(newname+'~', 'w') as tempf:
            with open(newname) as newf:
                for line in newf:
                    line, n = subn(from_prefix, to_prefix, line)
                    count += n
                    tempf.write(line)
        shutil.copystat(newname, newname+'~')
        os.rename(newname+'~', newname)
        subprocess.check_call(['git', 'add', newname])

        # Summary; parenthesized print works on both Python 2 and 3,
        # while the original bare print statement is a Python 3 syntax
        # error
        print('%s: %d replacements' % (
            '%s -> %s' % (oldname, newname) if namecount else oldname,
            count))
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
sys.exit(main(*sys.argv[1:]))
|
26
components/fs/littlefs/littlefs/scripts/readblock.py
Executable file
26
components/fs/littlefs/littlefs/scripts/readblock.py
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import subprocess as sp
|
||||
|
||||
def main(args):
    """Read one block from a disk image and hex dump it via xxd."""
    with open(args.disk, 'rb') as f:
        f.seek(args.block * args.block_size)
        raw = f.read(args.block_size)
    # short reads (past the end of the image) are padded with the
    # erased-flash value 0xff
    block = raw.ljust(args.block_size, b'\xff')

    # what did you expect?
    print("%-8s %-s" % ('off', 'data'))
    return sp.run(['xxd', '-g1', '-'], input=block).returncode
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Hex dump a specific block in a disk.")
|
||||
parser.add_argument('disk',
|
||||
help="File representing the block device.")
|
||||
parser.add_argument('block_size', type=lambda x: int(x, 0),
|
||||
help="Size of a block in bytes.")
|
||||
parser.add_argument('block', type=lambda x: int(x, 0),
|
||||
help="Address of block to dump.")
|
||||
sys.exit(main(parser.parse_args()))
|
367
components/fs/littlefs/littlefs/scripts/readmdir.py
Executable file
367
components/fs/littlefs/littlefs/scripts/readmdir.py
Executable file
|
@ -0,0 +1,367 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import binascii
|
||||
import sys
|
||||
import itertools as it
|
||||
|
||||
TAG_TYPES = {
|
||||
'splice': (0x700, 0x400),
|
||||
'create': (0x7ff, 0x401),
|
||||
'delete': (0x7ff, 0x4ff),
|
||||
'name': (0x700, 0x000),
|
||||
'reg': (0x7ff, 0x001),
|
||||
'dir': (0x7ff, 0x002),
|
||||
'superblock': (0x7ff, 0x0ff),
|
||||
'struct': (0x700, 0x200),
|
||||
'dirstruct': (0x7ff, 0x200),
|
||||
'ctzstruct': (0x7ff, 0x202),
|
||||
'inlinestruct': (0x7ff, 0x201),
|
||||
'userattr': (0x700, 0x300),
|
||||
'tail': (0x700, 0x600),
|
||||
'softtail': (0x7ff, 0x600),
|
||||
'hardtail': (0x7ff, 0x601),
|
||||
'gstate': (0x700, 0x700),
|
||||
'movestate': (0x7ff, 0x7ff),
|
||||
'crc': (0x700, 0x500),
|
||||
}
|
||||
|
||||
class Tag:
|
||||
def __init__(self, *args):
    """Build a 32-bit metadata tag, either from a raw tag value
    (one arg) or from (type, id, size) parts (three args).

    String parts are parsed: a type name is looked up in TAG_TYPES,
    numeric strings are parsed with base auto-detection, and 'x'/'.'
    mean the wildcard value 0x3ff.
    """
    if len(args) == 1:
        self.tag = args[0]
    elif len(args) == 3:
        if isinstance(args[0], str):
            type = TAG_TYPES[args[0]][1]
        else:
            type = args[0]

        if isinstance(args[1], str):
            id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
        else:
            id = args[1]

        if isinstance(args[2], str):
            # bug fix: int()'s base argument must be an int; the
            # original passed the str type object (TypeError at
            # runtime). Base 0 matches the id branch above.
            size = int(args[2], 0) if args[2] not in 'x.' else 0x3ff
        else:
            size = args[2]

        self.tag = (type << 20) | (id << 10) | size
    else:
        assert False
|
||||
|
||||
@property
|
||||
def isvalid(self):
|
||||
return not bool(self.tag & 0x80000000)
|
||||
|
||||
@property
|
||||
def isattr(self):
|
||||
return not bool(self.tag & 0x40000000)
|
||||
|
||||
@property
|
||||
def iscompactable(self):
|
||||
return bool(self.tag & 0x20000000)
|
||||
|
||||
@property
|
||||
def isunique(self):
|
||||
return not bool(self.tag & 0x10000000)
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def type1(self):
|
||||
return (self.tag & 0x70000000) >> 20
|
||||
|
||||
@property
|
||||
def type3(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return (self.tag & 0x000ffc00) >> 10
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
return (self.tag & 0x000003ff) >> 0
|
||||
|
||||
@property
|
||||
def dsize(self):
|
||||
return 4 + (self.size if self.size != 0x3ff else 0)
|
||||
|
||||
@property
|
||||
def chunk(self):
|
||||
return self.type & 0xff
|
||||
|
||||
@property
|
||||
def schunk(self):
|
||||
return struct.unpack('b', struct.pack('B', self.chunk))[0]
|
||||
|
||||
def is_(self, type):
|
||||
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
|
||||
|
||||
def mkmask(self):
|
||||
return Tag(
|
||||
0x700 if self.isunique else 0x7ff,
|
||||
0x3ff if self.isattr else 0,
|
||||
0)
|
||||
|
||||
def chid(self, nid):
|
||||
ntag = Tag(self.type, nid, self.size)
|
||||
if hasattr(self, 'off'): ntag.off = self.off
|
||||
if hasattr(self, 'data'): ntag.data = self.data
|
||||
if hasattr(self, 'crc'): ntag.crc = self.crc
|
||||
return ntag
|
||||
|
||||
def typerepr(self):
|
||||
if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
|
||||
return 'crc (bad)'
|
||||
|
||||
reverse_types = {v: k for k, v in TAG_TYPES.items()}
|
||||
for prefix in range(12):
|
||||
mask = 0x7ff & ~((1 << prefix)-1)
|
||||
if (mask, self.type & mask) in reverse_types:
|
||||
type = reverse_types[mask, self.type & mask]
|
||||
if prefix > 0:
|
||||
return '%s %#0*x' % (
|
||||
type, prefix//4, self.type & ((1 << prefix)-1))
|
||||
else:
|
||||
return type
|
||||
else:
|
||||
return '%02x' % self.type
|
||||
|
||||
def idrepr(self):
|
||||
return repr(self.id) if self.id != 0x3ff else '.'
|
||||
|
||||
def sizerepr(self):
|
||||
return repr(self.size) if self.size != 0x3ff else 'x'
|
||||
|
||||
def __repr__(self):
|
||||
return 'Tag(%r, %d, %d)' % (self.typerepr(), self.id, self.size)
|
||||
|
||||
def __lt__(self, other):
|
||||
return (self.id, self.type) < (other.id, other.type)
|
||||
|
||||
def __bool__(self):
|
||||
return self.isvalid
|
||||
|
||||
def __int__(self):
|
||||
return self.tag
|
||||
|
||||
def __index__(self):
|
||||
return self.tag
|
||||
|
||||
class MetadataPair:
    """One littlefs metadata directory (mdir) decoded from raw block data.

    Given two blocks, decodes each independently and picks the one with
    the newer revision count; given one block, parses its commit log of
    tags, validating each commit against its CRC tag.

    Attributes: data (raw bytes), rev (revision count), all_ (every tag
    seen), log (tags in the last valid commit prefix), ids (active ids),
    tags (most recent tag per id/type), pair (self plus the older copy).
    """
    def __init__(self, blocks):
        if len(blocks) > 1:
            # decode each block separately and keep the most recent copy
            self.pair = [MetadataPair([block]) for block in blocks]
            self.pair = sorted(self.pair, reverse=True)

            self.data = self.pair[0].data
            self.rev = self.pair[0].rev
            self.tags = self.pair[0].tags
            self.ids = self.pair[0].ids
            self.log = self.pair[0].log
            self.all_ = self.pair[0].all_
            return

        self.pair = [self]
        self.data = blocks[0]
        block = self.data

        # first word is the little-endian revision count
        self.rev, = struct.unpack('<I', block[0:4])
        crc = binascii.crc32(block[0:4])

        # parse tags
        corrupt = False
        tag = Tag(0xffffffff)  # tags are xor-encoded against the previous tag
        off = 4
        self.log = []
        self.all_ = []
        while len(block) - off >= 4:
            ntag, = struct.unpack('>I', block[off:off+4])

            tag = Tag(int(tag) ^ ntag)
            tag.off = off + 4
            tag.data = block[off+4:off+tag.dsize]
            if tag.is_('crc'):
                # crc tags only cover their own 4-byte payload header
                crc = binascii.crc32(block[off:off+4+4], crc)
            else:
                crc = binascii.crc32(block[off:off+tag.dsize], crc)
            tag.crc = crc
            off += tag.dsize

            self.all_.append(tag)

            if tag.is_('crc'):
                # is valid commit?
                if crc != 0xffffffff:
                    corrupt = True
                if not corrupt:
                    # commit checks out; everything so far is trustworthy
                    self.log = self.all_.copy()

                # reset tag parsing
                crc = 0
                tag = Tag(int(tag) ^ ((tag.type & 1) << 31))

        # find active ids
        self.ids = list(it.takewhile(
            lambda id: Tag('name', id, 0) in self,
            it.count()))

        # find most recent tags
        self.tags = []
        for tag in self.log:
            if tag.is_('crc') or tag.is_('splice'):
                continue
            elif tag.id == 0x3ff:
                if tag in self and self[tag] is tag:
                    self.tags.append(tag)
            else:
                # id could have changed (splices move ids); I know this
                # is messy and slow but it works
                for id in self.ids:
                    ntag = tag.chid(id)
                    if ntag in self and self[ntag] is tag:
                        self.tags.append(ntag)

        self.tags = sorted(self.tags)

    def __bool__(self):
        # an mdir with no valid commit is corrupt/falsy
        return bool(self.log)

    def __lt__(self, other):
        # corrupt blocks don't count
        if not self or not other:
            return bool(other)

        # use sequence arithmetic to avoid overflow
        return not ((other.rev - self.rev) & 0x80000000)

    def __contains__(self, args):
        try:
            self[args]
            return True
        except KeyError:
            return False

    def __getitem__(self, args):
        """Look up the most recent tag matching (gmask, gtag), or a bare
        tag (mask derived via mkmask). Raises KeyError if absent/deleted."""
        if isinstance(args, tuple):
            gmask, gtag = args
        else:
            gmask, gtag = args.mkmask(), args

        # walk the log backwards, tracking id shifts from splice tags
        gdiff = 0
        for tag in reversed(self.log):
            if (gmask.id != 0 and tag.is_('splice') and
                    tag.id <= gtag.id - gdiff):
                if tag.is_('create') and tag.id == gtag.id - gdiff:
                    # creation point
                    break

                gdiff += tag.schunk

            if ((int(gmask) & int(tag)) ==
                    (int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
                if tag.size == 0x3ff:
                    # deleted
                    break

                return tag

        raise KeyError(gmask, gtag)

    def _dump_tags(self, tags, f=sys.stdout, truncate=True):
        # shared table printer for dump_tags/dump_log/dump_all
        f.write("%-8s %-8s %-13s %4s %4s" % (
            'off', 'tag', 'type', 'id', 'len'))
        if truncate:
            f.write(' data (truncated)')
        f.write('\n')

        for tag in tags:
            f.write("%08x: %08x %-13s %4s %4s" % (
                tag.off, tag,
                tag.typerepr(), tag.idrepr(), tag.sizerepr()))
            if truncate:
                # first 8 payload bytes as hex + printable-ascii preview
                f.write(" %-23s %-8s\n" % (
                    ' '.join('%02x' % c for c in tag.data[:8]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, tag.data[:8]))))
            else:
                # full payload, 16 bytes per row
                f.write("\n")
                for i in range(0, len(tag.data), 16):
                    f.write(" %08x: %-47s %-16s\n" % (
                        tag.off+i,
                        ' '.join('%02x' % c for c in tag.data[i:i+16]),
                        ''.join(c if c >= ' ' and c <= '~' else '.'
                            for c in map(chr, tag.data[i:i+16]))))

    def dump_tags(self, f=sys.stdout, truncate=True):
        """Dump only the most recent tag for each id/type."""
        self._dump_tags(self.tags, f=f, truncate=truncate)

    def dump_log(self, f=sys.stdout, truncate=True):
        """Dump all tags in the last valid commit prefix."""
        self._dump_tags(self.log, f=f, truncate=truncate)

    def dump_all(self, f=sys.stdout, truncate=True):
        """Dump every tag seen, including tags in corrupted commits."""
        self._dump_tags(self.all_, f=f, truncate=truncate)
|
||||
|
||||
def main(args):
    """Read one metadata pair from a disk image and dump its tags.

    Returns 0 if the mdir decoded cleanly, 1 if it was corrupted.
    """
    blocks = []
    with open(args.disk, 'rb') as f:
        for block in [args.block1, args.block2]:
            if block is None:
                continue
            f.seek(block * args.block_size)
            # pad short reads with 0xff (erased-flash value)
            blocks.append(f.read(args.block_size)
                .ljust(args.block_size, b'\xff'))

    # find most recent pair
    mdir = MetadataPair(blocks)

    # decode the tail pointer, if any (8 bytes = two block addresses)
    try:
        mdir.tail = mdir[Tag('tail', 0, 0)]
        if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
            mdir.tail = None
    except KeyError:
        mdir.tail = None

    print("mdir {%s} rev %d%s%s%s" % (
        ', '.join('%#x' % b
            for b in [args.block1, args.block2]
            if b is not None),
        mdir.rev,
        ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
        if len(mdir.pair) > 1 else '',
        ' (corrupted!)' if not mdir else '',
        ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
        if mdir.tail else ''))
    if args.all:
        mdir.dump_all(truncate=not args.no_truncate)
    elif args.log:
        mdir.dump_log(truncate=not args.no_truncate)
    else:
        mdir.dump_tags(truncate=not args.no_truncate)

    return 0 if mdir else 1
|
||||
|
||||
if __name__ == "__main__":
    # Command-line entry point for dumping a littlefs metadata pair.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump useful info about metadata pairs in littlefs.")
    parser.add_argument('disk',
        help="File representing the block device.")
    # int(x, 0) accepts decimal, 0x-hex, and 0o-octal values
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', type=lambda x: int(x, 0),
        help="First block address for finding the metadata pair.")
    parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0),
        help="Second block address for finding the metadata pair.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, included tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Don't truncate large amounts of data.")
    sys.exit(main(parser.parse_args()))
|
183
components/fs/littlefs/littlefs/scripts/readtree.py
Executable file
183
components/fs/littlefs/littlefs/scripts/readtree.py
Executable file
|
@ -0,0 +1,183 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import sys
|
||||
import json
|
||||
import io
|
||||
import itertools as it
|
||||
from readmdir import Tag, MetadataPair
|
||||
|
||||
def main(args):
    """Walk the littlefs metadata tree in a disk image and dump it.

    Follows tail pointers from the superblock pair, reconstructs
    directory paths, and prints superblock/gstate/mdir info.
    Returns 0 on success, 1 if any mdir was corrupted, 2 on a
    tail-pointer cycle.
    """
    superblock = None
    gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
    dirs = []
    mdirs = []
    corrupted = []
    cycle = False
    with open(args.disk, 'rb') as f:
        tail = (args.block1, args.block2)
        hard = False
        while True:
            # have we visited this pair before? (tail-pointer cycle)
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            # record block addresses in newest-first order to match pair
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                # global state is xor-accumulated across all mdirs
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a,b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # corrupted?
            if not mdir:
                corrupted.append(mdir)

            # add to directories; a non-hardtail ends the current dir
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')  # NOTE(review): assigned but unused

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[frozenset(dir[0].blocks)] = dir

    # breadth-first walk from the root assigning a path to each dir
    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    try:
                        npath = tag.data.decode('utf8')
                        dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                        nblocks = struct.unpack('<II', dirstruct.data)
                        nmdir = dirtable[frozenset(nblocks)]
                        pending.append(((path + '/' + npath), nmdir))
                    except KeyError:
                        pass

        dir[0].path = path.replace('//', '/')

    # print littlefs + version info
    version = ('?', '?')
    if superblock:
        version = tuple(reversed(
            struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
    print("%-47s%s" % ("littlefs v%s.%s" % version,
        "data (truncated, if it fits)"
        if not any([args.no_truncate, args.log, args.all]) else ""))

    # print gstate
    print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
    # gstate layout here: 4-byte tag then two 4-byte block addresses
    tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
    blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
    if tag.size or not tag.isvalid:
        print(" orphans >=%d" % max(tag.size, 1))
    if tag.type:
        print(" move dir {%#x, %#x} id %d" % (
            blocks[0], blocks[1], tag.id))

    # print mdir info
    for i, dir in enumerate(dirs):
        print("dir %s" % (json.dumps(dir[0].path)
            if hasattr(dir[0], 'path') else '(orphan)'))

        for j, mdir in enumerate(dir):
            print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
                mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
                ' (corrupted!)' if not mdir else '',
                ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
                if mdir.tail else ''))

            # capture the dump so each line can be prefixed with tree art
            f = io.StringIO()
            if args.log:
                mdir.dump_log(f, truncate=not args.no_truncate)
            elif args.all:
                mdir.dump_all(f, truncate=not args.no_truncate)
            else:
                mdir.dump_tags(f, truncate=not args.no_truncate)

            lines = list(filter(None, f.getvalue().split('\n')))
            for k, line in enumerate(lines):
                print("%s %s" % (
                    ' ' if j == len(dir)-1 else
                    'v' if k == len(lines)-1 else
                    '|',
                    line))

    errcode = 0
    for mdir in corrupted:
        errcode = errcode or 1
        print("*** corrupted mdir {%#x, %#x}! ***" % (
            mdir.blocks[0], mdir.blocks[1]))

    if cycle:
        errcode = errcode or 2
        print("*** cycle detected {%#x, %#x}! ***" % (
            cycle[0], cycle[1]))

    return errcode
|
||||
|
||||
if __name__ == "__main__":
    # Command-line entry point for dumping the littlefs metadata tree.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump semantic info about the metadata tree in littlefs")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    # blocks {0, 1} are where littlefs keeps its superblock pair
    parser.add_argument('block1', nargs='?', default=0,
        type=lambda x: int(x, 0),
        help="Optional first block address for finding the superblock.")
    parser.add_argument('block2', nargs='?', default=1,
        type=lambda x: int(x, 0),
        help="Optional second block address for finding the superblock.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, included tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Show the full contents of files/attrs/tags.")
    sys.exit(main(parser.parse_args()))
|
430
components/fs/littlefs/littlefs/scripts/stack.py
Executable file
430
components/fs/littlefs/littlefs/scripts/stack.py
Executable file
|
@ -0,0 +1,430 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find stack usage at the function level. Will detect recursion and
|
||||
# report as infinite stack usage.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
import math as m
|
||||
|
||||
|
||||
CI_PATHS = ['*.ci']
|
||||
|
||||
def collect(paths, **args):
    """Collect stack usage info from gcc callgraph (.ci, vcg-format) files.

    Returns a flat list of (file, function, frame, limit, deps) tuples,
    where frame is the function's own stack frame in bytes, limit is the
    worst-case cumulative stack (inf on recursion), and deps is a set of
    (file, function) call targets.
    """
    # parse the vcg format
    k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
    v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)

    def parse_vcg_body(rest):
        # recursive-descent parse of one '{...}' body; returns (nodes, rest)
        # (renamed from the original inner parse_vcg, which shadowed its
        # enclosing function's name)
        node = []
        while True:
            rest = rest.lstrip()
            m = k_pattern.match(rest)
            if not m:
                return (node, rest)
            k, rest = m.group(1), rest[m.end(0):]

            rest = rest.lstrip()
            if rest.startswith('{'):
                v, rest = parse_vcg_body(rest[1:])
                assert rest[0] == '}', "unexpected %r" % rest[0:1]
                rest = rest[1:]
                node.append((k, v))
            else:
                m = v_pattern.match(rest)
                assert m, "unexpected %r" % rest[0:1]
                v, rest = m.group(1) or m.group(2), rest[m.end(0):]
                node.append((k, v))

    def parse_vcg(rest):
        node, rest = parse_vcg_body(rest)
        assert rest == '', "unexpected %r" % rest[0:1]
        return node

    # collect into functions
    # each entry: (file, function, frame_size, call_targets)
    results = co.defaultdict(lambda: (None, None, 0, set()))
    f_pattern = re.compile(
        r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
    for path in paths:
        with open(path) as f:
            vcg = parse_vcg(f.read())
        for k, graph in vcg:
            if k != 'graph':
                continue
            for k, info in graph:
                if k == 'node':
                    info = dict(info)
                    # node labels look like "func\nfile:line:col\nN bytes (static)"
                    m = f_pattern.match(info['label'])
                    if m:
                        function, file, size, type = m.groups()
                        if not args.get('quiet') and type != 'static':
                            print('warning: found non-static stack for %s (%s)'
                                % (function, type))
                        _, _, _, targets = results[info['title']]
                        results[info['title']] = (
                            file, function, int(size), targets)
                elif k == 'edge':
                    info = dict(info)
                    _, _, _, targets = results[info['sourcename']]
                    targets.add(info['targetname'])
                else:
                    continue

    if not args.get('everything'):
        for source, (s_file, s_function, _, _) in list(results.items()):
            # bug fix: an edge whose source never appeared as a node leaves
            # a default entry with file=None, which previously crashed on
            # startswith; discard those along with internal functions
            if (s_file is None
                    or s_file.startswith('<')
                    or s_file.startswith('/usr/include')):
                del results[source]

    # find maximum stack size recursively, this requires also detecting cycles
    # (in case of recursion)
    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in results:
            return 0
        _, _, frame, targets = results[source]

        limit = 0
        for target in targets:
            if target in seen:
                # found a cycle
                return float('inf')
            limit_ = find_limit(target, seen | {target})
            limit = max(limit, limit_)

        return frame + limit

    def find_deps(targets):
        # resolve call targets to (file, function) pairs we know about
        deps = set()
        for target in targets:
            if target in results:
                t_file, t_function, _, _ = results[target]
                deps.add((t_file, t_function))
        return deps

    # flatten into a list
    flat_results = []
    for source, (s_file, s_function, frame, targets) in results.items():
        limit = find_limit(source)
        deps = find_deps(targets)
        flat_results.append((s_file, s_function, frame, limit, deps))

    return flat_results
|
||||
|
||||
def main(**args):
    """Report stack usage per function/file, optionally diffing, merging,
    and writing CSV. args are the parsed command-line flags as a dict."""
    def openio(path, mode='r'):
        # '-' means stdin/stdout; dup so 'with' doesn't close the real fd
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .ci files
        paths = []
        for path in args['ci_paths']:
            if os.path.isdir(path):
                path = path + '/*.ci'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .ci files found in %r?' % args['ci_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        # reuse previously collected results from a CSV file
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (result['file'],
                 result['name'],
                 int(result['stack_frame']),
                 float(result['stack_limit']),  # note limit can be inf
                 set())
                for result in r
                if result.get('stack_frame') not in {None, ''}
                if result.get('stack_limit') not in {None, ''}]

    total_frame = 0
    total_limit = 0
    for _, _, frame, limit, _ in results:
        total_frame += frame
        total_limit = max(total_limit, limit)

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (result['file'],
                     result['name'],
                     int(result['stack_frame']),
                     float(result['stack_limit']),
                     set())
                    for result in r
                    if result.get('stack_frame') not in {None, ''}
                    if result.get('stack_limit') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total_frame = 0
        prev_total_limit = 0
        for _, _, frame, limit, _ in prev_results:
            prev_total_frame += frame
            prev_total_limit = max(prev_total_limit, limit)

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            # keep any extra columns from the existing CSV, but let our
            # stack_frame/stack_limit values replace the old ones
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('stack_frame', None)
                        result.pop('stack_limit', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, frame, limit, _ in results:
            merged_results[(file, func)]['stack_frame'] = frame
            merged_results[(file, func)]['stack_limit'] = limit

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        # aggregate per function name or per file:
        # frames add up, limits take the max, deps union
        entries = co.defaultdict(lambda: (0, 0, set()))
        for file, func, frame, limit, deps in results:
            entry = (file if by == 'file' else func)
            entry_frame, entry_limit, entry_deps = entries[entry]
            entries[entry] = (
                entry_frame + frame,
                max(entry_limit, limit),
                entry_deps | {file if by == 'file' else func
                    for file, func in deps})
        return entries

    def diff_entries(olds, news):
        # entry: (old_frame, old_limit, new_frame, new_limit,
        #         diff_frame, diff_limit, ratio, deps)
        diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
        for name, (new_frame, new_limit, deps) in news.items():
            diff[name] = (
                None, None,
                new_frame, new_limit,
                new_frame, new_limit,
                1.0,
                deps)
        for name, (old_frame, old_limit, _) in olds.items():
            _, _, new_frame, new_limit, _, _, _, deps = diff[name]
            diff[name] = (
                old_frame, old_limit,
                new_frame, new_limit,
                (new_frame or 0) - (old_frame or 0),
                # inf - inf is treated as "no change"
                0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else (new_limit or 0) - (old_limit or 0),
                0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else +float('inf') if m.isinf(new_limit or 0)
                    else -float('inf') if m.isinf(old_limit or 0)
                    else +0.0 if not old_limit and not new_limit
                    else +1.0 if not old_limit
                    else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
                deps)
        return diff

    def sorted_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-x[1][0], x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+x[1][0], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
        else:
            # default: sort by ratio of change
            return sorted(entries, key=lambda x: (-x[1][6], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s %7s' % (by, 'frame', 'limit'))
        else:
            print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, frame, limit):
        print("%-36s %7d %7s" % (name,
            frame, '∞' if m.isinf(limit) else int(limit)))

    def print_diff_entry(name,
            old_frame, old_limit,
            new_frame, new_limit,
            diff_frame, diff_limit,
            ratio):
        # '-' marks a missing (added/removed) side; limits may be infinite
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if new_limit is not None else "-",
            diff_frame,
            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit),
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio)))

    def print_entries(by='name'):
        # build optional tree of dependencies
        # (note: 'print' and 'filter' parameters intentionally shadow the
        # builtins within print_deps)
        def print_deps(entries, depth, print,
                filter=lambda _: True,
                prefixes=('', '', '', '')):
            entries = entries if isinstance(entries, list) else list(entries)
            filtered_entries = [(name, entry)
                for name, entry in entries
                if filter(name)]
            for i, (name, entry) in enumerate(filtered_entries):
                last = (i == len(filtered_entries)-1)
                print(prefixes[0+last] + name, entry)

                if depth > 0:
                    # recurse into this entry's deps with tree-drawing prefixes
                    deps = entry[-1]
                    print_deps(entries, depth-1, print,
                        lambda name: name in deps,
                        (prefixes[2+last] + "|-> ",
                         prefixes[2+last] + "'-> ",
                         prefixes[2+last] + "|   ",
                         prefixes[2+last] + "    "))

        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            print_deps(
                sorted_entries(entries.items()),
                args.get('depth') or 0,
                lambda name, entry: print_entry(name, *entry[:-1]))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)

            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
            print_deps(
                filter(
                    lambda x: x[1][6] or args.get('all'),
                    sorted_diff_entries(diff.items())),
                args.get('depth') or 0,
                lambda name, entry: print_diff_entry(name, *entry[:-1]))

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_frame, total_limit)
        else:
            diff_frame = total_frame - prev_total_frame
            diff_limit = (
                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else (total_limit or 0) - (prev_total_limit or 0))
            ratio = (
                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else +float('inf') if m.isinf(total_limit or 0)
                else -float('inf') if m.isinf(prev_total_limit or 0)
                else 0.0 if not prev_total_limit and not total_limit
                else 1.0 if not prev_total_limit
                else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
            print_diff_entry('TOTAL',
                prev_total_frame, prev_total_limit,
                total_frame, total_limit,
                diff_frame, diff_limit,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Command-line entry point for the stack-usage report.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find stack usage at the function level.")
    parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
        help="Description of where to find *.ci files. May be a directory \
or a list of paths. Defaults to %r." % CI_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't parse callgraph files, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--limit-sort', action='store_true',
        help="Sort by stack limit.")
    parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
        help="Sort by stack limit, but backwards.")
    parser.add_argument('--frame-sort', action='store_true',
        help="Sort by stack frame size.")
    parser.add_argument('--reverse-frame-sort', action='store_true',
        help="Sort by stack frame size, but backwards.")
    # -L with no value means unlimited depth (const=inf)
    parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
        nargs='?', const=float('inf'),
        help="Depth of dependencies to show.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total stack size.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
|
331
components/fs/littlefs/littlefs/scripts/structs.py
Executable file
331
components/fs/littlefs/littlefs/scripts/structs.py
Executable file
|
@ -0,0 +1,331 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find struct sizes.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
    """Measure struct sizes by parsing DWARF debug info out of .o files.

    For each object file, runs the objdump tool (args['objdump_tool'], a
    list that may carry extra arguments) twice:
      1. --dwarf=rawline, to map DWARF file numbers to file names, and
      2. --dwarf=info, to walk the DIE dump for structure_type entries.

    Returns a flat list of (file, struct_name, size) tuples, filtered to
    structs declared in .h files unless args['everything'] is set.

    Note: uses `sys`, which is imported at module scope by the __main__
    block at the bottom of this script before main()/collect() run.
    """
    # matches "  <no>  <dir>  ...  <file>" rows of the rawline dump
    # (raw strings: '\s' in a plain literal is an invalid escape sequence,
    # a DeprecationWarning since Python 3.6 and SyntaxWarning in 3.12)
    decl_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)'
        r'\s+(?P<dir>[0-9]+)'
        r'\s+.*'
        r'\s+(?P<file>[^\s]+)$')
    # matches the four DIE attributes we care about, one per line
    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

    results = co.defaultdict(lambda: 0)
    for path in paths:
        # find decl, we want to filter by structs in .h files
        decls = {}
        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # find file numbers
            m = decl_pattern.match(line)
            if m:
                decls[int(m.group('no'))] = m.group('file')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect structs as we parse dwarf info
        found = False
        name = None
        decl = None
        size = None

        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # state machine here to find structs
            m = struct_pattern.match(line)
            if m:
                if m.group('tag'):
                    # a new DIE tag flushes the struct accumulated so far
                    if (name is not None
                            and decl is not None
                            and size is not None):
                        decl = decls.get(decl, '?')
                        results[(decl, name)] = size
                    found = (m.group('tag') == 'structure_type')
                    name = None
                    decl = None
                    size = None
                elif found and m.group('name'):
                    name = m.group('name')
                elif found and name and m.group('decl'):
                    decl = int(m.group('decl'))
                elif found and name and m.group('size'):
                    size = int(m.group('size'))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, struct), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # only include structs declared in header files in the current
        # directory, ignore internal-only # structs (these are represented
        # in other measurements)
        if not args.get('everything'):
            if not file.endswith('.h'):
                continue
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub(r'\.o$', '.c', file)

        flat_results.append((file, struct, size))

    return flat_results
|
||||
|
||||
|
||||
def main(**args):
    """Report struct sizes collected from .o files or loaded from a CSV.

    `args` is the dict of parsed command-line options built by the
    argparse setup at the bottom of this script: obj_paths, use, diff,
    merge, output, everything, files, summary, quiet, etc.
    """
    # open a real file, or dup stdin/stdout when path is '-'
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        # reuse sizes from an earlier run instead of re-parsing objects
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['struct_size']))
                for result in r
                if result.get('struct_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['struct_size']))
                    for result in r
                    if result.get('struct_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        struct = result.pop('name', '')
                        result.pop('struct_size', None)
                        merged_results[(file, struct)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, struct, size in results:
            merged_results[(file, struct)]['struct_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
            w.writeheader()
            for (file, struct), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': struct, **result})

    # print results
    def dedup_entries(results, by='name'):
        # fold sizes together per file or per struct name
        entries = co.defaultdict(lambda: 0)
        for file, struct, size in results:
            entry = (file if by == 'file' else struct)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        # entry -> (old, new, new-old, relative ratio)
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            # default diff order: largest relative change first
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point for structs.py
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find struct sizes.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find struct sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff struct size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level struct sizes.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total struct size.")
    # split so extra flags can ride along with the tool name
    parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
        help="Path to the objdump tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
|
279
components/fs/littlefs/littlefs/scripts/summary.py
Executable file
279
components/fs/littlefs/littlefs/scripts/summary.py
Executable file
|
@ -0,0 +1,279 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to summarize the outputs of other scripts. Operates on CSV files.
|
||||
#
|
||||
|
||||
import functools as ft
|
||||
import collections as co
|
||||
import os
|
||||
import csv
|
||||
import re
|
||||
import math as m
|
||||
|
||||
# displayable fields
#
# Each Field describes one measurement column that may appear in the
# merged CSV files:
#   name:  column name shown in the table header
#   parse: extract this field's value from a CSV row (raises KeyError/
#          ValueError when the row doesn't carry it)
#   acc:   fold a list of values into one (sum, max, tuple-wise add, ...)
#   key:   map an accumulated value to a sortable scalar
#   fmt:   printf-style width used for the column
#   repr:  map a value to its printable form
#   null:  placeholder printed when the value is missing
#   ratio: relative change between an old and a new value, used in diffs
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
    # name, parse, accumulate, fmt, print, null
    Field('code',
        lambda r: int(r['code_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('data',
        lambda r: int(r['data_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    # stack limits are floats so that unbounded recursion can be inf
    Field('stack',
        lambda r: float(r['stack_limit']),
        max,
        lambda r: r,
        '%7s',
        lambda r: '∞' if m.isinf(r) else int(r),
        '-',
        lambda old, new: (new-old)/old),
    Field('structs',
        lambda r: int(r['struct_size']),
        sum,
        lambda r: r,
        '%8s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    # coverage is a (hits, count) pair accumulated element-wise
    Field('coverage',
        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
        lambda r: r[0]/r[1],
        '%19s',
        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
        '%11s %7s' % ('-', '-'),
        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
|
||||
|
||||
|
||||
def main(**args):
    """Merge measurement CSVs and print a summary table, optionally a diff.

    `args` is the dict of parsed command-line options built by the
    argparse setup at the bottom of this script: csv_paths, diff, fields,
    all_fields, sort, reverse_sort, files, summary, all.
    """
    # open a real file, or dup stdin/stdout when path is '-'
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find results
    results = co.defaultdict(lambda: {})
    for path in args.get('csv_paths', '-'):
        try:
            with openio(path) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = results[(file, name)]
                    for field in FIELDS:
                        try:
                            # note: r is rebound here from the reader to the
                            # parsed value; the for-loop keeps its own iterator
                            r = field.parse(result)
                            if field.name in prev:
                                results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            # row doesn't carry this field, skip it
                            pass
        except FileNotFoundError:
            pass

    # find fields
    if args.get('all_fields'):
        fields = FIELDS
    elif args.get('fields') is not None:
        fields_dict = {field.name: field for field in FIELDS}
        fields = [fields_dict[f] for f in args['fields']]
    else:
        # default: show only fields that actually appear in the input
        fields = []
        for field in FIELDS:
            if any(field.name in result for result in results.values()):
                fields.append(field)

    # find total for every field
    total = {}
    for result in results.values():
        for field in fields:
            if field.name in result and field.name in total:
                total[field.name] = field.acc(
                    [total[field.name], result[field.name]])
            elif field.name in result:
                total[field.name] = result[field.name]

    # find previous results?
    if args.get('diff'):
        prev_results = co.defaultdict(lambda: {})
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = prev_results[(file, name)]
                    for field in FIELDS:
                        try:
                            r = field.parse(result)
                            if field.name in prev:
                                prev_results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                prev_results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            pass
        except FileNotFoundError:
            pass

        prev_total = {}
        for result in prev_results.values():
            for field in fields:
                if field.name in result and field.name in prev_total:
                    prev_total[field.name] = field.acc(
                        [prev_total[field.name], result[field.name]])
                elif field.name in result:
                    prev_total[field.name] = result[field.name]

    # print results
    def dedup_entries(results, by='name'):
        # fold values together per file or per function name
        entries = co.defaultdict(lambda: {})
        for (file, func), result in results.items():
            entry = (file if by == 'file' else func)
            prev = entries[entry]
            for field in fields:
                if field.name in result and field.name in prev:
                    entries[entry][field.name] = field.acc(
                        [prev[field.name], result[field.name]])
                elif field.name in result:
                    entries[entry][field.name] = result[field.name]
        return entries

    def sorted_entries(entries):
        if args.get('sort') is not None:
            field = {field.name: field for field in FIELDS}[args['sort']]
            # entries missing the sort field sort last (key -1)
            return sorted(entries, key=lambda x: (
                -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        elif args.get('reverse_sort') is not None:
            field = {field.name: field for field in FIELDS}[args['reverse_sort']]
            return sorted(entries, key=lambda x: (
                +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        else:
            return sorted(entries)

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
            print()
        else:
            # diff mode reserves an extra 9-char ratio column per field
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
                print(' %-9s' % '', end='')
            print()

    def print_entry(name, result):
        print('%-36s' % name, end='')
        for field in fields:
            r = result.get(field.name)
            if r is not None:
                print((' '+field.fmt) % field.repr(r), end='')
            else:
                print((' '+field.fmt) % '-', end='')
        print()

    def print_diff_entry(name, old, new):
        print('%-36s' % name, end='')
        for field in fields:
            n = new.get(field.name)
            if n is not None:
                print((' '+field.fmt) % field.repr(n), end='')
            else:
                print((' '+field.fmt) % '-', end='')
            o = old.get(field.name)
            # ratio handles appearing/disappearing values and infinities
            ratio = (
                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
                else +float('inf') if m.isinf(n or 0)
                else -float('inf') if m.isinf(o or 0)
                else 0.0 if not o and not n
                else +1.0 if not o
                else -1.0 if not n
                else field.ratio(o, n))
            print(' %-9s' % (
                '' if not ratio
                else '(+∞%)' if ratio > 0 and m.isinf(ratio)
                else '(-∞%)' if ratio < 0 and m.isinf(ratio)
                else '(%+.1f%%)' % (100*ratio)), end='')
        print()

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, result in sorted_entries(entries.items()):
                print_entry(name, result)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for name in entries if name not in prev_entries),
                sum(1 for name in prev_entries if name not in entries)))
            for name, result in sorted_entries(entries.items()):
                if args.get('all') or result != prev_entries.get(name, {}):
                    print_diff_entry(name, prev_entries.get(name, {}), result)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            print_diff_entry('TOTAL', prev_total, total)

    if args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # command-line entry point for summary.py
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Summarize measurements")
    parser.add_argument('csv_paths', nargs='*', default='-',
        help="Description of where to find *.csv files. May be a directory \
            or list of paths. *.csv files will be merged to show the total \
            coverage.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all objects, not just the ones that changed.")
    parser.add_argument('-e', '--all-fields', action='store_true',
        help="Show all fields, even those with no results.")
    # raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12)
    parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
        help="Comma separated list of fields to print, by default all fields \
            that are found in the CSV files are printed.")
    parser.add_argument('-s', '--sort',
        help="Sort by this field.")
    parser.add_argument('-S', '--reverse-sort',
        help="Sort by this field, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the totals.")
    sys.exit(main(**vars(parser.parse_args())))
|
860
components/fs/littlefs/littlefs/scripts/test.py
Executable file
860
components/fs/littlefs/littlefs/scripts/test.py
Executable file
|
@ -0,0 +1,860 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
# This script manages littlefs tests, which are configured with
|
||||
# .toml files stored in the tests directory.
|
||||
#
|
||||
|
||||
import toml
|
||||
import glob
|
||||
import re
|
||||
import os
|
||||
import io
|
||||
import itertools as it
|
||||
import collections.abc as abc
|
||||
import subprocess as sp
|
||||
import base64
|
||||
import sys
|
||||
import copy
|
||||
import shlex
|
||||
import pty
|
||||
import errno
|
||||
import signal
|
||||
|
||||
TEST_PATHS = 'tests'
|
||||
RULES = """
|
||||
# add block devices to sources
|
||||
TESTSRC ?= $(SRC) $(wildcard bd/*.c)
|
||||
|
||||
define FLATTEN
|
||||
%(path)s%%$(subst /,.,$(target)): $(target)
|
||||
./scripts/explode_asserts.py $$< -o $$@
|
||||
endef
|
||||
$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
|
||||
|
||||
-include %(path)s*.d
|
||||
.SECONDARY:
|
||||
|
||||
%(path)s.test: %(path)s.test.o \\
|
||||
$(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
|
||||
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
|
||||
|
||||
# needed in case builddir is different
|
||||
%(path)s%%.o: %(path)s%%.c
|
||||
$(CC) -c -MMD $(CFLAGS) $< -o $@
|
||||
"""
|
||||
COVERAGE_RULES = """
|
||||
%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
|
||||
|
||||
# delete lingering coverage
|
||||
%(path)s.test: | %(path)s.info.clean
|
||||
.PHONY: %(path)s.info.clean
|
||||
%(path)s.info.clean:
|
||||
rm -f %(path)s*.gcda
|
||||
|
||||
# accumulate coverage info
|
||||
.PHONY: %(path)s.info
|
||||
%(path)s.info:
|
||||
$(strip $(LCOV) -c \\
|
||||
$(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
|
||||
--rc 'geninfo_adjust_src_path=$(shell pwd)' \\
|
||||
-o $@)
|
||||
$(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
|
||||
ifdef COVERAGETARGET
|
||||
$(strip $(LCOV) -a $@ \\
|
||||
$(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
|
||||
-o $(COVERAGETARGET))
|
||||
endif
|
||||
"""
|
||||
GLOBALS = """
|
||||
//////////////// AUTOGENERATED TEST ////////////////
|
||||
#include "lfs.h"
|
||||
#include "bd/lfs_testbd.h"
|
||||
#include <stdio.h>
|
||||
extern const char *lfs_testbd_path;
|
||||
extern uint32_t lfs_testbd_cycles;
|
||||
"""
|
||||
DEFINES = {
|
||||
'LFS_READ_SIZE': 16,
|
||||
'LFS_PROG_SIZE': 'LFS_READ_SIZE',
|
||||
'LFS_BLOCK_SIZE': 512,
|
||||
'LFS_BLOCK_COUNT': 1024,
|
||||
'LFS_BLOCK_CYCLES': -1,
|
||||
'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
|
||||
'LFS_LOOKAHEAD_SIZE': 16,
|
||||
'LFS_ERASE_VALUE': 0xff,
|
||||
'LFS_ERASE_CYCLES': 0,
|
||||
'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
}
|
||||
PROLOGUE = """
|
||||
// prologue
|
||||
__attribute__((unused)) lfs_t lfs;
|
||||
__attribute__((unused)) lfs_testbd_t bd;
|
||||
__attribute__((unused)) lfs_file_t file;
|
||||
__attribute__((unused)) lfs_dir_t dir;
|
||||
__attribute__((unused)) struct lfs_info info;
|
||||
__attribute__((unused)) char path[1024];
|
||||
__attribute__((unused)) uint8_t buffer[(1024 > LFS_BLOCK_SIZE * 4) ? (1024) : (LFS_BLOCK_SIZE * 4)];
|
||||
__attribute__((unused)) lfs_size_t size;
|
||||
__attribute__((unused)) int err;
|
||||
|
||||
__attribute__((unused)) const struct lfs_config cfg = {
|
||||
.context = &bd,
|
||||
.read = lfs_testbd_read,
|
||||
.prog = lfs_testbd_prog,
|
||||
.erase = lfs_testbd_erase,
|
||||
.sync = lfs_testbd_sync,
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.block_size = LFS_BLOCK_SIZE,
|
||||
.block_count = LFS_BLOCK_COUNT,
|
||||
.block_cycles = LFS_BLOCK_CYCLES,
|
||||
.cache_size = LFS_CACHE_SIZE,
|
||||
.lookahead_size = LFS_LOOKAHEAD_SIZE,
|
||||
};
|
||||
|
||||
__attribute__((unused)) const struct lfs_testbd_config bdcfg = {
|
||||
.erase_value = LFS_ERASE_VALUE,
|
||||
.erase_cycles = LFS_ERASE_CYCLES,
|
||||
.badblock_behavior = LFS_BADBLOCK_BEHAVIOR,
|
||||
.power_cycles = lfs_testbd_cycles,
|
||||
};
|
||||
|
||||
lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
|
||||
"""
|
||||
EPILOGUE = """
|
||||
// epilogue
|
||||
lfs_testbd_destroy(&cfg) => 0;
|
||||
"""
|
||||
PASS = '\033[32m✓\033[0m'
|
||||
FAIL = '\033[31m✗\033[0m'
|
||||
|
||||
class TestFailure(Exception):
    """Raised when a test case run fails.

    Carries the failing case plus enough context (exit code, captured
    stdout lines, parsed assert info) for callers to report or re-run it.
    """
    def __init__(self, case, returncode=None, stdout=None, assert_=None):
        # stash everything verbatim; consumers read these attributes directly
        self.case, self.returncode = case, returncode
        self.stdout, self.assert_ = stdout, assert_
|
||||
|
||||
class TestCase:
    """A single test case parsed from a [[case]] table in a .toml suite.

    A case may be permuted over different define values; each permutation
    is a shallow copy produced by permute() with its own `defines`.
    """
    # NOTE(review): the `filter=filter` default binds the *builtin*
    # filter function; shouldtest() would then fail on len(self.filter).
    # Presumably callers always pass an explicit filter or None — confirm
    # against TestSuite.
    def __init__(self, config, filter=filter,
            suite=None, caseno=None, lineno=None, **_):
        self.config = config
        self.filter = filter
        self.suite = suite
        self.caseno = caseno
        self.lineno = lineno

        self.code = config['code']
        self.code_lineno = config['code_lineno']
        self.defines = config.get('define', {})
        self.if_ = config.get('if', None)
        self.in_ = config.get('in', None)

        self.result = None

    def __str__(self):
        # "suite#case#perm (K=V, ...)" showing only permutation-specific defines
        if hasattr(self, 'permno'):
            if any(k not in self.case.defines for k in self.defines):
                return '%s#%d#%d (%s)' % (
                    self.suite.name, self.caseno, self.permno, ', '.join(
                        '%s=%s' % (k, v) for k, v in self.defines.items()
                        if k not in self.case.defines))
            else:
                return '%s#%d#%d' % (
                    self.suite.name, self.caseno, self.permno)
        else:
            return '%s#%d' % (
                self.suite.name, self.caseno)

    def permute(self, class_=None, defines={}, permno=None, **_):
        """Create permutation `permno` of this case with the given defines.

        `class_` optionally changes the case type (e.g. ValgrindTestCase).
        """
        ncase = (class_ or type(self))(self.config)
        for k, v in self.__dict__.items():
            setattr(ncase, k, v)
        ncase.case = self
        ncase.perms = [ncase]
        ncase.permno = permno
        ncase.defines = defines
        return ncase

    def build(self, f, **_):
        """Emit this case as a C test_case%d function into file object f."""
        # prologue
        for k, v in sorted(self.defines.items()):
            if k not in self.suite.defines:
                f.write('#define %s %s\n' % (k, v))

        # permutation-varying defines become function parameters
        f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
            '\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
            for k in sorted(self.perms[0].defines)
            if k not in self.defines)))

        f.write(PROLOGUE)
        f.write('\n')
        f.write(4*' '+'// test case %d\n' % self.caseno)
        # #line keeps compiler/debugger diagnostics pointing at the .toml
        f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))

        # test case goes here
        f.write(self.code)

        # epilogue
        f.write(EPILOGUE)
        f.write('}\n')

        for k, v in sorted(self.defines.items()):
            if k not in self.suite.defines:
                f.write('#undef %s\n' % k)

    def shouldtest(self, **args):
        """Decide whether this permutation runs under the current filters.

        Evaluates the case's optional C-like 'if' expression by textually
        substituting define values, translating &&/||/!/?: to Python, and
        eval-ing the result.
        """
        if (self.filter is not None and
                len(self.filter) >= 1 and
                self.filter[0] != self.caseno):
            return False
        elif (self.filter is not None and
                len(self.filter) >= 2 and
                self.filter[1] != self.permno):
            return False
        elif args.get('no_internal') and self.in_ is not None:
            return False
        elif self.if_ is not None:
            if_ = self.if_
            # substitute longest define names first to avoid partial matches
            while True:
                for k, v in sorted(self.defines.items(),
                        key=lambda x: len(x[0]), reverse=True):
                    if k in if_:
                        if_ = if_.replace(k, '(%s)' % v)
                        break
                else:
                    break
            if_ = (
                re.sub('(\&\&|\?)', ' and ',
                re.sub('(\|\||:)', ' or ',
                re.sub('!(?!=)', ' not ', if_))))
            return eval(if_)
        else:
            return True

    def test(self, exec=[], persist=False, cycles=None,
            gdb=False, failure=None, disk=None, **args):
        """Run the compiled test binary for this permutation.

        Raises TestFailure on a non-zero exit; returns PASS otherwise.
        Output is captured through a pty so the child keeps color output.
        """
        # build command
        cmd = exec + ['./%s.test' % self.suite.path,
            repr(self.caseno), repr(self.permno)]

        # persist disk or keep in RAM for speed?
        if persist:
            if not disk:
                disk = self.suite.path + '.disk'
            if persist != 'noerase':
                try:
                    with open(disk, 'w') as f:
                        f.truncate(0)
                        if args.get('verbose'):
                            print('truncate --size=0', disk)
                except FileNotFoundError:
                    pass

            cmd.append(disk)

        # simulate power-loss after n cycles?
        if cycles:
            cmd.append(str(cycles))

        # failed? drop into debugger?
        if gdb and failure:
            ncmd = ['gdb']
            if gdb == 'assert':
                ncmd.extend(['-ex', 'r'])
                if failure.assert_:
                    ncmd.extend(['-ex', 'up 2'])
            elif gdb == 'main':
                ncmd.extend([
                    '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
                    '-ex', 'r'])
            ncmd.extend(['--args'] + cmd)

            if args.get('verbose'):
                print(' '.join(shlex.quote(c) for c in ncmd))
            # let gdb own Ctrl-C
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            sys.exit(sp.call(ncmd))

        # run test case!
        mpty, spty = pty.openpty()
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd, stdout=spty, stderr=spty)
        os.close(spty)
        mpty = os.fdopen(mpty, 'r', 1)
        stdout = []
        assert_ = None
        try:
            while True:
                try:
                    line = mpty.readline()
                except OSError as e:
                    # EIO from the pty means the child closed its end
                    if e.errno == errno.EIO:
                        break
                    raise
                if not line:
                    break;
                stdout.append(line)
                if args.get('verbose'):
                    sys.stdout.write(line)
                # intercept asserts
                # (pattern skips ANSI escape sequences between the fields)
                m = re.match(
                    '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
                    .format('(?:\033\[[\d;]*.| )*', 'assert'),
                    line)
                if m and assert_ is None:
                    try:
                        # pull the offending source line for the report
                        with open(m.group(1)) as f:
                            lineno = int(m.group(2))
                            line = (next(it.islice(f, lineno-1, None))
                                .strip('\n'))
                            assert_ = {
                                'path': m.group(1),
                                'line': line,
                                'lineno': lineno,
                                'message': m.group(3)}
                    except:
                        # best-effort; a missing/short file just loses context
                        pass
        except KeyboardInterrupt:
            raise TestFailure(self, 1, stdout, None)
        proc.wait()

        # did we pass?
        if proc.returncode != 0:
            raise TestFailure(self, proc.returncode, stdout, assert_)
        else:
            return PASS
|
||||
|
||||
class ValgrindTestCase(TestCase):
    """TestCase variant that wraps the test binary in valgrind.

    Cases marked leaky=true in the .toml config are skipped.
    """
    def __init__(self, config, **args):
        # 'leaky' cases intentionally leak; valgrind would flag them
        self.leaky = config.get('leaky', False)
        super().__init__(config, **args)

    def shouldtest(self, **args):
        # only run non-leaky cases under valgrind
        return not self.leaky and super().shouldtest(**args)

    def test(self, exec=[], **args):
        verbose = args.get('verbose')
        # LFS_ERASE_VALUE == -1 means uninitialized-value reads are
        # expected, so silence undef-value errors in that configuration
        uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
        exec = [
            'valgrind',
            '--leak-check=full',
            ] + (['--undef-value-errors=no'] if uninit else []) + [
            ] + (['--track-origins=yes'] if not uninit else []) + [
            '--error-exitcode=4',
            '--error-limit=no',
            ] + (['--num-callers=1'] if not verbose else []) + [
            '-q'] + exec
        return super().test(exec=exec, **args)
|
||||
|
||||
class ReentrantTestCase(TestCase):
    """TestCase variant that re-runs a case under simulated power-loss.

    Only cases marked reentrant=true in the .toml config are run.
    """
    def __init__(self, config, **args):
        self.reentrant = config.get('reentrant', False)
        super().__init__(config, **args)

    def shouldtest(self, **args):
        return self.reentrant and super().shouldtest(**args)

    def test(self, persist=False, gdb=False, failure=None, **args):
        # kill the program after 1, 2, 3, ... prog/erase cycles until it
        # survives a full run
        for cycles in it.count(1):
            # clear disk first?
            if cycles == 1 and persist != 'noerase':
                persist = 'erase'
            else:
                persist = 'noerase'

            # exact cycle we should drop into debugger?
            if gdb and failure and failure.cycleno == cycles:
                return super().test(gdb=gdb, persist=persist, cycles=cycles,
                    failure=failure, **args)

            # run tests, but kill the program after prog/erase has
            # been hit n cycles. We exit with a special return code if the
            # program has not finished, since this isn't a test failure.
            try:
                return super().test(persist=persist, cycles=cycles, **args)
            except TestFailure as nfailure:
                # return code 33 == killed mid-run, keep going
                if nfailure.returncode == 33:
                    continue
                else:
                    # record which power cycle failed for gdb replay
                    nfailure.cycleno = cycles
                    raise
|
||||
|
||||
class TestSuite:
|
||||
def __init__(self, path, classes=[TestCase], defines={},
|
||||
filter=None, **args):
|
||||
self.name = os.path.basename(path)
|
||||
if self.name.endswith('.toml'):
|
||||
self.name = self.name[:-len('.toml')]
|
||||
if args.get('build_dir'):
|
||||
self.toml = path
|
||||
self.path = args['build_dir'] + '/' + path
|
||||
else:
|
||||
self.toml = path
|
||||
self.path = path
|
||||
self.classes = classes
|
||||
self.defines = defines.copy()
|
||||
self.filter = filter
|
||||
|
||||
with open(self.toml) as f:
|
||||
# load tests
|
||||
config = toml.load(f)
|
||||
|
||||
# find line numbers
|
||||
f.seek(0)
|
||||
linenos = []
|
||||
code_linenos = []
|
||||
for i, line in enumerate(f):
|
||||
if re.match(r'\[\[\s*case\s*\]\]', line):
|
||||
linenos.append(i+1)
|
||||
if re.match(r'code\s*=\s*(\'\'\'|""")', line):
|
||||
code_linenos.append(i+2)
|
||||
|
||||
code_linenos.reverse()
|
||||
|
||||
# grab global config
|
||||
for k, v in config.get('define', {}).items():
|
||||
if k not in self.defines:
|
||||
self.defines[k] = v
|
||||
self.code = config.get('code', None)
|
||||
if self.code is not None:
|
||||
self.code_lineno = code_linenos.pop()
|
||||
|
||||
# create initial test cases
|
||||
self.cases = []
|
||||
for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
|
||||
# code lineno?
|
||||
if 'code' in case:
|
||||
case['code_lineno'] = code_linenos.pop()
|
||||
# merge conditions if necessary
|
||||
if 'if' in config and 'if' in case:
|
||||
case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
|
||||
elif 'if' in config:
|
||||
case['if'] = config['if']
|
||||
# initialize test case
|
||||
self.cases.append(TestCase(case, filter=filter,
|
||||
suite=self, caseno=i+1, lineno=lineno, **args))
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.name < other.name
|
||||
|
||||
def permute(self, **args):
|
||||
for case in self.cases:
|
||||
# lets find all parameterized definitions, in one of [args.D,
|
||||
# suite.defines, case.defines, DEFINES]. Note that each of these
|
||||
# can be either a dict of defines, or a list of dicts, expressing
|
||||
# an initial set of permutations.
|
||||
pending = [{}]
|
||||
for inits in [self.defines, case.defines, DEFINES]:
|
||||
if not isinstance(inits, list):
|
||||
inits = [inits]
|
||||
|
||||
npending = []
|
||||
for init, pinit in it.product(inits, pending):
|
||||
ninit = pinit.copy()
|
||||
for k, v in init.items():
|
||||
if k not in ninit:
|
||||
try:
|
||||
ninit[k] = eval(v)
|
||||
except:
|
||||
ninit[k] = v
|
||||
npending.append(ninit)
|
||||
|
||||
pending = npending
|
||||
|
||||
# expand permutations
|
||||
pending = list(reversed(pending))
|
||||
expanded = []
|
||||
while pending:
|
||||
perm = pending.pop()
|
||||
for k, v in sorted(perm.items()):
|
||||
if not isinstance(v, str) and isinstance(v, abc.Iterable):
|
||||
for nv in reversed(v):
|
||||
nperm = perm.copy()
|
||||
nperm[k] = nv
|
||||
pending.append(nperm)
|
||||
break
|
||||
else:
|
||||
expanded.append(perm)
|
||||
|
||||
# generate permutations
|
||||
case.perms = []
|
||||
for i, (class_, defines) in enumerate(
|
||||
it.product(self.classes, expanded)):
|
||||
case.perms.append(case.permute(
|
||||
class_, defines, permno=i+1, **args))
|
||||
|
||||
# also track non-unique defines
|
||||
case.defines = {}
|
||||
for k, v in case.perms[0].defines.items():
|
||||
if all(perm.defines[k] == v for perm in case.perms):
|
||||
case.defines[k] = v
|
||||
|
||||
# track all perms and non-unique defines
|
||||
self.perms = []
|
||||
for case in self.cases:
|
||||
self.perms.extend(case.perms)
|
||||
|
||||
self.defines = {}
|
||||
for k, v in self.perms[0].defines.items():
|
||||
if all(perm.defines.get(k, None) == v for perm in self.perms):
|
||||
self.defines[k] = v
|
||||
|
||||
return self.perms
|
||||
|
||||
def build(self, **args):
|
||||
# build test files
|
||||
tf = open(self.path + '.test.tc', 'w')
|
||||
tf.write(GLOBALS)
|
||||
if self.code is not None:
|
||||
tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
|
||||
tf.write(self.code)
|
||||
|
||||
tfs = {None: tf}
|
||||
for case in self.cases:
|
||||
if case.in_ not in tfs:
|
||||
tfs[case.in_] = open(self.path+'.'+
|
||||
re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
|
||||
tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
|
||||
with open(case.in_) as f:
|
||||
for line in f:
|
||||
tfs[case.in_].write(line)
|
||||
tfs[case.in_].write('\n')
|
||||
tfs[case.in_].write(GLOBALS)
|
||||
|
||||
tfs[case.in_].write('\n')
|
||||
case.build(tfs[case.in_], **args)
|
||||
|
||||
tf.write('\n')
|
||||
tf.write('const char *lfs_testbd_path;\n')
|
||||
tf.write('uint32_t lfs_testbd_cycles;\n')
|
||||
tf.write('int main(int argc, char **argv) {\n')
|
||||
tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
|
||||
tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
|
||||
tf.write(4*' '+'lfs_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
|
||||
tf.write(4*' '+'lfs_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
|
||||
for perm in self.perms:
|
||||
# test declaration
|
||||
tf.write(4*' '+'extern void test_case%d(%s);\n' % (
|
||||
perm.caseno, ', '.join(
|
||||
'intmax_t %s' % k for k in sorted(perm.defines)
|
||||
if k not in perm.case.defines)))
|
||||
# test call
|
||||
tf.write(4*' '+
|
||||
'if (argc < 3 || (case_ == %d && perm == %d)) {'
|
||||
' test_case%d(%s); '
|
||||
'}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
|
||||
str(v) for k, v in sorted(perm.defines.items())
|
||||
if k not in perm.case.defines)))
|
||||
tf.write('}\n')
|
||||
|
||||
for tf in tfs.values():
|
||||
tf.close()
|
||||
|
||||
# write makefiles
|
||||
with open(self.path + '.mk', 'w') as mk:
|
||||
mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
|
||||
mk.write('\n')
|
||||
|
||||
# add coverage hooks?
|
||||
if args.get('coverage'):
|
||||
mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
|
||||
path=self.path))
|
||||
mk.write('\n')
|
||||
|
||||
# add truly global defines globally
|
||||
for k, v in sorted(self.defines.items()):
|
||||
mk.write('%s.test: override CFLAGS += -D%s=%r\n'
|
||||
% (self.path, k, v))
|
||||
|
||||
for path in tfs:
|
||||
if path is None:
|
||||
mk.write('%s: %s | %s\n' % (
|
||||
self.path+'.test.c',
|
||||
self.toml,
|
||||
self.path+'.test.tc'))
|
||||
else:
|
||||
mk.write('%s: %s %s | %s\n' % (
|
||||
self.path+'.'+path.replace('/', '.'),
|
||||
self.toml,
|
||||
path,
|
||||
self.path+'.'+re.sub('(\.c)?$', '.tc',
|
||||
path.replace('/', '.'))))
|
||||
mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
|
||||
|
||||
self.makefile = self.path + '.mk'
|
||||
self.target = self.path + '.test'
|
||||
return self.makefile, self.target
|
||||
|
||||
def test(self, **args):
|
||||
# run test suite!
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write(self.name + ' ')
|
||||
sys.stdout.flush()
|
||||
for perm in self.perms:
|
||||
if not perm.shouldtest(**args):
|
||||
continue
|
||||
|
||||
try:
|
||||
result = perm.test(**args)
|
||||
except TestFailure as failure:
|
||||
perm.result = failure
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write(FAIL)
|
||||
sys.stdout.flush()
|
||||
if not args.get('keep_going'):
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write('\n')
|
||||
raise
|
||||
else:
|
||||
perm.result = PASS
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write(PASS)
|
||||
sys.stdout.flush()
|
||||
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write('\n')
|
||||
|
||||
def main(**args):
|
||||
# figure out explicit defines
|
||||
defines = {}
|
||||
for define in args['D']:
|
||||
k, v, *_ = define.split('=', 2) + ['']
|
||||
defines[k] = v
|
||||
|
||||
# and what class of TestCase to run
|
||||
classes = []
|
||||
if args.get('normal'):
|
||||
classes.append(TestCase)
|
||||
if args.get('reentrant'):
|
||||
classes.append(ReentrantTestCase)
|
||||
if args.get('valgrind'):
|
||||
classes.append(ValgrindTestCase)
|
||||
if not classes:
|
||||
classes = [TestCase]
|
||||
|
||||
suites = []
|
||||
for testpath in args['test_paths']:
|
||||
# optionally specified test case/perm
|
||||
testpath, *filter = testpath.split('#')
|
||||
filter = [int(f) for f in filter]
|
||||
|
||||
# figure out the suite's toml file
|
||||
if os.path.isdir(testpath):
|
||||
testpath = testpath + '/*.toml'
|
||||
elif os.path.isfile(testpath):
|
||||
testpath = testpath
|
||||
elif testpath.endswith('.toml'):
|
||||
testpath = TEST_PATHS + '/' + testpath
|
||||
else:
|
||||
testpath = TEST_PATHS + '/' + testpath + '.toml'
|
||||
|
||||
# find tests
|
||||
for path in glob.glob(testpath):
|
||||
suites.append(TestSuite(path, classes, defines, filter, **args))
|
||||
|
||||
# sort for reproducibility
|
||||
suites = sorted(suites)
|
||||
|
||||
# generate permutations
|
||||
for suite in suites:
|
||||
suite.permute(**args)
|
||||
|
||||
# build tests in parallel
|
||||
print('====== building ======')
|
||||
makefiles = []
|
||||
targets = []
|
||||
for suite in suites:
|
||||
makefile, target = suite.build(**args)
|
||||
makefiles.append(makefile)
|
||||
targets.append(target)
|
||||
|
||||
cmd = (['make', '-f', 'Makefile'] +
|
||||
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
|
||||
[target for target in targets])
|
||||
mpty, spty = pty.openpty()
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
|
||||
os.close(spty)
|
||||
mpty = os.fdopen(mpty, 'r', 1)
|
||||
stdout = []
|
||||
while True:
|
||||
try:
|
||||
line = mpty.readline()
|
||||
except OSError as e:
|
||||
if e.errno == errno.EIO:
|
||||
break
|
||||
raise
|
||||
if not line:
|
||||
break;
|
||||
stdout.append(line)
|
||||
if args.get('verbose'):
|
||||
sys.stdout.write(line)
|
||||
# intercept warnings
|
||||
m = re.match(
|
||||
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
|
||||
.format('(?:\033\[[\d;]*.| )*', 'warning'),
|
||||
line)
|
||||
if m and not args.get('verbose'):
|
||||
try:
|
||||
with open(m.group(1)) as f:
|
||||
lineno = int(m.group(2))
|
||||
line = next(it.islice(f, lineno-1, None)).strip('\n')
|
||||
sys.stdout.write(
|
||||
"\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
|
||||
"{message}\n{line}\n\n".format(
|
||||
path=m.group(1), line=line, lineno=lineno,
|
||||
message=m.group(3)))
|
||||
except:
|
||||
pass
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in stdout:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
print('built %d test suites, %d test cases, %d permutations' % (
|
||||
len(suites),
|
||||
sum(len(suite.cases) for suite in suites),
|
||||
sum(len(suite.perms) for suite in suites)))
|
||||
|
||||
total = 0
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
total += perm.shouldtest(**args)
|
||||
if total != sum(len(suite.perms) for suite in suites):
|
||||
print('filtered down to %d permutations' % total)
|
||||
|
||||
# only requested to build?
|
||||
if args.get('build'):
|
||||
return 0
|
||||
|
||||
print('====== testing ======')
|
||||
try:
|
||||
for suite in suites:
|
||||
suite.test(**args)
|
||||
except TestFailure:
|
||||
pass
|
||||
|
||||
print('====== results ======')
|
||||
passed = 0
|
||||
failed = 0
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
if perm.result == PASS:
|
||||
passed += 1
|
||||
elif isinstance(perm.result, TestFailure):
|
||||
sys.stdout.write(
|
||||
"\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
|
||||
"{perm} failed\n".format(
|
||||
perm=perm, path=perm.suite.path, lineno=perm.lineno,
|
||||
returncode=perm.result.returncode or 0))
|
||||
if perm.result.stdout:
|
||||
if perm.result.assert_:
|
||||
stdout = perm.result.stdout[:-1]
|
||||
else:
|
||||
stdout = perm.result.stdout
|
||||
for line in stdout[-5:]:
|
||||
sys.stdout.write(line)
|
||||
if perm.result.assert_:
|
||||
sys.stdout.write(
|
||||
"\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
|
||||
"{message}\n{line}\n".format(
|
||||
**perm.result.assert_))
|
||||
sys.stdout.write('\n')
|
||||
failed += 1
|
||||
|
||||
if args.get('coverage'):
|
||||
# collect coverage info
|
||||
# why -j1? lcov doesn't work in parallel because of gcov limitations
|
||||
cmd = (['make', '-j1', '-f', 'Makefile'] +
|
||||
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
|
||||
(['COVERAGETARGET=%s' % args['coverage']]
|
||||
if isinstance(args['coverage'], str) else []) +
|
||||
[suite.path + '.info' for suite in suites
|
||||
if any(perm.result == PASS for perm in suite.perms)])
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE if not args.get('verbose') else None,
|
||||
stderr=sp.STDOUT if not args.get('verbose') else None,
|
||||
universal_newlines=True)
|
||||
stdout = []
|
||||
for line in proc.stdout:
|
||||
stdout.append(line)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in stdout:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
if args.get('gdb'):
|
||||
failure = None
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
if isinstance(perm.result, TestFailure):
|
||||
failure = perm.result
|
||||
if failure is not None:
|
||||
print('======= gdb ======')
|
||||
# drop into gdb
|
||||
failure.case.test(failure=failure, **args)
|
||||
sys.exit(0)
|
||||
|
||||
print('tests passed %d/%d (%.1f%%)' % (passed, total,
|
||||
100*(passed/total if total else 1.0)))
|
||||
print('tests failed %d/%d (%.1f%%)' % (failed, total,
|
||||
100*(failed/total if total else 1.0)))
|
||||
return 1 if failed > 0 else 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Run parameterized tests in various configurations.")
|
||||
parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
|
||||
help="Description of test(s) to run. By default, this is all tests \
|
||||
found in the \"{0}\" directory. Here, you can specify a different \
|
||||
directory of tests, a specific file, a suite by name, and even \
|
||||
specific test cases and permutations. For example \
|
||||
\"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
|
||||
parser.add_argument('-D', action='append', default=[],
|
||||
help="Overriding parameter definitions.")
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output everything that is happening.")
|
||||
parser.add_argument('-k', '--keep-going', action='store_true',
|
||||
help="Run all tests instead of stopping on first error. Useful for CI.")
|
||||
parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
|
||||
nargs='?', const='erase',
|
||||
help="Store disk image in a file.")
|
||||
parser.add_argument('-b', '--build', action='store_true',
|
||||
help="Only build the tests, do not execute.")
|
||||
parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
|
||||
nargs='?', const='assert',
|
||||
help="Drop into gdb on test failure.")
|
||||
parser.add_argument('--no-internal', action='store_true',
|
||||
help="Don't run tests that require internal knowledge.")
|
||||
parser.add_argument('-n', '--normal', action='store_true',
|
||||
help="Run tests normally.")
|
||||
parser.add_argument('-r', '--reentrant', action='store_true',
|
||||
help="Run reentrant tests with simulated power-loss.")
|
||||
parser.add_argument('--valgrind', action='store_true',
|
||||
help="Run non-leaky tests under valgrind to check for memory leaks.")
|
||||
parser.add_argument('--exec', default=[], type=lambda e: e.split(),
|
||||
help="Run tests with another executable prefixed on the command line.")
|
||||
parser.add_argument('--disk',
|
||||
help="Specify a file to use for persistent/reentrant tests.")
|
||||
parser.add_argument('--coverage', type=lambda x: x if x else True,
|
||||
nargs='?', const='',
|
||||
help="Collect coverage information during testing. This uses lcov/gcov \
|
||||
to accumulate coverage information into *.info files. May also \
|
||||
a path to a *.info file to accumulate coverage info into.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Build relative to the specified directory instead of the \
|
||||
current directory.")
|
||||
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
653
components/fs/littlefs/littlefs/tests/test_alloc.toml
Normal file
653
components/fs/littlefs/littlefs/tests/test_alloc.toml
Normal file
|
@ -0,0 +1,653 @@
|
|||
# allocator tests
|
||||
# note for these to work there are a number constraints on the device geometry
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
|
||||
[[case]] # parallel allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
size = strlen(names[n]);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &files[n], names[n], size) => size;
|
||||
}
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_file_close(&lfs, &files[n]) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size = strlen(names[n]);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # serial allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen(names[n]);
|
||||
memcpy(buffer, names[n], size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # parallel allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &files[n], names[n], size) => size;
|
||||
}
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_file_close(&lfs, &files[n]) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_remove(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
[[case]] # serial allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen(names[n]);
|
||||
memcpy(buffer, names[n], size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_remove(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
[[case]] # exhaustion test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("exhaustion");
|
||||
memcpy(buffer, "exhaustion", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_ssize_t res;
|
||||
while (true) {
|
||||
res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (res < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
res => size;
|
||||
}
|
||||
res => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "exhaustion", size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # exhaustion wraparound test
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("buffering");
|
||||
memcpy(buffer, "buffering", size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, "padding") => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("exhaustion");
|
||||
memcpy(buffer, "exhaustion", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_ssize_t res;
|
||||
while (true) {
|
||||
res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (res < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
res => size;
|
||||
}
|
||||
res => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "exhaustion", size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # dir exhaustion test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
int count = 0;
|
||||
while (true) {
|
||||
err = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (err < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
count += 1;
|
||||
}
|
||||
err => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
|
||||
// see if dir fits with max file size
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
|
||||
// see if dir fits with > max file size
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count+1; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # what if we have a bad block during an allocation scan?
|
||||
in = "lfs.c"
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
// first fill to exhaustion to find available space
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
lfs_size_t filesize = 0;
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
filesize += size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// now fill all but a couple of blocks of the filesystem with data
|
||||
filesize -= 3*LFS_BLOCK_SIZE;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
for (lfs_size_t i = 0; i < filesize/size; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// also save head of file so we can error during lookahead scan
|
||||
lfs_block_t fileblock = file.ctz.head;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// remount to force an alloc scan
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// but mark the head of our file as a "bad block", this is force our
|
||||
// scan to bail early
|
||||
lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT);
|
||||
if (res == LFS_ERR_CORRUPT) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// now reverse the "bad block" and try to write the file again until we
|
||||
// run out of space
|
||||
lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// check that the disk isn't hurt
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
for (lfs_size_t i = 0; i < filesize/size; i++) {
|
||||
uint8_t rbuffer[4];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
# Below, I don't like these tests. They're fragile and depend _heavily_
|
||||
# on the geometry of the block device. But they are valuable. Eventually they
|
||||
# should be removed and replaced with generalized tests.
|
||||
|
||||
[[case]] # chained dir exhaustion test
|
||||
define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
int count = 0;
|
||||
while (true) {
|
||||
err = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (err < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
count += 1;
|
||||
}
|
||||
err => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
// see that chained dir fails
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count+1; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
|
||||
|
||||
// shorten file to try a second chained dir
|
||||
while (true) {
|
||||
err = lfs_mkdir(&lfs, "exhaustiondir");
|
||||
if (err != LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
|
||||
lfs_ssize_t filesize = lfs_file_size(&lfs, &file);
|
||||
filesize > 0 => true;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, filesize - size) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
err => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir2") => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # split dir test
|
||||
define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// create one block hole for half a directory
|
||||
lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
|
||||
memcpy(&buffer[i], "hi", 2);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < (cfg.block_count-4)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// open hole
|
||||
lfs_remove(&lfs, "bump") => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "splitdir") => 0;
|
||||
lfs_file_open(&lfs, &file, "splitdir/bump",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
|
||||
memcpy(&buffer[i], "hi", 2);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # outdated lookahead test
|
||||
define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// rewrite one file
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// rewrite second file, this requires lookahead does not
|
||||
// use old population
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # outdated lookahead and split dir test
|
||||
define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// rewrite one file with a hole of one block
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// try to allocate a directory, should fail!
|
||||
lfs_mkdir(&lfs, "split") => LFS_ERR_NOSPC;
|
||||
|
||||
// file should not fail
|
||||
lfs_file_open(&lfs, &file, "notasplit",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
304
components/fs/littlefs/littlefs/tests/test_attrs.toml
Normal file
304
components/fs/littlefs/littlefs/tests/test_attrs.toml
Normal file
|
@ -0,0 +1,304 @@
|
|||
[[case]] # set/get attribute
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
|
||||
lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
|
||||
lfs_setattr(&lfs, "hello", 'C', "ccccc", 5) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "", 0) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_removeattr(&lfs, "hello", 'B') => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "dddddd", 6) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "eee", 3) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 3;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
|
||||
lfs_setattr(&lfs, "hello", 'B', "fffffffff", 9) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 9;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+13, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # set/get root attribute
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
|
||||
lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
|
||||
lfs_setattr(&lfs, "/", 'C', "ccccc", 5) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "", 0) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 0;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_removeattr(&lfs, "/", 'B') => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "dddddd", 6) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "eee", 3) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 3;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
|
||||
lfs_setattr(&lfs, "/", 'B', "fffffffff", 9) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 9;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+13, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # set/get file attribute
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs1[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 6},
|
||||
{'C', buffer+10, 5},
|
||||
};
|
||||
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer, "aaaa", 4);
|
||||
memcpy(buffer+4, "bbbbbb", 6);
|
||||
memcpy(buffer+10, "ccccc", 5);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 0;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer+4, "dddddd", 6);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 3;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer+4, "eee", 3);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[0].size = LFS_ATTR_MAX+1;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1)
|
||||
=> LFS_ERR_NOSPC;
|
||||
|
||||
struct lfs_attr attrs2[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 9},
|
||||
{'C', buffer+13, 5},
|
||||
};
|
||||
struct lfs_file_config cfg2 = {.attrs=attrs2, .attr_count=3};
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDWR, &cfg2) => 0;
|
||||
memcpy(buffer+4, "fffffffff", 9);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
attrs1[0].size = 4;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs3[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 9},
|
||||
{'C', buffer+13, 5},
|
||||
};
|
||||
struct lfs_file_config cfg3 = {.attrs=attrs3, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg3) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # deferred file attributes
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
|
||||
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
|
||||
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs1[] = {
|
||||
{'B', "gggg", 4},
|
||||
{'C', "", 0},
|
||||
{'D', "hhhh", 4},
|
||||
};
|
||||
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
|
||||
lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 9;
|
||||
lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 5;
|
||||
lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => LFS_ERR_NOATTR;
|
||||
memcmp(buffer, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+9, "ccccc\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+18, "\0\0\0\0\0\0\0\0\0", 9) => 0;
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 4;
|
||||
lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 0;
|
||||
lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => 4;
|
||||
memcmp(buffer, "gggg\0\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+9, "\0\0\0\0\0\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+18, "hhhh\0\0\0\0\0", 9) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
241
components/fs/littlefs/littlefs/tests/test_badblocks.toml
Normal file
241
components/fs/littlefs/littlefs/tests/test_badblocks.toml
Normal file
|
@ -0,0 +1,241 @@
|
|||
# bad blocks with block cycles should be tested in test_relocations
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
|
||||
[[case]] # single bad blocks
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
define.NAMEMULT = 64
|
||||
define.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
|
||||
lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
|
||||
lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
[[case]] # region corruption (causes cascading failures)
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
define.NAMEMULT = 64
|
||||
define.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
|
||||
lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # alternating corruption (causes cascading failures)
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
define.NAMEMULT = 64
|
||||
define.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
|
||||
lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# other corner cases
|
||||
[[case]] # bad superblocks (corrupt 1 or 0)
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
code = '''
|
||||
lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
|
||||
lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
|
||||
|
||||
lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
838
components/fs/littlefs/littlefs/tests/test_dirs.toml
Normal file
838
components/fs/littlefs/littlefs/tests/test_dirs.toml
Normal file
|
@ -0,0 +1,838 @@
|
|||
[[case]] # root
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many directory creation
|
||||
define.N = 'range(0, 100, 3)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "dir%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "dir%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many directory removal
|
||||
define.N = 'range(3, 100, 11)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many directory rename
|
||||
define.N = 'range(3, 100, 11)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "test%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "test%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
char oldpath[128];
|
||||
char newpath[128];
|
||||
sprintf(oldpath, "test%03d", i);
|
||||
sprintf(newpath, "tedd%03d", i);
|
||||
lfs_rename(&lfs, oldpath, newpath) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "tedd%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
'''
|
||||
|
||||
[[case]] # reentrant many directory creation/rename/removal
|
||||
define.N = [5, 11]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(err == 0 || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(err == 0 || err == LFS_ERR_NOENT);
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
char oldpath[128];
|
||||
char newpath[128];
|
||||
sprintf(oldpath, "hi%03d", i);
|
||||
sprintf(newpath, "hello%03d", i);
|
||||
// YES this can overwrite an existing newpath
|
||||
lfs_rename(&lfs, oldpath, newpath) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # file creation
|
||||
define.N = 'range(3, 100, 11)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
'''
|
||||
|
||||
[[case]] # file removal
|
||||
define.N = 'range(0, 100, 3)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "removeme%03d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # file rename
|
||||
define.N = 'range(0, 100, 3)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "test%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "test%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
char oldpath[128];
|
||||
char newpath[128];
|
||||
sprintf(oldpath, "test%03d", i);
|
||||
sprintf(newpath, "tedd%03d", i);
|
||||
lfs_rename(&lfs, oldpath, newpath) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "tedd%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
'''
|
||||
|
||||
[[case]] # reentrant file creation/rename/removal
|
||||
define.N = [5, 25]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(err == 0 || err == LFS_ERR_NOENT);
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
char oldpath[128];
|
||||
char newpath[128];
|
||||
sprintf(oldpath, "hi%03d", i);
|
||||
sprintf(newpath, "hello%03d", i);
|
||||
// YES this can overwrite an existing newpath
|
||||
lfs_rename(&lfs, oldpath, newpath) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "hello%03d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # nested directories
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "potato") => 0;
|
||||
lfs_file_open(&lfs, &file, "burito",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "potato/baked") => 0;
|
||||
lfs_mkdir(&lfs, "potato/sweet") => 0;
|
||||
lfs_mkdir(&lfs, "potato/fried") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "potato") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "baked") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "fried") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "sweet") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// try removing?
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// try renaming?
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "potato", "coldpotato") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
|
||||
lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// try cross-directory renaming
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "coldpotato") => 0;
|
||||
lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
|
||||
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_rename(&lfs, "hotpotato/fried", "coldpotato/fried") => 0;
|
||||
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_rename(&lfs, "hotpotato/sweet", "coldpotato/sweet") => 0;
|
||||
lfs_rename(&lfs, "coldpotato", "hotpotato") => 0;
|
||||
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "baked") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "fried") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "sweet") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// final remove
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "hotpotato/baked") => 0;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "hotpotato/fried") => 0;
|
||||
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
|
||||
lfs_remove(&lfs, "hotpotato/sweet") => 0;
|
||||
lfs_remove(&lfs, "hotpotato") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
info.type => LFS_TYPE_DIR;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "burito") == 0);
|
||||
info.type => LFS_TYPE_REG;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # recursive remove
|
||||
define.N = [10, 100]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "prickly-pear") => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "prickly-pear/cactus%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "cactus%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "cactus%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
sprintf(path, "prickly-pear/%s", info.name);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_remove(&lfs, "prickly-pear") => 0;
|
||||
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # other error cases
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "potato") => 0;
|
||||
lfs_file_open(&lfs, &file, "burito",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
|
||||
lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
|
||||
lfs_file_open(&lfs, &file, "burito",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
|
||||
lfs_file_open(&lfs, &file, "potato",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
|
||||
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
|
||||
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
|
||||
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
|
||||
lfs_file_open(&lfs, &file, "potato", LFS_O_RDONLY) => LFS_ERR_ISDIR;
|
||||
lfs_file_open(&lfs, &file, "tomato", LFS_O_WRONLY) => LFS_ERR_NOENT;
|
||||
lfs_file_open(&lfs, &file, "potato", LFS_O_WRONLY) => LFS_ERR_ISDIR;
|
||||
lfs_file_open(&lfs, &file, "potato",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
|
||||
|
||||
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
|
||||
lfs_file_open(&lfs, &file, "/",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
|
||||
lfs_file_open(&lfs, &file, "/", LFS_O_RDONLY) => LFS_ERR_ISDIR;
|
||||
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY) => LFS_ERR_ISDIR;
|
||||
lfs_file_open(&lfs, &file, "/",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
|
||||
|
||||
// check that errors did not corrupt directory
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, "burito") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "potato") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// or on disk
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, "burito") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "potato") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # directory seek
|
||||
define.COUNT = [4, 128, 132]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "hello/kitty%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int j = 2; j < COUNT; j++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "hello") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_soff_t pos;
|
||||
for (int i = 0; i < j; i++) {
|
||||
sprintf(path, "kitty%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
pos = lfs_dir_tell(&lfs, &dir);
|
||||
assert(pos >= 0);
|
||||
}
|
||||
|
||||
lfs_dir_seek(&lfs, &dir, pos) => 0;
|
||||
sprintf(path, "kitty%03d", j);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
sprintf(path, "kitty%03d", 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_seek(&lfs, &dir, pos) => 0;
|
||||
sprintf(path, "kitty%03d", j);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
[[case]] # root seek
|
||||
define.COUNT = [4, 128, 132]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int j = 2; j < COUNT; j++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_soff_t pos;
|
||||
for (int i = 0; i < j; i++) {
|
||||
sprintf(path, "hi%03d", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
pos = lfs_dir_tell(&lfs, &dir);
|
||||
assert(pos >= 0);
|
||||
}
|
||||
|
||||
lfs_dir_seek(&lfs, &dir, pos) => 0;
|
||||
sprintf(path, "hi%03d", j);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
sprintf(path, "hi%03d", 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_seek(&lfs, &dir, pos) => 0;
|
||||
sprintf(path, "hi%03d", j);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
611
components/fs/littlefs/littlefs/tests/test_entries.toml
Normal file
611
components/fs/littlefs/littlefs/tests/test_entries.toml
Normal file
|
@ -0,0 +1,611 @@
|
|||
# These tests are for some specific corner cases with neighboring inline files.
|
||||
# Note that these tests are intended for 512 byte inline sizes. They should
|
||||
# still pass with other inline sizes but wouldn't be testing anything.
|
||||
|
||||
define.LFS_CACHE_SIZE = 512
|
||||
if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
|
||||
|
||||
[[case]] # entry grow test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # entry shrink test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # entry spill test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # entry push spill test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # entry push spill two test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi4 200
|
||||
sprintf(path, "hi4"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi4 200
|
||||
sprintf(path, "hi4"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # entry drop test
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "hi1") => 0;
|
||||
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "hi2") => 0;
|
||||
lfs_stat(&lfs, "hi2", &info) => LFS_ERR_NOENT;
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "hi3") => 0;
|
||||
lfs_stat(&lfs, "hi3", &info) => LFS_ERR_NOENT;
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "hi0") => 0;
|
||||
lfs_stat(&lfs, "hi0", &info) => LFS_ERR_NOENT;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # create too big
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(path, 'm', 200);
|
||||
path[200] = '\0';
|
||||
|
||||
size = 400;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
uint8_t wbuffer[1024];
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
size = 400;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # resize too big
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
memset(path, 'm', 200);
|
||||
path[200] = '\0';
|
||||
|
||||
size = 40;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
uint8_t wbuffer[1024];
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
size = 40;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
size = 400;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
size = 400;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
288
components/fs/littlefs/littlefs/tests/test_evil.toml
Normal file
288
components/fs/littlefs/littlefs/tests/test_evil.toml
Normal file
|
@ -0,0 +1,288 @@
|
|||
# Tests for recovering from conditions which shouldn't normally
|
||||
# happen during normal operation of littlefs
|
||||
|
||||
# invalid pointer tests (outside of block_count)
|
||||
|
||||
[[case]] # invalid tail-pointer test
|
||||
define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
|
||||
define.INVALSET = [0x3, 0x1, 0x2]
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// change tail-pointer to invalid pointers
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
|
||||
(lfs_block_t[2]){
|
||||
(INVALSET & 0x1) ? 0xcccccccc : 0,
|
||||
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
[[case]] # invalid dir pointer test
|
||||
define.INVALSET = [0x3, 0x1, 0x2]
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// make a dir
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "dir_here") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// change the dir pointer to be invalid
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
// make sure id 1 == our directory
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x700, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
|
||||
=> LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here"));
|
||||
assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0);
|
||||
// change dir pointer
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8),
|
||||
(lfs_block_t[2]){
|
||||
(INVALSET & 0x1) ? 0xcccccccc : 0,
|
||||
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that accessing our bad dir fails, note there's a number
|
||||
// of ways to access the dir, some can fail, but some don't
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "dir_here", &info) => 0;
|
||||
assert(strcmp(info.name, "dir_here") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
|
||||
lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
|
||||
lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
|
||||
lfs_file_open(&lfs, &file, "dir_here/file_here",
|
||||
LFS_O_RDONLY) => LFS_ERR_CORRUPT;
|
||||
lfs_file_open(&lfs, &file, "dir_here/file_here",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # invalid file pointer test
|
||||
in = "lfs.c"
|
||||
define.SIZE = [10, 1000, 100000] # faked file size
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// make a file
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "file_here",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// change the file pointer to be invalid
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
// make sure id 1 == our file
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x700, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
|
||||
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
|
||||
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
|
||||
// change file pointer
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)),
|
||||
&(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that accessing our bad file fails, note there's a number
|
||||
// of ways to access the dir, some can fail, but some don't
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "file_here", &info) => 0;
|
||||
assert(strcmp(info.name, "file_here") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
|
||||
lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// any allocs that traverse CTZ must unfortunately must fail
|
||||
if (SIZE > 2*LFS_BLOCK_SIZE) {
|
||||
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # invalid pointer in CTZ skip-list test
|
||||
define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// make a file
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "file_here",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
char c = 'c';
|
||||
lfs_file_write(&lfs, &file, &c, 1) => 1;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
// change pointer in CTZ skip-list to be invalid
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
// make sure id 1 == our file and get our CTZ structure
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x700, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
|
||||
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
|
||||
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
|
||||
struct lfs_ctz ctz;
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x700, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
|
||||
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
|
||||
lfs_ctz_fromle32(&ctz);
|
||||
// rewrite block to contain bad pointer
|
||||
uint8_t bbuffer[LFS_BLOCK_SIZE];
|
||||
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
uint32_t bad = lfs_tole32(0xcccccccc);
|
||||
memcpy(&bbuffer[0], &bad, sizeof(bad));
|
||||
memcpy(&bbuffer[4], &bad, sizeof(bad));
|
||||
cfg.erase(&cfg, ctz.head) => 0;
|
||||
cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that accessing our bad file fails, note there's a number
|
||||
// of ways to access the dir, some can fail, but some don't
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "file_here", &info) => 0;
|
||||
assert(strcmp(info.name, "file_here") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
|
||||
lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// any allocs that traverse CTZ must unfortunately must fail
|
||||
if (SIZE > 2*LFS_BLOCK_SIZE) {
|
||||
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
[[case]] # invalid gstate pointer
|
||||
define.INVALSET = [0x3, 0x1, 0x2]
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// create an invalid gstate
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
|
||||
(INVALSET & 0x1) ? 0xcccccccc : 0,
|
||||
(INVALSET & 0x2) ? 0xcccccccc : 0});
|
||||
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
// mount may not fail, but our first alloc should fail when
|
||||
// we try to fix the gstate
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# cycle detection/recovery tests
|
||||
|
||||
[[case]] # metadata-pair threaded-list loop test
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// change tail-pointer to point to ourself
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
|
||||
(lfs_block_t[2]){0, 1}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
[[case]] # metadata-pair threaded-list 2-length loop test
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs with child dir
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// find child
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_block_t pair[2];
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x7ff, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
|
||||
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
|
||||
lfs_pair_fromle32(pair);
|
||||
// change tail-pointer to point to root
|
||||
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
|
||||
(lfs_block_t[2]){0, 1}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
[[case]] # metadata-pair threaded-list 1-length child loop test
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs with child dir
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// find child
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_block_t pair[2];
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_get(&lfs, &mdir,
|
||||
LFS_MKTAG(0x7ff, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
|
||||
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
|
||||
lfs_pair_fromle32(pair);
|
||||
// change tail-pointer to point to ourself
|
||||
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
465
components/fs/littlefs/littlefs/tests/test_exhaustion.toml
Normal file
465
components/fs/littlefs/littlefs/tests/test_exhaustion.toml
Normal file
|
@ -0,0 +1,465 @@
|
|||
[[case]] # test running a filesystem to exhaustion
|
||||
define.LFS_ERASE_CYCLES = 10
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
LFS_WARN("completed %d cycles", cycle);
|
||||
'''
|
||||
|
||||
[[case]] # test running a filesystem to exhaustion
|
||||
# which also requires expanding superblocks
|
||||
define.LFS_ERASE_CYCLES = 10
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
|
||||
define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
'LFS_TESTBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_TESTBD_BADBLOCK_READERROR',
|
||||
'LFS_TESTBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_TESTBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
LFS_WARN("completed %d cycles", cycle);
|
||||
'''
|
||||
|
||||
# These are a sort of high-level litmus test for wear-leveling. One definition
|
||||
# of wear-leveling is that increasing a block device's space translates directly
|
||||
# into increasing the block devices lifetime. This is something we can actually
|
||||
# check for.
|
||||
|
||||
[[case]] # wear-level test running a filesystem to exhaustion
|
||||
define.LFS_ERASE_CYCLES = 20
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
uint32_t run_cycles[2];
|
||||
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
|
||||
|
||||
for (int run = 0; run < 2; run++) {
|
||||
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_setwear(&cfg, b,
|
||||
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
run_cycles[run] = cycle;
|
||||
LFS_WARN("completed %d blocks %d cycles",
|
||||
run_block_count[run], run_cycles[run]);
|
||||
}
|
||||
|
||||
// check we increased the lifetime by 2x with ~10% error
|
||||
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
|
||||
'''
|
||||
|
||||
[[case]] # wear-level test + expanding superblock
|
||||
define.LFS_ERASE_CYCLES = 20
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
uint32_t run_cycles[2];
|
||||
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
|
||||
|
||||
for (int run = 0; run < 2; run++) {
|
||||
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_setwear(&cfg, b,
|
||||
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << ((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
run_cycles[run] = cycle;
|
||||
LFS_WARN("completed %d blocks %d cycles",
|
||||
run_block_count[run], run_cycles[run]);
|
||||
}
|
||||
|
||||
// check we increased the lifetime by 2x with ~10% error
|
||||
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
|
||||
'''
|
||||
|
||||
[[case]] # test that we wear blocks roughly evenly
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
|
||||
define.CYCLES = 100
|
||||
define.FILES = 10
|
||||
if = 'LFS_BLOCK_CYCLES < CYCLES/10'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (cycle < CYCLES) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << 4; //((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << 4; //((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
LFS_WARN("completed %d cycles", cycle);
|
||||
|
||||
// check the wear on our block device
|
||||
lfs_testbd_wear_t minwear = -1;
|
||||
lfs_testbd_wear_t totalwear = 0;
|
||||
lfs_testbd_wear_t maxwear = 0;
|
||||
// skip 0 and 1 as superblock movement is intentionally avoided
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
printf("%08x: wear %d\n", b, wear);
|
||||
assert(wear >= 0);
|
||||
if (wear < minwear) {
|
||||
minwear = wear;
|
||||
}
|
||||
if (wear > maxwear) {
|
||||
maxwear = wear;
|
||||
}
|
||||
totalwear += wear;
|
||||
}
|
||||
lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
|
||||
LFS_WARN("max wear: %d cycles", maxwear);
|
||||
LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
|
||||
LFS_WARN("min wear: %d cycles", minwear);
|
||||
|
||||
// find standard deviation^2
|
||||
lfs_testbd_wear_t dev2 = 0;
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
assert(wear >= 0);
|
||||
lfs_testbd_swear_t diff = wear - avgwear;
|
||||
dev2 += diff*diff;
|
||||
}
|
||||
dev2 /= totalwear;
|
||||
LFS_WARN("std dev^2: %d", dev2);
|
||||
assert(dev2 < 8);
|
||||
'''
|
||||
|
486
components/fs/littlefs/littlefs/tests/test_files.toml
Normal file
486
components/fs/littlefs/littlefs/tests/test_files.toml
Normal file
|
@ -0,0 +1,486 @@
|
|||
|
||||
[[case]] # simple file test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "hello",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
size = strlen("Hello World!")+1;
|
||||
strcpy((char*)buffer, "Hello World!");
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(strcmp((char*)buffer, "Hello World!") == 0);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # larger files
|
||||
define.SIZE = [32, 8192, 262144, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 33, 1, 1023]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # rewriting files
|
||||
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// rewrite
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
if (SIZE1 > SIZE2) {
|
||||
srand(1);
|
||||
for (lfs_size_t b = 0; b < SIZE2; b++) {
|
||||
rand();
|
||||
}
|
||||
for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # appending files
|
||||
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// append
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # truncating files
|
||||
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// truncate
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE2;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant file writing
|
||||
define.SIZE = [32, 0, 7, 2049]
|
||||
define.CHUNKSIZE = [31, 16, 65]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
|
||||
assert(err == LFS_ERR_NOENT || err == 0);
|
||||
if (err == 0) {
|
||||
// can only be 0 (new file) or full size
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
assert(size == 0 || size == SIZE);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
// write
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant file writing with syncs
|
||||
define = [
|
||||
# append (O(n))
|
||||
{MODE='LFS_O_APPEND', SIZE=[32, 0, 7, 2049], CHUNKSIZE=[31, 16, 65]},
|
||||
# truncate (O(n^2))
|
||||
{MODE='LFS_O_TRUNC', SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
|
||||
# rewrite (O(n^2))
|
||||
{MODE=0, SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
|
||||
]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
|
||||
assert(err == LFS_ERR_NOENT || err == 0);
|
||||
if (err == 0) {
|
||||
// with syncs we could be any size, but it at least must be valid data
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
assert(size <= SIZE);
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
// write
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
assert(size <= SIZE);
|
||||
srand(1);
|
||||
lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
|
||||
for (lfs_size_t b = 0; b < skip; b++) {
|
||||
rand();
|
||||
}
|
||||
for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
buffer[b] = rand() & 0xff;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
srand(1);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
|
||||
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
|
||||
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
|
||||
for (lfs_size_t b = 0; b < chunk; b++) {
|
||||
assert(buffer[b] == (rand() & 0xff));
|
||||
}
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many files
|
||||
define.N = 300
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// create N files of 7 bytes
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
char wbuffer[1024];
|
||||
size = 7;
|
||||
snprintf(wbuffer, size, "Hi %03d", i);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
char rbuffer[1024];
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(strcmp(rbuffer, wbuffer) == 0);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many files with power cycle
|
||||
define.N = 300
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// create N files of 7 bytes
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
char wbuffer[1024];
|
||||
size = 7;
|
||||
snprintf(wbuffer, size, "Hi %03d", i);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
char rbuffer[1024];
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(strcmp(rbuffer, wbuffer) == 0);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # many files with power loss
|
||||
define.N = 300
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
// create N files of 7 bytes
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file_%03d", i);
|
||||
err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
|
||||
char wbuffer[1024];
|
||||
size = 7;
|
||||
snprintf(wbuffer, size, "Hi %03d", i);
|
||||
if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
char rbuffer[1024];
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(strcmp(rbuffer, wbuffer) == 0);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
244
components/fs/littlefs/littlefs/tests/test_interspersed.toml
Normal file
244
components/fs/littlefs/littlefs/tests/test_interspersed.toml
Normal file
|
@ -0,0 +1,244 @@
|
|||
|
||||
[[case]] # interspersed file test
|
||||
define.SIZE = [10, 100]
|
||||
define.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
|
||||
assert(buffer[0] == alphas[j]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # interspersed remove file test
|
||||
define.SIZE = [10, 100]
|
||||
define.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &alphas[j], 1) => 1;
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "zzz") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == FILES);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < FILES; i++) {
|
||||
lfs_file_read(&lfs, &file, buffer, 1) => 1;
|
||||
assert(buffer[0] == '~');
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # remove inconveniently test
|
||||
define.SIZE = [10, 100]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_t files[3];
|
||||
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_open(&lfs, &files[2], "g", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
for (int i = 0; i < SIZE/2; i++) {
|
||||
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
|
||||
}
|
||||
|
||||
lfs_remove(&lfs, "f") => 0;
|
||||
|
||||
for (int i = 0; i < SIZE/2; i++) {
|
||||
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &files[0]);
|
||||
lfs_file_close(&lfs, &files[1]);
|
||||
lfs_file_close(&lfs, &files[2]);
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "e") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "g") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
|
||||
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
|
||||
assert(buffer[0] == 'e');
|
||||
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
|
||||
assert(buffer[0] == 'g');
|
||||
}
|
||||
lfs_file_close(&lfs, &files[0]);
|
||||
lfs_file_close(&lfs, &files[1]);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant interspersed file test
|
||||
define.SIZE = [10, 100]
|
||||
define.FILES = [4, 10, 26]
|
||||
reentrant = true
|
||||
code = '''
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
size = lfs_file_size(&lfs, &files[j]);
|
||||
assert((int)size >= 0);
|
||||
if ((int)size <= i) {
|
||||
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
|
||||
lfs_file_sync(&lfs, &files[j]) => 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
|
||||
assert(buffer[0] == alphas[j]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
1815
components/fs/littlefs/littlefs/tests/test_move.toml
Normal file
1815
components/fs/littlefs/littlefs/tests/test_move.toml
Normal file
File diff suppressed because it is too large
Load diff
120
components/fs/littlefs/littlefs/tests/test_orphans.toml
Normal file
120
components/fs/littlefs/littlefs/tests/test_orphans.toml
Normal file
|
@ -0,0 +1,120 @@
|
|||
[[case]] # orphan test
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "parent") => 0;
|
||||
lfs_mkdir(&lfs, "parent/orphan") => 0;
|
||||
lfs_mkdir(&lfs, "parent/child") => 0;
|
||||
lfs_remove(&lfs, "parent/orphan") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// corrupt the child's most recent commit, this should be the update
|
||||
// to the linked-list entry, which should orphan the orphan. Note this
|
||||
// makes a lot of assumptions about the remove operation.
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
uint8_t bbuffer[LFS_BLOCK_SIZE];
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
int off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
// this mkdir should both create a dir and deorphan, so size
|
||||
// should be unchanged
|
||||
lfs_mkdir(&lfs, "parent/otherchild") => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
|
||||
define = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20},
|
||||
{FILES=26, DEPTH=1, CYCLES=20},
|
||||
{FILES=3, DEPTH=3, CYCLES=20},
|
||||
]
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
srand(1);
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (int i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (int d = DEPTH-1; d >= 0; d--) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
293
components/fs/littlefs/littlefs/tests/test_paths.toml
Normal file
293
components/fs/littlefs/littlefs/tests/test_paths.toml
Normal file
|
@ -0,0 +1,293 @@
|
|||
|
||||
[[case]] # simple path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
|
||||
lfs_stat(&lfs, "tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
|
||||
lfs_mkdir(&lfs, "/milk") => 0;
|
||||
lfs_stat(&lfs, "/milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_stat(&lfs, "milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # redundant slashes
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
|
||||
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "//tea//hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "///tea///hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
|
||||
lfs_mkdir(&lfs, "////milk") => 0;
|
||||
lfs_stat(&lfs, "////milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_stat(&lfs, "milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # dot path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
|
||||
lfs_stat(&lfs, "./tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "/././tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "/./tea/./hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
|
||||
lfs_mkdir(&lfs, "/./milk") => 0;
|
||||
lfs_stat(&lfs, "/./milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_stat(&lfs, "milk", &info) => 0;
|
||||
assert(strcmp(info.name, "milk") == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # dot dot path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
lfs_mkdir(&lfs, "coffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
|
||||
|
||||
lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "coffee/coldcoffee/../../tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "coffee/../coffee/../tea/hottea", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
|
||||
lfs_mkdir(&lfs, "coffee/../milk") => 0;
|
||||
lfs_stat(&lfs, "coffee/../milk", &info) => 0;
|
||||
strcmp(info.name, "milk") => 0;
|
||||
lfs_stat(&lfs, "milk", &info) => 0;
|
||||
strcmp(info.name, "milk") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # trailing dot path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
|
||||
lfs_stat(&lfs, "tea/hottea/", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "tea/hottea/./.", &info) => 0;
|
||||
assert(strcmp(info.name, "hottea") == 0);
|
||||
lfs_stat(&lfs, "tea/hottea/..", &info) => 0;
|
||||
assert(strcmp(info.name, "tea") == 0);
|
||||
lfs_stat(&lfs, "tea/hottea/../.", &info) => 0;
|
||||
assert(strcmp(info.name, "tea") == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # leading dot path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, ".milk") => 0;
|
||||
lfs_stat(&lfs, ".milk", &info) => 0;
|
||||
strcmp(info.name, ".milk") => 0;
|
||||
lfs_stat(&lfs, "tea/.././.milk", &info) => 0;
|
||||
strcmp(info.name, ".milk") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # root dot dot path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "tea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/hottea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/warmtea") => 0;
|
||||
lfs_mkdir(&lfs, "tea/coldtea") => 0;
|
||||
lfs_mkdir(&lfs, "coffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
|
||||
|
||||
lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
|
||||
strcmp(info.name, "hottea") => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "coffee/../../../../../../milk") => 0;
|
||||
lfs_stat(&lfs, "coffee/../../../../../../milk", &info) => 0;
|
||||
strcmp(info.name, "milk") => 0;
|
||||
lfs_stat(&lfs, "milk", &info) => 0;
|
||||
strcmp(info.name, "milk") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # invalid path tests
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg);
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
|
||||
|
||||
lfs_remove(&lfs, "dirt") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "dirt/ground") => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
|
||||
|
||||
lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
|
||||
lfs_file_open(&lfs, &file, "dirt/ground", LFS_O_WRONLY | LFS_O_CREAT)
|
||||
=> LFS_ERR_NOENT;
|
||||
lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
|
||||
lfs_file_open(&lfs, &file, "dirt/ground/earth", LFS_O_WRONLY | LFS_O_CREAT)
|
||||
=> LFS_ERR_NOENT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # root operations
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "/", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
|
||||
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
|
||||
=> LFS_ERR_ISDIR;
|
||||
|
||||
lfs_remove(&lfs, "/") => LFS_ERR_INVAL;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # root representations
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "/", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_stat(&lfs, "", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_stat(&lfs, ".", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_stat(&lfs, "..", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_stat(&lfs, "//", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_stat(&lfs, "./", &info) => 0;
|
||||
assert(strcmp(info.name, "/") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # superblock conflict test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
|
||||
lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;
|
||||
|
||||
lfs_mkdir(&lfs, "littlefs") => 0;
|
||||
lfs_stat(&lfs, "littlefs", &info) => 0;
|
||||
assert(strcmp(info.name, "littlefs") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_remove(&lfs, "littlefs") => 0;
|
||||
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # max path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "coffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
|
||||
|
||||
memset(path, 'w', LFS_NAME_MAX+1);
|
||||
path[LFS_NAME_MAX+1] = '\0';
|
||||
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
|
||||
=> LFS_ERR_NAMETOOLONG;
|
||||
|
||||
memcpy(path, "coffee/", strlen("coffee/"));
|
||||
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
|
||||
path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
|
||||
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
|
||||
=> LFS_ERR_NAMETOOLONG;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # really big path test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "coffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
|
||||
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
|
||||
|
||||
memset(path, 'w', LFS_NAME_MAX);
|
||||
path[LFS_NAME_MAX] = '\0';
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
|
||||
memcpy(path, "coffee/", strlen("coffee/"));
|
||||
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX);
|
||||
path[strlen("coffee/")+LFS_NAME_MAX] = '\0';
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
305
components/fs/littlefs/littlefs/tests/test_relocations.toml
Normal file
305
components/fs/littlefs/littlefs/tests/test_relocations.toml
Normal file
|
@ -0,0 +1,305 @@
|
|||
# specific corner cases worth explicitly testing for
|
||||
[[case]] # dangling split dir test
|
||||
define.ITERATIONS = 20
|
||||
define.COUNT = 10
|
||||
define.LFS_BLOCK_CYCLES = [8, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// fill up filesystem so only ~16 blocks are left
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
memset(buffer, 0, 512);
|
||||
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
|
||||
lfs_file_write(&lfs, &file, buffer, 512) => 512;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// make a child dir to use in bounded space
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < ITERATIONS; j++) {
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
if (j == ITERATIONS-1) {
|
||||
break;
|
||||
}
|
||||
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # outdated head test
|
||||
define.ITERATIONS = 20
|
||||
define.COUNT = 10
|
||||
define.LFS_BLOCK_CYCLES = [8, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
// fill up filesystem so only ~16 blocks are left
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
memset(buffer, 0, 512);
|
||||
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
|
||||
lfs_file_write(&lfs, &file, buffer, 512) => 512;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// make a child dir to use in bounded space
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < ITERATIONS; j++) {
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 0;
|
||||
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 2;
|
||||
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 2;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant testing for relocations, this is the same as the
|
||||
# orphan testing, except here we also set block_cycles so that
|
||||
# almost every tree operation needs a relocation
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
|
||||
define = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
srand(1);
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (int i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (int d = DEPTH-1; d >= 0; d--) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant testing for relocations, but now with random renames!
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
|
||||
define = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
srand(1);
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (int i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// create new random path
|
||||
char new_path[256];
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]);
|
||||
}
|
||||
|
||||
// if new path does not exist, rename, otherwise destroy
|
||||
res = lfs_stat(&lfs, new_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// stop once some dir is renamed
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(&path[2*d], &full_path[2*d]);
|
||||
path[2*d+2] = '\0';
|
||||
strcpy(&path[128+2*d], &new_path[2*d]);
|
||||
path[128+2*d+2] = '\0';
|
||||
err = lfs_rename(&lfs, path, path+128);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
if (!err) {
|
||||
strcpy(path, path+128);
|
||||
}
|
||||
}
|
||||
|
||||
for (int d = 0; d < DEPTH; d++) {
|
||||
strcpy(path, new_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
} else {
|
||||
// try to delete path in reverse order,
|
||||
// ignore if dir is not empty
|
||||
for (int d = DEPTH-1; d >= 0; d--) {
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
380
components/fs/littlefs/littlefs/tests/test_seek.toml
Normal file
380
components/fs/littlefs/littlefs/tests/test_seek.toml
Normal file
|
@ -0,0 +1,380 @@
|
|||
|
||||
[[case]] # simple file seek
|
||||
define = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=1},
|
||||
{COUNT=4, SKIP=2},
|
||||
]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen("kittycatcat");
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
|
||||
|
||||
lfs_soff_t pos = -1;
|
||||
size = strlen("kittycatcat");
|
||||
for (int i = 0; i < SKIP; i++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
pos = lfs_file_tell(&lfs, &file);
|
||||
}
|
||||
assert(pos >= 0);
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_rewind(&lfs, &file) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # simple file seek and write
|
||||
define = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=1},
|
||||
{COUNT=4, SKIP=2},
|
||||
]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen("kittycatcat");
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
lfs_soff_t pos = -1;
|
||||
size = strlen("kittycatcat");
|
||||
for (int i = 0; i < SKIP; i++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
pos = lfs_file_tell(&lfs, &file);
|
||||
}
|
||||
assert(pos >= 0);
|
||||
|
||||
memcpy(buffer, "doggodogdog", size);
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "doggodogdog", size) => 0;
|
||||
|
||||
lfs_file_rewind(&lfs, &file) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "doggodogdog", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # boundary seek and writes
|
||||
define.COUNT = 132
|
||||
define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen("kittycatcat");
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
size = strlen("hedgehoghog");
|
||||
const lfs_soff_t offsets[] = OFFSETS;
|
||||
|
||||
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
|
||||
lfs_soff_t off = offsets[i];
|
||||
memcpy(buffer, "hedgehoghog", size);
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # out of bounds seek
|
||||
define = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=2},
|
||||
{COUNT=4, SKIP=3},
|
||||
]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size = strlen("kittycatcat");
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
size = strlen("kittycatcat");
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
|
||||
LFS_SEEK_SET) => (COUNT+SKIP)*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
memcpy(buffer, "porcupineee", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
|
||||
LFS_SEEK_SET) => (COUNT+SKIP)*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "porcupineee", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, COUNT*size,
|
||||
LFS_SEEK_SET) => COUNT*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -((COUNT+SKIP)*size),
|
||||
LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -((COUNT+2*SKIP)*size),
|
||||
LFS_SEEK_END) => LFS_ERR_INVAL;
|
||||
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # inline write and seek
|
||||
define.SIZE = [2, 4, 128, 132]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "tinykitty",
|
||||
LFS_O_RDWR | LFS_O_CREAT) => 0;
|
||||
int j = 0;
|
||||
int k = 0;
|
||||
|
||||
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => i+1;
|
||||
}
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
uint8_t c;
|
||||
lfs_file_read(&lfs, &file, &c, 1) => 1;
|
||||
c => buffer[k++ % 26];
|
||||
}
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => SIZE;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
if (i < SIZE-2) {
|
||||
uint8_t c[3];
|
||||
lfs_file_seek(&lfs, &file, -1, LFS_SEEK_CUR) => i;
|
||||
lfs_file_read(&lfs, &file, &c, 3) => 3;
|
||||
lfs_file_tell(&lfs, &file) => i+3;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
lfs_file_seek(&lfs, &file, i+1, LFS_SEEK_SET) => i+1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
uint8_t c;
|
||||
lfs_file_read(&lfs, &file, &c, 1) => 1;
|
||||
c => buffer[k++ % 26];
|
||||
}
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => SIZE;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # file seek and write with power-loss
|
||||
# must be power-of-2 for quadratic probing to be exhaustive
|
||||
define.COUNT = [4, 64, 128]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
if (!err) {
|
||||
if (lfs_file_size(&lfs, &file) != 0) {
|
||||
lfs_file_size(&lfs, &file) => 11*COUNT;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
memset(buffer, 0, 11+1);
|
||||
lfs_file_read(&lfs, &file, buffer, 11) => 11;
|
||||
assert(memcmp(buffer, "kittycatcat", 11) == 0 ||
|
||||
memcmp(buffer, "doggodogdog", 11) == 0);
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
if (lfs_file_size(&lfs, &file) == 0) {
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
strcpy((char*)buffer, "kittycatcat");
|
||||
size = strlen((char*)buffer);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
strcpy((char*)buffer, "doggodogdog");
|
||||
size = strlen((char*)buffer);
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
// seek and write using quadratic probing to touch all
|
||||
// 11-byte words in the file
|
||||
lfs_off_t off = 0;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
off = (5*off + 1) % COUNT;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "kittycatcat", size) == 0 ||
|
||||
memcmp(buffer, "doggodogdog", size) == 0);
|
||||
if (memcmp(buffer, "doggodogdog", size) != 0) {
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
strcpy((char*)buffer, "doggodogdog");
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
127
components/fs/littlefs/littlefs/tests/test_superblocks.toml
Normal file
127
components/fs/littlefs/littlefs/tests/test_superblocks.toml
Normal file
|
@ -0,0 +1,127 @@
|
|||
[[case]] # simple formatting test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # mount/unmount
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant format
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # invalid mount
|
||||
code = '''
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
[[case]] # expanding superblock
|
||||
define.LFS_BLOCK_CYCLES = [32, 33, 1]
|
||||
define.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # expanding superblock with power cycle
|
||||
define.LFS_BLOCK_CYCLES = [32, 33, 1]
|
||||
define.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
// remove lingering dummy?
|
||||
err = lfs_stat(&lfs, "dummy", &info);
|
||||
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
|
||||
if (!err) {
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # reentrant expanding superblock
|
||||
define.LFS_BLOCK_CYCLES = [2, 1]
|
||||
define.N = 24
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
// remove lingering dummy?
|
||||
err = lfs_stat(&lfs, "dummy", &info);
|
||||
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
|
||||
if (!err) {
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
439
components/fs/littlefs/littlefs/tests/test_truncate.toml
Normal file
439
components/fs/littlefs/littlefs/tests/test_truncate.toml
Normal file
|
@ -0,0 +1,439 @@
|
|||
[[case]] # simple truncate
|
||||
define.MEDIUMSIZE = [32, 2048]
|
||||
define.LARGESIZE = 8192
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
strcpy((char*)buffer, "hair");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # truncate and read
|
||||
define.MEDIUMSIZE = [32, 2048]
|
||||
define.LARGESIZE = 8192
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldyread",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
strcpy((char*)buffer, "hair");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # write, truncate, and read
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "sequence",
|
||||
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
|
||||
lfs_size_t qsize = size / 4;
|
||||
uint8_t *wb = buffer;
|
||||
uint8_t *rb = buffer + size;
|
||||
for (lfs_off_t j = 0; j < size; ++j) {
|
||||
wb[j] = j;
|
||||
}
|
||||
|
||||
/* Spread sequence over size */
|
||||
lfs_file_write(&lfs, &file, wb, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_tell(&lfs, &file) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
|
||||
/* Chop off the last quarter */
|
||||
lfs_size_t trunc = size - qsize;
|
||||
lfs_file_truncate(&lfs, &file, trunc) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
|
||||
/* Read should produce first 3/4 */
|
||||
lfs_file_read(&lfs, &file, rb, size) => trunc;
|
||||
memcmp(rb, wb, trunc) => 0;
|
||||
|
||||
/* Move to 1/4 */
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
lfs_file_seek(&lfs, &file, qsize, LFS_SEEK_SET) => qsize;
|
||||
lfs_file_tell(&lfs, &file) => qsize;
|
||||
|
||||
/* Chop to 1/2 */
|
||||
trunc -= qsize;
|
||||
lfs_file_truncate(&lfs, &file, trunc) => 0;
|
||||
lfs_file_tell(&lfs, &file) => qsize;
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
|
||||
/* Read should produce second quarter */
|
||||
lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
|
||||
memcmp(rb, wb + qsize, trunc - qsize) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # truncate and write
|
||||
define.MEDIUMSIZE = [32, 2048]
|
||||
define.LARGESIZE = 8192
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldywrite",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
strcpy((char*)buffer, "hair");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
strcpy((char*)buffer, "bald");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("bald");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "bald", size) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # truncate write under powerloss
|
||||
define.SMALLSIZE = [4, 512]
|
||||
define.MEDIUMSIZE = [32, 1024]
|
||||
define.LARGESIZE = 2048
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
}
|
||||
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
if (!err) {
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
assert(size == 0 ||
|
||||
size == LARGESIZE ||
|
||||
size == MEDIUMSIZE ||
|
||||
size == SMALLSIZE);
|
||||
for (lfs_off_t j = 0; j < size; j += 4) {
|
||||
lfs_file_read(&lfs, &file, buffer, 4) => 4;
|
||||
assert(memcmp(buffer, "hair", 4) == 0 ||
|
||||
memcmp(buffer, "bald", 4) == 0 ||
|
||||
memcmp(buffer, "comb", 4) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
lfs_file_size(&lfs, &file) => 0;
|
||||
strcpy((char*)buffer, "hair");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
strcpy((char*)buffer, "bald");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
lfs_file_truncate(&lfs, &file, SMALLSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => SMALLSIZE;
|
||||
strcpy((char*)buffer, "comb");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => SMALLSIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[[case]] # more aggressive general truncation tests
|
||||
define.CONFIG = 'range(6)'
|
||||
define.SMALLSIZE = 32
|
||||
define.MEDIUMSIZE = 2048
|
||||
define.LARGESIZE = 8192
|
||||
code = '''
|
||||
#define COUNT 5
|
||||
const struct {
|
||||
lfs_off_t startsizes[COUNT];
|
||||
lfs_off_t startseeks[COUNT];
|
||||
lfs_off_t hotsizes[COUNT];
|
||||
lfs_off_t coldsizes[COUNT];
|
||||
} configs[] = {
|
||||
// cold shrinking
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE}},
|
||||
// cold expanding
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE}},
|
||||
// warm shrinking truncate
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, 0, 0, 0}},
|
||||
// warm expanding truncate
|
||||
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
|
||||
// mid-file shrinking truncate
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, 0, 0, 0}},
|
||||
// mid-file expanding truncate
|
||||
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
|
||||
};
|
||||
|
||||
const lfs_off_t *startsizes = configs[CONFIG].startsizes;
|
||||
const lfs_off_t *startseeks = configs[CONFIG].startseeks;
|
||||
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
|
||||
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
strcpy((char*)buffer, "hair");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => startsizes[i];
|
||||
|
||||
if (startseeks[i] != startsizes[i]) {
|
||||
lfs_file_seek(&lfs, &file,
|
||||
startseeks[i], LFS_SEEK_SET) => startseeks[i];
|
||||
}
|
||||
|
||||
lfs_file_truncate(&lfs, &file, hotsizes[i]) => 0;
|
||||
lfs_file_size(&lfs, &file) => hotsizes[i];
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => hotsizes[i];
|
||||
|
||||
size = strlen("hair");
|
||||
lfs_off_t j = 0;
|
||||
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
|
||||
for (; j < hotsizes[i]; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_truncate(&lfs, &file, coldsizes[i]) => 0;
|
||||
lfs_file_size(&lfs, &file) => coldsizes[i];
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => coldsizes[i];
|
||||
|
||||
size = strlen("hair");
|
||||
lfs_off_t j = 0;
|
||||
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
|
||||
j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
|
||||
for (; j < coldsizes[i]; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# Verify that truncating a file to its *current* size is a no-op:
# neither the contents nor the reported size may change, both while
# the file is open and after an unmount/remount cycle.
[[case]] # noop truncate
define.MEDIUMSIZE = [32, 2048]
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "baldynoop",
            LFS_O_RDWR | LFS_O_CREAT) => 0;

    // fill the file with a repeating 4-byte pattern
    strcpy((char*)buffer, "hair");
    size = strlen((char*)buffer);
    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
        lfs_file_write(&lfs, &file, buffer, size) => size;

        // this truncate should do nothing
        lfs_file_truncate(&lfs, &file, j+size) => 0;
    }
    lfs_file_size(&lfs, &file) => MEDIUMSIZE;

    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
    // should do nothing again
    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
    lfs_file_size(&lfs, &file) => MEDIUMSIZE;

    // contents must be intact, with EOF exactly at MEDIUMSIZE
    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
        lfs_file_read(&lfs, &file, buffer, size) => size;
        memcmp(buffer, "hair", size) => 0;
    }
    lfs_file_read(&lfs, &file, buffer, size) => 0;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;

    // still there after reboot?
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
        lfs_file_read(&lfs, &file, buffer, size) => size;
        memcmp(buffer, "hair", size) => 0;
    }
    lfs_file_read(&lfs, &file, buffer, size) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''