pax_global_header00006660000000000000000000000064135541601050014512gustar00rootroot0000000000000052 comment=436448b52be6e6ad25ed98fbf412892b5a639d5a erofs-utils-1.0/000077500000000000000000000000001355416010500136265ustar00rootroot00000000000000erofs-utils-1.0/.gitignore000066400000000000000000000002661355416010500156220ustar00rootroot00000000000000.* *~ *.diff *.o *.la *.a *.patch *.rej # # Generated files # aclocal.m4 autom4te.cache config.* Makefile Makefile.in config/ m4/ configure configure.scan libtool stamp-h stamp-h1 erofs-utils-1.0/AUTHORS000066400000000000000000000003701355416010500146760ustar00rootroot00000000000000EROFS USERSPACE UTILITIES M: Li Guifu M: Miao Xie M: Fang Wei R: Gao Xiang R: Chao Yu S: Maintained L: linux-erofs@lists.ozlabs.org erofs-utils-1.0/COPYING000066400000000000000000000444441355416010500146730ustar00rootroot00000000000000Valid-License-Identifier: GPL-2.0 Valid-License-Identifier: GPL-2.0-only Valid-License-Identifier: GPL-2.0+ Valid-License-Identifier: GPL-2.0-or-later SPDX-URL: https://spdx.org/licenses/GPL-2.0.html Usage-Guide: To use this license in source code, put one of the following SPDX tag/value pairs into a comment according to the placement guidelines in the licensing rules documentation. For 'GNU General Public License (GPL) version 2 only' use: SPDX-License-Identifier: GPL-2.0 or SPDX-License-Identifier: GPL-2.0-only For 'GNU General Public License (GPL) version 2 or any later version' use: SPDX-License-Identifier: GPL-2.0+ or SPDX-License-Identifier: GPL-2.0-or-later License-Text: GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. erofs-utils-1.0/ChangeLog000066400000000000000000000007111355416010500153770ustar00rootroot00000000000000erofs-utils (1.0-1) unstable; urgency=low * first release with the following new features: - (mkfs.erofs) uncompressed file support; - (mkfs.erofs) uncompressed tail-end packing inline data support; - (mkfs.erofs) lz4 / lz4HC compressed file support; - (mkfs.erofs) special file support; - (mkfs.erofs) inline / shared xattrs support; - (mkfs.erofs) Posix ACL support; -- Gao Xiang Thu, 24 Oct 2019 00:00:00 +0800 erofs-utils-1.0/Makefile.am000066400000000000000000000001351355416010500156610ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0+ # Makefile.am ACLOCAL_AMFLAGS = -I m4 SUBDIRS=lib mkfs erofs-utils-1.0/README000066400000000000000000000112731355416010500145120ustar00rootroot00000000000000erofs-utils =========== erofs-utils includes user-space tools for erofs filesystem images. Currently only mkfs.erofs is available. mkfs.erofs ---------- It can create two primary kinds of erofs images: compressed and uncompressed. - For compressed images, it is able to use several compression algorithms, but only lz4(hc) is supported due to the current Linux kernel implementation. - For uncompressed images, it can properly decide whether the last page of a file should be inlined or not [1]. Dependencies ~~~~~~~~~~~~ lz4-1.8.0+ is required for lz4 support [2]; lz4-1.9.0+ is recommended. How to build for lz4-1.9.0 or above ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build, run the following commands in order: :: $ ./autogen.sh $ ./configure $ make The mkfs.erofs binary will be generated under the mkfs folder. There are still some issues which affect the stability of LZ4_compress_destSize() * they impact lz4 only, not lz4HC * [3]. 
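If lz4 is installed in a non-default location, the --with-lz4-incdir and --with-lz4-libdir options declared in configure.ac can point configure at it; a minimal sketch, assuming a hypothetical install prefix /opt/lz4: :: $ ./configure --with-lz4-incdir=/opt/lz4/include --with-lz4-libdir=/opt/lz4/lib $ make 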
How to build for lz4-1.8.0~1.8.3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For these old lz4 versions, the lz4hc algorithm cannot be supported without the lz4 static library due to unstable LZ4_compress_HC_destSize API usage, which means only the lz4 algorithm is available if the lz4 static library isn't found. On Fedora, static lz4 can be installed using: yum install lz4-static.x86_64 However, it's not recommended to use those versions since there were bugs in these compressors, see [2] and [3] as well. How to generate erofs images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently lz4 and lz4hc are available for compression, e.g. $ mkfs.erofs -zlz4hc foo.erofs.img foo/ Or leave all files uncompressed as an option: $ mkfs.erofs foo.erofs.img foo/ How to generate legacy erofs images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Decompression in-place and compacted indexes were introduced in linux-5.3 and are not backward-compatible with older kernels. In order to generate _legacy_ erofs images for old kernels, add "-E legacy-compress" to the command line, e.g. $ mkfs.erofs -E legacy-compress -zlz4hc foo.erofs.img foo/ Known issues ~~~~~~~~~~~~ 1. LZ4HC cannot compress long zeroed buffers properly with LZ4_compress_HC_destSize() https://github.com/lz4/lz4/issues/784 Obsoleted erofs.mkfs ~~~~~~~~~~~~~~~~~~~~ There is an original erofs.mkfs version developed by Li Guifu, which was replaced by the new erofs-utils implementation. git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git -b obsoleted_mkfs It may still be useful since the new erofs-utils has not been widely used in commercial products; if you hit any problems with it, please report bugs to us as well. Contribution ------------ erofs-utils is a GPLv2+ project as a part of the erofs file system; feel free to send patches or feedback to us. To: linux-erofs mailing list Li Guifu Miao Xie Fang Wei Cc: Gao Xiang Chao Yu Comments -------- [1] According to the erofs on-disk format, the last page of a file can be inlined aggressively with its metadata in order to reduce I/O overhead and save storage space. [2] There was a bug until lz4-1.8.3 which could crash erofs-utils randomly. Fortunately, a bugfix by our colleague Qiuyang Sun was merged in lz4-1.9.0. For more details, please refer to https://github.com/lz4/lz4/commit/660d21272e4c8a0f49db5fc1e6853f08713dff82 [3] There are many crash fixes merged into lz4-1.9.2 for LZ4_compress_destSize(), and I once ran into some crashes due to those issues. * Again, lz4HC is not affected in this section. * [LZ4_compress_destSize] Allow 2 more bytes of match length https://github.com/lz4/lz4/commit/690009e2c2f9e5dcb0d40e7c0c40610ce6006eda [LZ4_compress_destSize] Fix rare data corruption bug https://github.com/lz4/lz4/commit/6bc6f836a18d1f8fd05c8fc2b42f1d800bc25de1 [LZ4_compress_destSize] Fix overflow condition https://github.com/lz4/lz4/commit/13a2d9e34ffc4170720ce417c73e396d0ac1471a [LZ4_compress_destSize] Fix off-by-one error in fix https://github.com/lz4/lz4/commit/7c32101c655d93b61fc212dcd512b87119dd7333 [LZ4_compress_destSize] Fix off-by-one error https://github.com/lz4/lz4/commit/d7cad81093cd805110291f84d64d385557d0ffba Since upstream lz4 doesn't have a stable branch for old versions, it's preferred to use the latest upstream lz4 library (although some regressions could happen since new features are also introduced to the latest upstream version) or backport all stable bugfixes to old stable versions, e.g. 
our unoffical lz4 fork: https://github.com/erofs/lz4 erofs-utils-1.0/VERSION000066400000000000000000000000171355416010500146740ustar00rootroot000000000000001.0 2019-10-24 erofs-utils-1.0/autogen.sh000077500000000000000000000001721355416010500156270ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: GPL-2.0+ aclocal && \ autoheader && \ autoconf && \ libtoolize && \ automake -a -c erofs-utils-1.0/configure.ac000066400000000000000000000120051355416010500161120ustar00rootroot00000000000000# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. AC_PREREQ([2.69]) m4_define([erofs_utils_version], m4_esyscmd([sed -n '1p' VERSION | tr -d '\n'])) m4_define([erofs_utils_date], m4_esyscmd([sed -n '2p' VERSION | tr -d '\n'])) AC_INIT([erofs-utils], [erofs_utils_version], [linux-erofs@lists.ozlabs.org]) AC_CONFIG_SRCDIR([config.h.in]) AC_CONFIG_HEADERS([config.h]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_AUX_DIR(config) AM_INIT_AUTOMAKE([foreign -Wall -Werror]) # Checks for programs. AM_PROG_AR AC_PROG_CC AC_PROG_INSTALL LT_INIT dnl EROFS_UTILS_PARSE_DIRECTORY dnl Input: $1 = a string to a relative or absolute directory dnl Output: $2 = the variable to set with the absolute directory AC_DEFUN([EROFS_UTILS_PARSE_DIRECTORY], [ dnl Check if argument is a directory if test -d $1 ; then dnl Get the absolute path of the directory dnl in case of relative directory. dnl If realpath is not a valid command, dnl an error is produced and we keep the given path. local_tmp=`realpath $1 2>/dev/null` if test "$local_tmp" != "" ; then if test -d "$local_tmp" ; then $2="$local_tmp" else $2=$1 fi else $2=$1 fi dnl Check for space in the directory if test `echo $1|cut -d' ' -f1` != $1 ; then AC_MSG_ERROR($1 directory shall not contain any space.) fi else AC_MSG_ERROR($1 shall be a valid directory) fi ]) AC_ARG_ENABLE(lz4, [AS_HELP_STRING([--disable-lz4], [disable LZ4 compression support @<:@default=enabled@:>@])], [enable_lz4="$enableval"], [enable_lz4="yes"]) # Checks for libraries. # Use customized LZ4 library path when specified. AC_ARG_WITH(lz4-incdir, [AS_HELP_STRING([--with-lz4-incdir=DIR], [LZ4 include directory])], [ EROFS_UTILS_PARSE_DIRECTORY(["$withval"],[withval])]) AC_ARG_WITH(lz4-libdir, [AS_HELP_STRING([--with-lz4-libdir=DIR], [LZ4 lib directory])], [ EROFS_UTILS_PARSE_DIRECTORY(["$withval"],[withval])]) AC_ARG_VAR([LZ4_CFLAGS], [C compiler flags for lz4]) AC_ARG_VAR([LZ4_LIBS], [linker flags for lz4]) # Checks for header files. AC_CHECK_HEADERS(m4_flatten([ dirent.h fcntl.h getopt.h inttypes.h linux/falloc.h linux/fs.h linux/types.h linux/xattr.h limits.h stddef.h stdint.h stdlib.h string.h sys/ioctl.h sys/stat.h sys/sysmacros.h sys/time.h unistd.h ])) # Checks for typedefs, structures, and compiler characteristics. AC_C_INLINE AC_TYPE_INT64_T AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_CHECK_MEMBERS([struct stat.st_rdev]) AC_TYPE_UINT64_T # # Check to see if llseek() is declared in unistd.h. On some libc's # it is, and on others it isn't..... Thank you glibc developers.... # AC_CHECK_DECL(llseek, [AC_DEFINE(HAVE_LLSEEK_PROTOTYPE, 1, [Define to 1 if llseek declared in unistd.h])],, [#include ]) # # Check to see if lseek64() is declared in unistd.h. Glibc's header files # are so convoluted that I can't tell whether it will always be defined, # and if it isn't defined while lseek64 is defined in the library, # disaster will strike. # # Warning! Use of --enable-gcc-wall may throw off this test. 
# AC_CHECK_DECL(lseek64,[AC_DEFINE(HAVE_LSEEK64_PROTOTYPE, 1, [Define to 1 if lseek64 declared in unistd.h])],, [#define _LARGEFILE_SOURCE #define _LARGEFILE64_SOURCE #include ]) # Checks for library functions. AC_CHECK_FUNCS([fallocate gettimeofday memset realpath strdup strerror strrchr strtoull]) # Configure lz4 test -z $LZ4_LIBS && LZ4_LIBS='-llz4' if test "x$enable_lz4" = "xyes"; then test -z "${with_lz4_incdir}" || LZ4_CFLAGS="-I$with_lz4_incdir $LZ4_CFLAGS" test -z "${with_lz4_libdir}" || LZ4_LIBS="-L$with_lz4_libdir $LZ4_LIBS" saved_CPPFLAGS=${CPPFLAGS} CPPFLAGS="${LZ4_CFLAGS} ${CFLAGS}" AC_CHECK_HEADERS([lz4.h],[have_lz4h="yes"], []) if test "x${have_lz4h}" = "xyes" ; then saved_LDFLAGS=${LDFLAGS} LDFLAGS="-L$with_lz4_libdir ${LDFLAGS}" AC_CHECK_LIB(lz4, LZ4_compress_destSize, [ have_lz4="yes" have_lz4hc="yes" AC_CHECK_LIB(lz4, LZ4_compress_HC_destSize, [], [ AC_CHECK_DECL(LZ4_compress_HC_destSize, [lz4_force_static="yes"], [have_lz4hc="no"], [[ #define LZ4_HC_STATIC_LINKING_ONLY (1) #include ]]) ]) ], [AC_MSG_ERROR([Cannot find proper lz4 version (>= 1.8.0)])]) LDFLAGS=${saved_LDFLAGS} if test "x${have_lz4}" = "xyes"; then AC_DEFINE([LZ4_ENABLED], [1], [Define to 1 if lz4 is enabled.]) if test "x${have_lz4hc}" = "xyes"; then AC_DEFINE([LZ4HC_ENABLED], [1], [Define to 1 if lz4hc is enabled.]) fi if test "x${lz4_force_static}" = "xyes"; then LDFLAGS="-all-static ${LDFLAGS}" else test -z "${with_lz4_libdir}" || LZ4_LIBS="-R ${with_lz4_libdir} $LZ4_LIBS" fi LIBS="$LZ4_LIBS $LIBS" fi fi CFLAGS=${saved_CPPFLAGS} fi AM_CONDITIONAL([ENABLE_LZ4], [test "x${have_lz4}" = "xyes"]) AM_CONDITIONAL([ENABLE_LZ4HC], [test "x${have_lz4hc}" = "xyes"]) AC_CONFIG_FILES([Makefile lib/Makefile mkfs/Makefile]) AC_OUTPUT erofs-utils-1.0/include/000077500000000000000000000000001355416010500152515ustar00rootroot00000000000000erofs-utils-1.0/include/erofs/000077500000000000000000000000001355416010500163675ustar00rootroot00000000000000erofs-utils-1.0/include/erofs/cache.h000066400000000000000000000045061355416010500176100ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/cache.h * * Copyright (C) 2018 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Miao Xie * with heavy changes by Gao Xiang */ #ifndef __EROFS_CACHE_H #define __EROFS_CACHE_H #include "internal.h" struct erofs_buffer_head; struct erofs_buffer_block; #define DATA 0 #define META 1 /* including inline xattrs, extent */ #define INODE 2 /* shared xattrs */ #define XATTR 3 struct erofs_bhops { bool (*preflush)(struct erofs_buffer_head *bh); bool (*flush)(struct erofs_buffer_head *bh); }; struct erofs_buffer_head { struct list_head list; struct erofs_buffer_block *block; erofs_off_t off; struct erofs_bhops *op; void *fsprivate; }; struct erofs_buffer_block { struct list_head list; erofs_blk_t blkaddr; int type; struct erofs_buffer_head buffers; }; static inline const int get_alignsize(int type, int *type_ret) { if (type == DATA) return EROFS_BLKSIZ; if (type == INODE) { *type_ret = META; return sizeof(struct erofs_inode_compact); } else if (type == XATTR) { *type_ret = META; return sizeof(struct erofs_xattr_entry); } if (type == META) return 1; return -EINVAL; } extern struct erofs_bhops erofs_drop_directly_bhops; extern struct erofs_bhops erofs_skip_write_bhops; extern struct erofs_bhops erofs_buf_write_bhops; static inline erofs_off_t erofs_btell(struct erofs_buffer_head *bh, bool end) { const struct erofs_buffer_block *bb = bh->block; if (bb->blkaddr == NULL_ADDR) return NULL_ADDR_UL; return blknr_to_addr(bb->blkaddr) + (end ? list_next_entry(bh, list)->off : bh->off); } static inline bool erofs_bh_flush_generic_end(struct erofs_buffer_head *bh) { list_del(&bh->list); free(bh); return true; } struct erofs_buffer_head *erofs_buffer_init(void); int erofs_bh_balloon(struct erofs_buffer_head *bh, erofs_off_t incr); struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size, unsigned int required_ext, unsigned int inline_ext); struct erofs_buffer_head *erofs_battach(struct erofs_buffer_head *bh, int type, unsigned int size); erofs_blk_t erofs_mapbh(struct erofs_buffer_block *bb, bool end); bool erofs_bflush(struct erofs_buffer_block *bb); void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke); #endif erofs-utils-1.0/include/erofs/compress.h000066400000000000000000000013471355416010500204000ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/compress.h * * Copyright (C) 2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Gao Xiang */ #ifndef __EROFS_COMPRESS_H #define __EROFS_COMPRESS_H #include "internal.h" /* workaround for an upstream lz4 compression issue, which can crash us */ /* #define EROFS_CONFIG_COMPR_MAX_SZ (1024 * 1024) */ #define EROFS_CONFIG_COMPR_MAX_SZ (900 * 1024) #define EROFS_CONFIG_COMPR_MIN_SZ (32 * 1024) int erofs_write_compressed_file(struct erofs_inode *inode); int z_erofs_compress_init(void); int z_erofs_compress_exit(void); const char *z_erofs_list_available_compressors(unsigned int i); #endif erofs-utils-1.0/include/erofs/config.h000066400000000000000000000015551355416010500200130ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/config.h * * Copyright (C) 2018-2019 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Li Guifu */ #ifndef __EROFS_CONFIG_H #define __EROFS_CONFIG_H #include "defs.h" enum { FORCE_INODE_COMPACT = 1, FORCE_INODE_EXTENDED, }; struct erofs_configure { const char *c_version; int c_dbg_lvl; bool c_dry_run; bool c_legacy_compress; /* related arguments for mkfs.erofs */ char *c_img_path; char *c_src_path; char *c_compr_alg_master; int c_compr_level_master; int c_force_inodeversion; /* < 0, xattr disabled and INT_MAX, always use inline xattrs */ int c_inline_xattr_tolerance; u64 c_unix_timestamp; }; extern struct erofs_configure cfg; void erofs_init_configure(void); void erofs_show_config(void); void erofs_exit_configure(void); #endif erofs-utils-1.0/include/erofs/defs.h000066400000000000000000000105251355416010500174640ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/defs.h * * Copyright (C) 2018 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu * Modified by Gao Xiang */ #ifndef __EROFS_DEFS_H #define __EROFS_DEFS_H #include #include #include #include #include #include #ifdef HAVE_CONFIG_H #include #endif #ifdef HAVE_LINUX_TYPES_H #include #endif /* * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. */ #define container_of(ptr, type, member) ({ \ const typeof(((type *)0)->member) *__mptr = (ptr); \ (type *)((char *)__mptr - offsetof(type, member)); }) typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; #ifndef HAVE_LINUX_TYPES_H typedef u8 __u8; typedef u16 __u16; typedef u32 __u32; typedef u64 __u64; typedef u16 __le16; typedef u32 __le32; typedef u64 __le64; typedef u16 __be16; typedef u32 __be32; typedef u64 __be64; #endif typedef int8_t s8; typedef int16_t s16; typedef int32_t s32; typedef int64_t s64; #if __BYTE_ORDER == __LITTLE_ENDIAN /* * The host byte order is the same as network byte order, * so these functions are all just identity. */ #define cpu_to_le16(x) ((__u16)(x)) #define cpu_to_le32(x) ((__u32)(x)) #define cpu_to_le64(x) ((__u64)(x)) #define le16_to_cpu(x) ((__u16)(x)) #define le32_to_cpu(x) ((__u32)(x)) #define le64_to_cpu(x) ((__u64)(x)) #else #if __BYTE_ORDER == __BIG_ENDIAN #define cpu_to_le16(x) (__builtin_bswap16(x)) #define cpu_to_le32(x) (__builtin_bswap32(x)) #define cpu_to_le64(x) (__builtin_bswap64(x)) #define le16_to_cpu(x) (__builtin_bswap16(x)) #define le32_to_cpu(x) (__builtin_bswap32(x)) #define le64_to_cpu(x) (__builtin_bswap64(x)) #else #pragma error #endif #endif #ifndef __OPTIMIZE__ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)])) #else #define BUILD_BUG_ON(condition) assert(condition) #endif #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define __round_mask(x, y) ((__typeof__(x))((y)-1)) #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) #define round_down(x, y) ((x) & ~__round_mask(x, y)) /* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ #define roundup(x, y) ( \ { \ const typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) #define rounddown(x, y) ( \ { \ typeof(x) __x = (x); \ __x - (__x % (y)); \ } \ ) #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ (void) (&_max1 == &_max2); \ _max1 > _max2 ? 
_max1 : _max2; }) /* * ..and if you can't take the strict types, you can specify one yourself. * Or don't use min/max at all, of course. */ #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1: __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1: __max2; }) #define cmpsgn(x, y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (_x > _y) - (_x < _y); }) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) #define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) #define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #ifdef __SIZEOF_LONG__ #define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__) #else #define BITS_PER_LONG __WORDSIZE #endif #define BUG_ON(cond) assert(!(cond)) #ifdef NDEBUG #define DBG_BUGON(condition) ((void)(condition)) #else #define DBG_BUGON(condition) BUG_ON(condition) #endif #ifndef __maybe_unused #define __maybe_unused __attribute__((__unused__)) #endif #endif erofs-utils-1.0/include/erofs/err.h000066400000000000000000000012411355416010500173260ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/err.h * * Copyright (C) 2018 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu */ #ifndef __EROFS_ERR_H #define __EROFS_ERR_H #include #define MAX_ERRNO (4095) #define IS_ERR_VALUE(x) \ ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO) static inline void *ERR_PTR(long error) { return (void *)error; } static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline long PTR_ERR(const void *ptr) { return (long) ptr; } #endif erofs-utils-1.0/include/erofs/hashtable.h000066400000000000000000000315501355416010500204770ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * erofs_utils/include/erofs/hashtable.h * * Original code taken from 'linux/include/linux/hash{,table}.h' */ #ifndef __EROFS_HASHTABLE_H #define __EROFS_HASHTABLE_H /* * Fast hashing routine for ints, longs and pointers. * (C) 2002 Nadia Yvette Chambers, IBM */ /* * Statically sized hash table implementation * (C) 2012 Sasha Levin */ #include "defs.h" /* * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and * fs/inode.c. It's not actually prime any more (the previous primes * were actively bad for hashing), but the name remains. */ #if BITS_PER_LONG == 32 #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 #define hash_long(val, bits) hash_32(val, bits) #elif BITS_PER_LONG == 64 #define hash_long(val, bits) hash_64(val, bits) #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 #else #error Wordsize not 32 or 64 #endif /* * This hash multiplies the input by a large odd number and takes the * high bits. Since multiplication propagates changes to the most * significant end only, it is essential that the high bits of the * product be used for the hash value. * * Chuck Lever verified the effectiveness of this technique: * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf * * Although a random odd number will do, it turns out that the golden * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice * properties. (See Knuth vol 3, section 6.4, exercise 9.) 
* * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, * which is very slightly easier to multiply by and makes no * difference to the hash distribution. */ #define GOLDEN_RATIO_32 0x61C88647 #define GOLDEN_RATIO_64 0x61C8864680B583EBull struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; /* * Architectures might want to move the poison pointer offset * into some well-recognized area such as 0xdead000000000000, * that is also not mappable by user-space exploits: */ #ifdef CONFIG_ILLEGAL_POINTER_VALUE # define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) #else # define POISON_POINTER_DELTA 0 #endif /* * These are non-NULL pointers that will result in page faults * under normal circumstances, used to verify that nobody uses * non-initialized list entries. */ #define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is * too wasteful. * You lose the ability to access the tail in O(1). */ #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; } static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; *pprev = next; if (next) next->pprev = pprev; } static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = LIST_POISON1; n->pprev = LIST_POISON2; } static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; h->first = n; n->pprev = &h->first; } /* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; prev->next = n; n->pprev = &prev->next; if (n->next) n->next->pprev = &n->next; } /* after that we'll appear to be on some hlist and hlist_del will work */ static inline void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; } /* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */ static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } #define hlist_entry(ptr, type, member) container_of(ptr, type, member) #define hlist_for_each(pos, head) \ for (pos = (head)->first; pos; pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ pos = n) #define hlist_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ }) /** * hlist_for_each_entry - iterate over list of given type * @pos:the type * to use as a loop cursor. * @head:the head for your list. * @member:the name of the hlist_node within the struct. */ #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_continue * iterate over a hlist continuing after current point * @pos:the type * to use as a loop cursor. * @member:the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_from * iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(pos, member) \ for (; pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_safe * iterate over list of given type safe against removal of list entry * @pos:the type * to use as a loop cursor. * @n:another &struct hlist_node to use as temporary storage * @head:the head for your list. * @member:the name of the hlist_node within the struct. */ #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*pos), member)) static inline u32 __hash_32(u32 val) { return val * GOLDEN_RATIO_32; } static inline u32 hash_32(u32 val, unsigned int bits) { /* High bits are more random, so use them. */ return __hash_32(val) >> (32 - bits); } static __always_inline u32 hash_64(u64 val, unsigned int bits) { #if BITS_PER_LONG == 64 /* 64x64-bit multiply is efficient on all 64-bit processors */ return val * GOLDEN_RATIO_64 >> (64 - bits); #else /* Hash 64 bits using only 32x32-bit multiply. */ return hash_32((u32)val ^ __hash_32(val >> 32), bits); #endif } /** * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value * @n - parameter * * constant-capable log of base 2 calculation * - this can be used to initialise global variables from constant data, hence * the massive ternary operator construction * * selects the appropriately-sized optimised version depending on sizeof(n) */ #define ilog2(n) \ ( \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ (n) & (1ULL << 60) ? 60 : \ (n) & (1ULL << 59) ? 59 : \ (n) & (1ULL << 58) ? 58 : \ (n) & (1ULL << 57) ? 57 : \ (n) & (1ULL << 56) ? 56 : \ (n) & (1ULL << 55) ? 55 : \ (n) & (1ULL << 54) ? 54 : \ (n) & (1ULL << 53) ? 53 : \ (n) & (1ULL << 52) ? 52 : \ (n) & (1ULL << 51) ? 51 : \ (n) & (1ULL << 50) ? 50 : \ (n) & (1ULL << 49) ? 49 : \ (n) & (1ULL << 48) ? 48 : \ (n) & (1ULL << 47) ? 47 : \ (n) & (1ULL << 46) ? 46 : \ (n) & (1ULL << 45) ? 45 : \ (n) & (1ULL << 44) ? 44 : \ (n) & (1ULL << 43) ? 43 : \ (n) & (1ULL << 42) ? 42 : \ (n) & (1ULL << 41) ? 41 : \ (n) & (1ULL << 40) ? 40 : \ (n) & (1ULL << 39) ? 39 : \ (n) & (1ULL << 38) ? 38 : \ (n) & (1ULL << 37) ? 37 : \ (n) & (1ULL << 36) ? 36 : \ (n) & (1ULL << 35) ? 35 : \ (n) & (1ULL << 34) ? 34 : \ (n) & (1ULL << 33) ? 33 : \ (n) & (1ULL << 32) ? 32 : \ (n) & (1ULL << 31) ? 31 : \ (n) & (1ULL << 30) ? 30 : \ (n) & (1ULL << 29) ? 29 : \ (n) & (1ULL << 28) ? 
28 : \ (n) & (1ULL << 27) ? 27 : \ (n) & (1ULL << 26) ? 26 : \ (n) & (1ULL << 25) ? 25 : \ (n) & (1ULL << 24) ? 24 : \ (n) & (1ULL << 23) ? 23 : \ (n) & (1ULL << 22) ? 22 : \ (n) & (1ULL << 21) ? 21 : \ (n) & (1ULL << 20) ? 20 : \ (n) & (1ULL << 19) ? 19 : \ (n) & (1ULL << 18) ? 18 : \ (n) & (1ULL << 17) ? 17 : \ (n) & (1ULL << 16) ? 16 : \ (n) & (1ULL << 15) ? 15 : \ (n) & (1ULL << 14) ? 14 : \ (n) & (1ULL << 13) ? 13 : \ (n) & (1ULL << 12) ? 12 : \ (n) & (1ULL << 11) ? 11 : \ (n) & (1ULL << 10) ? 10 : \ (n) & (1ULL << 9) ? 9 : \ (n) & (1ULL << 8) ? 8 : \ (n) & (1ULL << 7) ? 7 : \ (n) & (1ULL << 6) ? 6 : \ (n) & (1ULL << 5) ? 5 : \ (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ (n) & (1ULL << 1) ? 1 : 0 \ ) #define DEFINE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] #define HASH_SIZE(name) (ARRAY_SIZE(name)) #define HASH_BITS(name) ilog2(HASH_SIZE(name)) /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/ #define hash_min(val, bits) \ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) static inline void __hash_init(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) INIT_HLIST_HEAD(&ht[i]); } /** * hash_init - initialize a hash table * @hashtable: hashtable to be initialized * * Calculates the size of the hashtable from the given parameter, otherwise * same as hash_init_size. * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing. */ #define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) /** * hash_add - add an object to a hashtable * @hashtable: hashtable to add to * @node: the &struct hlist_node of the object to be added * @key: the key of the object to be added */ #define hash_add(hashtable, node, key) \ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) /** * hash_hashed - check whether an object is in any hashtable * @node: the &struct hlist_node of the object to be checked */ static inline bool hash_hashed(struct hlist_node *node) { return !hlist_unhashed(node); } static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) if (!hlist_empty(&ht[i])) return false; return true; } /** * hash_empty - check whether a hashtable is empty * @hashtable: hashtable to check * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing. 
*/ #define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) /** * hash_del - remove an object from a hashtable * @node: &struct hlist_node of the object to remove */ static inline void hash_del(struct hlist_node *node) { hlist_del_init(node); } /** * hash_for_each - iterate over a hashtable * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each(name, bkt, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry(obj, &name[bkt], member) /** * hash_for_each_safe - iterate over a hashtable safe against removal of * hash entry * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @tmp: a &struct used for temporary storage * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each_safe(name, bkt, tmp, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) /** * hash_for_each_possible - iterate over all possible objects hashing to the * same bucket * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible(name, obj, member, key) \ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) #endif erofs-utils-1.0/include/erofs/inode.h000066400000000000000000000011461355416010500176400ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/inode.h * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu * with heavy changes by Gao Xiang */ #ifndef __EROFS_INODE_H #define __EROFS_INODE_H #include "erofs/internal.h" void erofs_inode_manager_init(void); unsigned int erofs_iput(struct erofs_inode *inode); erofs_nid_t erofs_lookupnid(struct erofs_inode *inode); struct erofs_inode *erofs_mkfs_build_tree_from_path(struct erofs_inode *parent, const char *path); #endif erofs-utils-1.0/include/erofs/internal.h000066400000000000000000000053661355416010500203660ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/internal.h * * Copyright (C) 2019 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Gao Xiang */ #ifndef __EROFS_INTERNAL_H #define __EROFS_INTERNAL_H #include "list.h" #include "err.h" typedef unsigned short umode_t; #define __packed __attribute__((__packed__)) #include "erofs_fs.h" #include #ifndef PATH_MAX #define PATH_MAX 4096 /* # chars in a path name including nul */ #endif #define PAGE_SHIFT (12) #define PAGE_SIZE (1U << PAGE_SHIFT) #define LOG_BLOCK_SIZE (12) #define EROFS_BLKSIZ (1U << LOG_BLOCK_SIZE) #define EROFS_ISLOTBITS 5 #define EROFS_SLOTSIZE (1U << EROFS_ISLOTBITS) typedef u64 erofs_off_t; typedef u64 erofs_nid_t; /* data type for filesystem-wide blocks number */ typedef u32 erofs_blk_t; #define NULL_ADDR ((unsigned int)-1) #define NULL_ADDR_UL ((unsigned long)-1) #define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ) #define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ) #define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ) #define BLK_ROUND_UP(addr) DIV_ROUND_UP(addr, EROFS_BLKSIZ) struct erofs_buffer_head; struct erofs_sb_info { erofs_blk_t meta_blkaddr; erofs_blk_t xattr_blkaddr; u32 feature_incompat; u64 build_time; u32 build_time_nsec; }; /* global sbi */ extern struct erofs_sb_info sbi; struct erofs_inode { struct list_head i_hash, i_subdirs, i_xattrs; unsigned int i_count; struct erofs_inode *i_parent; umode_t i_mode; erofs_off_t i_size; u64 i_ino[2]; u32 i_uid; u32 i_gid; u64 i_ctime; u32 i_ctime_nsec; u32 i_nlink; union { u32 i_blkaddr; u32 i_blocks; u32 i_rdev; } u; char i_srcpath[PATH_MAX + 1]; unsigned char datalayout; unsigned char inode_isize; /* inline tail-end packing size */ unsigned short idata_size; unsigned int xattr_isize; unsigned int extent_isize; erofs_nid_t nid; struct erofs_buffer_head *bh; struct erofs_buffer_head *bh_inline, *bh_data; void *idata; void *compressmeta; }; static inline bool is_inode_layout_compression(struct erofs_inode *inode) { return erofs_inode_is_data_compressed(inode->datalayout); } #define IS_ROOT(x) ((x) == (x)->i_parent) struct erofs_dentry { struct list_head d_child; /* child of parent list */ unsigned int type; char name[EROFS_NAME_LEN]; union { struct erofs_inode *inode; erofs_nid_t nid; }; }; static inline bool is_dot_dotdot(const char *name) { if (name[0] != '.') return false; return name[1] == '\0' || (name[1] == '.' && name[2] == '\0'); } #include #include static inline const char *erofs_strerror(int err) { static char msg[256]; sprintf(msg, "[Error %d] %s", -err, strerror(-err)); return msg; } #endif erofs-utils-1.0/include/erofs/io.h000066400000000000000000000013771355416010500171570ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/io.h * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu */ #ifndef __EROFS_IO_H #define __EROFS_IO_H #include #include "internal.h" #ifndef O_BINARY #define O_BINARY 0 #endif int dev_open(const char *devname); void dev_close(void); int dev_write(const void *buf, u64 offset, size_t len); int dev_fillzero(u64 offset, size_t len, bool padding); int dev_fsync(void); int dev_resize(erofs_blk_t nblocks); u64 dev_length(void); static inline int blk_write(const void *buf, erofs_blk_t blkaddr, u32 nblocks) { return dev_write(buf, blknr_to_addr(blkaddr), blknr_to_addr(nblocks)); } #endif erofs-utils-1.0/include/erofs/list.h000066400000000000000000000067061355416010500175240ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/list.h * * Copyright (C) 2018 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Li Guifu */ #ifndef __EROFS_LIST_HEAD_H #define __EROFS_LIST_HEAD_H #include "defs.h" struct list_head { struct list_head *prev; struct list_head *next; }; #define LIST_HEAD_INIT(name) \ { \ &(name), &(name) \ } #define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name) static inline void init_list_head(struct list_head *list) { list->prev = list; list->next = list; } static inline void __list_add(struct list_head *entry, struct list_head *prev, struct list_head *next) { entry->prev = prev; entry->next = next; prev->next = entry; next->prev = entry; } static inline void list_add(struct list_head *entry, struct list_head *head) { __list_add(entry, head, head->next); } static inline void list_add_tail(struct list_head *entry, struct list_head *head) { __list_add(entry, head->prev, head); } static inline void __list_del(struct list_head *prev, struct list_head *next) { prev->next = next; next->prev = prev; } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->prev = entry->next = NULL; } static inline int list_empty(struct list_head *head) { return head->next == head; } #define list_entry(ptr, type, member) container_of(ptr, type, member) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) #define list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) #define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ pos = n, n = pos->next) #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ &pos->member != (head); \ pos = list_next_entry(pos, member)) #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ &pos->member != (head); \ pos = list_prev_entry(pos, member)) #define list_for_each_entry_from(pos, head, member) \ for (; &pos->member != (head); pos = list_next_entry(pos, member)) #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ &pos->member != (head); \ pos = n, n = list_next_entry(n, member)) #endif erofs-utils-1.0/include/erofs/print.h000066400000000000000000000027441355416010500177030ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs_utils/include/erofs/print.h * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu */ #ifndef __EROFS_PRINT_H #define __EROFS_PRINT_H #include "config.h" #include enum { EROFS_MSG_MIN = 0, EROFS_ERR = 0, EROFS_WARN = 2, EROFS_INFO = 3, EROFS_DBG = 7, EROFS_MSG_MAX = 9 }; #define FUNC_LINE_FMT "%s() Line[%d] " #ifndef pr_fmt #define pr_fmt(fmt) "EROFS: " FUNC_LINE_FMT fmt "\n" #endif #define erofs_dbg(fmt, ...) do { \ if (cfg.c_dbg_lvl >= EROFS_DBG) { \ fprintf(stdout, \ pr_fmt(fmt), \ __func__, \ __LINE__, \ ##__VA_ARGS__); \ } \ } while (0) #define erofs_info(fmt, ...) do { \ if (cfg.c_dbg_lvl >= EROFS_INFO) { \ fprintf(stdout, \ pr_fmt(fmt), \ __func__, \ __LINE__, \ ##__VA_ARGS__); \ fflush(stdout); \ } \ } while (0) #define erofs_warn(fmt, ...) 
do { \ if (cfg.c_dbg_lvl >= EROFS_WARN) { \ fprintf(stdout, \ pr_fmt(fmt), \ __func__, \ __LINE__, \ ##__VA_ARGS__); \ fflush(stdout); \ } \ } while (0) #define erofs_err(fmt, ...) do { \ if (cfg.c_dbg_lvl >= EROFS_ERR) { \ fprintf(stderr, \ "Err: " pr_fmt(fmt), \ __func__, \ __LINE__, \ ##__VA_ARGS__); \ } \ } while (0) #define erofs_dump(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__) #endif erofs-utils-1.0/include/erofs/xattr.h000066400000000000000000000027201355416010500177030ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/include/erofs/xattr.h * * Originally contributed by an anonymous person, * heavily changed by Li Guifu * and Gao Xiang */ #ifndef __EROFS_XATTR_H #define __EROFS_XATTR_H #include "internal.h" #define EROFS_INODE_XATTR_ICOUNT(_size) ({\ u32 __size = le16_to_cpu(_size); \ ((__size) == 0) ? 0 : \ (_size - sizeof(struct erofs_xattr_ibody_header)) / \ sizeof(struct erofs_xattr_entry) + 1; }) #ifndef XATTR_USER_PREFIX #define XATTR_USER_PREFIX "user." #endif #ifndef XATTR_USER_PREFIX_LEN #define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1) #endif #ifndef XATTR_SECURITY_PREFIX #define XATTR_SECURITY_PREFIX "security." #endif #ifndef XATTR_SECURITY_PREFIX_LEN #define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1) #endif #ifndef XATTR_TRUSTED_PREFIX #define XATTR_TRUSTED_PREFIX "trusted." #endif #ifndef XATTR_TRUSTED_PREFIX_LEN #define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1) #endif #ifndef XATTR_NAME_POSIX_ACL_ACCESS #define XATTR_NAME_POSIX_ACL_ACCESS "system.posix_acl_access" #endif #ifndef XATTR_NAME_POSIX_ACL_DEFAULT #define XATTR_NAME_POSIX_ACL_DEFAULT "system.posix_acl_default" #endif int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs); char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size); int erofs_build_shared_xattrs_from_path(const char *path); #endif erofs-utils-1.0/include/erofs_fs.h000066400000000000000000000231611355416010500172330ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ /* * erofs_utils/include/erofs_fs.h * EROFS (Enhanced ROM File System) on-disk format definition * * Copyright (C) 2017-2018 HUAWEI, Inc. * http://www.huawei.com/ * Created by Gao Xiang */ #ifndef __EROFS_FS_H #define __EROFS_FS_H #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EROFS_SUPER_OFFSET 1024 /* * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should * be incompatible with this kernel version. */ #define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001 #define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING /* 128-byte erofs on-disk super block */ struct erofs_super_block { __le32 magic; /* file system magic number */ __le32 checksum; /* crc32c(super_block) */ __le32 feature_compat; __u8 blkszbits; /* support block_size == PAGE_SIZE only */ __u8 reserved; __le16 root_nid; /* nid of root directory */ __le64 inos; /* total valid ino # (== f_files - f_favail) */ __le64 build_time; /* inode v1 time derivation */ __le32 build_time_nsec; /* inode v1 time derivation in nano scale */ __le32 blocks; /* used for statfs */ __le32 meta_blkaddr; /* start block address of metadata area */ __le32 xattr_blkaddr; /* start block address of shared xattr area */ __u8 uuid[16]; /* 128-bit uuid for volume */ __u8 volume_name[16]; /* volume name */ __le32 feature_incompat; __u8 reserved2[44]; }; /* * erofs inode datalayout (i_format in on-disk inode): * 0 - inode plain without inline data A: * inode, [xattrs], ... | ... 
| no-holed data * 1 - inode VLE compression B (legacy): * inode, [xattrs], extents ... | ... * 2 - inode plain with inline data C: * inode, [xattrs], last_inline_data, ... | ... | no-holed data * 3 - inode compression D: * inode, [xattrs], map_header, extents ... | ... * 4~7 - reserved */ enum { EROFS_INODE_FLAT_PLAIN = 0, EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1, EROFS_INODE_FLAT_INLINE = 2, EROFS_INODE_FLAT_COMPRESSION = 3, EROFS_INODE_DATALAYOUT_MAX }; static inline bool erofs_inode_is_data_compressed(unsigned int datamode) { return datamode == EROFS_INODE_FLAT_COMPRESSION || datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; } /* bit definitions of inode i_advise */ #define EROFS_I_VERSION_BITS 1 #define EROFS_I_DATALAYOUT_BITS 3 #define EROFS_I_VERSION_BIT 0 #define EROFS_I_DATALAYOUT_BIT 1 /* 32-byte reduced form of an ondisk inode */ struct erofs_inode_compact { __le16 i_format; /* inode format hints */ /* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ __le16 i_xattr_icount; __le16 i_mode; __le16 i_nlink; __le32 i_size; __le32 i_reserved; union { /* file total compressed blocks for data mapping 1 */ __le32 compressed_blocks; __le32 raw_blkaddr; /* for device files, used to indicate old/new device # */ __le32 rdev; } i_u; __le32 i_ino; /* only used for 32-bit stat compatibility */ __le16 i_uid; __le16 i_gid; __le32 i_reserved2; }; /* 32 bytes on-disk inode */ #define EROFS_INODE_LAYOUT_COMPACT 0 /* 64 bytes on-disk inode */ #define EROFS_INODE_LAYOUT_EXTENDED 1 /* 64-byte complete form of an ondisk inode */ struct erofs_inode_extended { __le16 i_format; /* inode format hints */ /* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ __le16 i_xattr_icount; __le16 i_mode; __le16 i_reserved; __le64 i_size; union { /* file total compressed blocks for data mapping 1 */ __le32 compressed_blocks; __le32 raw_blkaddr; /* for device files, used to indicate old/new device # */ __le32 rdev; } i_u; /* only used for 32-bit stat compatibility */ __le32 i_ino; __le32 i_uid; __le32 i_gid; __le64 i_ctime; __le32 i_ctime_nsec; __le32 i_nlink; __u8 i_reserved2[16]; }; #define EROFS_MAX_SHARED_XATTRS (128) /* h_shared_count between 129 ... 
255 are special # */ #define EROFS_SHARED_XATTR_EXTENT (255) /* * inline xattrs (n == i_xattr_icount): * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes * 12 bytes / \ * / \ * /-----------------------\ * | erofs_xattr_entries+ | * +-----------------------+ * inline xattrs must starts in erofs_xattr_ibody_header, * for read-only fs, no need to introduce h_refcount */ struct erofs_xattr_ibody_header { __le32 h_reserved; __u8 h_shared_count; __u8 h_reserved2[7]; __le32 h_shared_xattrs[0]; /* shared xattr id array */ }; /* Name indexes */ #define EROFS_XATTR_INDEX_USER 1 #define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 #define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 #define EROFS_XATTR_INDEX_TRUSTED 4 #define EROFS_XATTR_INDEX_LUSTRE 5 #define EROFS_XATTR_INDEX_SECURITY 6 /* xattr entry (for both inline & shared xattrs) */ struct erofs_xattr_entry { __u8 e_name_len; /* length of name */ __u8 e_name_index; /* attribute name index */ __le16 e_value_size; /* size of attribute value */ /* followed by e_name and e_value */ char e_name[0]; /* attribute name */ }; static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount) { if (!i_xattr_icount) return 0; return sizeof(struct erofs_xattr_ibody_header) + sizeof(__u32) * (le16_to_cpu(i_xattr_icount) - 1); } #define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e) { return EROFS_XATTR_ALIGN(sizeof(struct erofs_xattr_entry) + e->e_name_len + le16_to_cpu(e->e_value_size)); } /* available compression algorithm types (for h_algorithmtype) */ enum { Z_EROFS_COMPRESSION_LZ4 = 0, Z_EROFS_COMPRESSION_MAX }; /* * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) * e.g. for 4k logical cluster size, 4B if compacted 2B is off; * (4B) + 2B + (4B) if compacted 2B is on. */ #define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 #define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) struct z_erofs_map_header { __le32 h_reserved1; __le16 h_advise; /* * bit 0-3 : algorithm type of head 1 (logical cluster type 01); * bit 4-7 : algorithm type of head 2 (logical cluster type 11). */ __u8 h_algorithmtype; /* * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; * bit 3-4 : (physical - logical) cluster bits of head 1: * For example, if logical clustersize = 4096, 1 for 8192. * bit 5-7 : (physical - logical) cluster bits of head 2. 
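 *
 * Worked example of the encoding above: with 4KiB logical clusters
 * and 4KiB physical clusters for both heads, h_clusterbits is 0;
 * if head 1 instead used 8KiB physical clusters (logical still 4KiB),
 * bits 3-4 would hold 1, giving h_clusterbits = 0x08.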
*/ __u8 h_clusterbits; }; #define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 /* * Fixed-sized output compression ondisk Logical Extent cluster type: * 0 - literal (uncompressed) cluster * 1 - compressed cluster (for the head logical cluster) * 2 - compressed cluster (for the other logical clusters) * * In detail, * 0 - literal (uncompressed) cluster, * di_advise = 0 * di_clusterofs = the literal data offset of the cluster * di_blkaddr = the blkaddr of the literal cluster * * 1 - compressed cluster (for the head logical cluster) * di_advise = 1 * di_clusterofs = the decompressed data offset of the cluster * di_blkaddr = the blkaddr of the compressed cluster * * 2 - compressed cluster (for the other logical clusters) * di_advise = 2 * di_clusterofs = * the decompressed data offset in its own head cluster * di_u.delta[0] = distance to its corresponding head cluster * di_u.delta[1] = distance to its corresponding tail cluster * (di_advise could be 0, 1 or 2) */ enum { Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0, Z_EROFS_VLE_CLUSTER_TYPE_HEAD = 1, Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2, Z_EROFS_VLE_CLUSTER_TYPE_RESERVED = 3, Z_EROFS_VLE_CLUSTER_TYPE_MAX }; #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 struct z_erofs_vle_decompressed_index { __le16 di_advise; /* where to decompress in the head cluster */ __le16 di_clusterofs; union { /* for the head cluster */ __le32 blkaddr; /* * for the rest clusters * eg. for 4k page-sized cluster, maximum 4K*64k = 256M) * [0] - pointing to the head cluster * [1] - pointing to the tail cluster */ __le16 delta[2]; } di_u; }; #define Z_EROFS_VLE_EXTENT_ALIGN(size) round_up(size, \ sizeof(struct z_erofs_vle_decompressed_index)) /* dirent sorts in alphabet order, thus we can do binary search */ struct erofs_dirent { __le64 nid; /* node number */ __le16 nameoff; /* start offset of file name */ __u8 file_type; /* file type */ __u8 reserved; /* reserved */ } __packed; /* file types used in inode_info->flags */ enum { EROFS_FT_UNKNOWN, EROFS_FT_REG_FILE, EROFS_FT_DIR, EROFS_FT_CHRDEV, EROFS_FT_BLKDEV, EROFS_FT_FIFO, EROFS_FT_SOCK, EROFS_FT_SYMLINK, EROFS_FT_MAX }; #define EROFS_NAME_LEN 255 /* check the EROFS on-disk layout strictly at compile time */ static inline void erofs_check_ondisk_layout_definitions(void) { BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32); BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64); BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); } #endif erofs-utils-1.0/lib/000077500000000000000000000000001355416010500143745ustar00rootroot00000000000000erofs-utils-1.0/lib/Makefile.am000066400000000000000000000006161355416010500164330ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0+ # Makefile.am noinst_LTLIBRARIES = liberofs.la liberofs_la_SOURCES = config.c io.c cache.c inode.c xattr.c \ compress.c compressor.c liberofs_la_CFLAGS = -Wall -Werror -I$(top_srcdir)/include if ENABLE_LZ4 liberofs_la_CFLAGS += ${LZ4_CFLAGS} liberofs_la_SOURCES += compressor_lz4.c if ENABLE_LZ4HC liberofs_la_SOURCES += compressor_lz4hc.c endif endif 
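
# Note: the lz4hc backend (compressor_lz4hc.c) is only built when plain
# lz4 support is itself enabled, since the ENABLE_LZ4HC conditional
# above is nested inside ENABLE_LZ4.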
erofs-utils-1.0/lib/cache.c000066400000000000000000000174061355416010500156130ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/cache.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Miao Xie * with heavy changes by Gao Xiang */ #include #include #include "erofs/io.h" #include "erofs/print.h" static struct erofs_buffer_block blkh = { .list = LIST_HEAD_INIT(blkh.list), .blkaddr = NULL_ADDR, }; static erofs_blk_t tail_blkaddr; static bool erofs_bh_flush_drop_directly(struct erofs_buffer_head *bh) { return erofs_bh_flush_generic_end(bh); } struct erofs_bhops erofs_drop_directly_bhops = { .flush = erofs_bh_flush_drop_directly, }; static bool erofs_bh_flush_skip_write(struct erofs_buffer_head *bh) { return false; } struct erofs_bhops erofs_skip_write_bhops = { .flush = erofs_bh_flush_skip_write, }; int erofs_bh_flush_generic_write(struct erofs_buffer_head *bh, void *buf) { struct erofs_buffer_head *nbh = list_next_entry(bh, list); erofs_off_t offset = erofs_btell(bh, false); DBG_BUGON(nbh->off < bh->off); return dev_write(buf, offset, nbh->off - bh->off); } static bool erofs_bh_flush_buf_write(struct erofs_buffer_head *bh) { int err = erofs_bh_flush_generic_write(bh, bh->fsprivate); if (err) return false; free(bh->fsprivate); return erofs_bh_flush_generic_end(bh); } struct erofs_bhops erofs_buf_write_bhops = { .flush = erofs_bh_flush_buf_write, }; /* return buffer_head of erofs super block (with size 0) */ struct erofs_buffer_head *erofs_buffer_init(void) { struct erofs_buffer_head *bh = erofs_balloc(META, 0, 0, 0); if (IS_ERR(bh)) return bh; bh->op = &erofs_skip_write_bhops; return bh; } /* return occupied bytes in specific buffer block if succeed */ static int __erofs_battach(struct erofs_buffer_block *bb, struct erofs_buffer_head *bh, erofs_off_t incr, unsigned int alignsize, unsigned int extrasize, bool dryrun) { const erofs_off_t alignedoffset = roundup(bb->buffers.off, alignsize); const int oob = cmpsgn(roundup(bb->buffers.off % EROFS_BLKSIZ, alignsize) + incr + extrasize, EROFS_BLKSIZ); bool tailupdate = false; erofs_blk_t blkaddr; if (oob >= 0) { /* the next buffer block should be NULL_ADDR all the time */ if (oob && list_next_entry(bb, list)->blkaddr != NULL_ADDR) return -EINVAL; blkaddr = bb->blkaddr; if (blkaddr != NULL_ADDR) { tailupdate = (tail_blkaddr == blkaddr + BLK_ROUND_UP(bb->buffers.off)); if (oob && !tailupdate) return -EINVAL; } } if (!dryrun) { if (bh) { bh->off = alignedoffset; bh->block = bb; list_add_tail(&bh->list, &bb->buffers.list); } bb->buffers.off = alignedoffset + incr; /* need to update the tail_blkaddr */ if (tailupdate) tail_blkaddr = blkaddr + BLK_ROUND_UP(bb->buffers.off); } return (alignedoffset + incr) % EROFS_BLKSIZ; } int erofs_bh_balloon(struct erofs_buffer_head *bh, erofs_off_t incr) { struct erofs_buffer_block *const bb = bh->block; /* should be the tail bh in the corresponding buffer block */ if (bh->list.next != &bb->buffers.list) return -EINVAL; return __erofs_battach(bb, NULL, incr, 1, 0, false); } struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size, unsigned int required_ext, unsigned int inline_ext) { struct erofs_buffer_block *cur, *bb; struct erofs_buffer_head *bh; unsigned int alignsize, used0, usedmax; int ret = get_alignsize(type, &type); if (ret < 0) return ERR_PTR(ret); alignsize = ret; used0 = (size + required_ext) % EROFS_BLKSIZ + inline_ext; usedmax = 0; bb = NULL; list_for_each_entry(cur, &blkh.list, list) { unsigned int used_before, used; used_before 
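		/* bytes already occupied in the tail block of this candidate;
		 * the scan keeps the candidate whose tail block would end up
		 * most filled (usedmax below) to minimize wasted space
		 */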
= cur->buffers.off % EROFS_BLKSIZ; /* skip if buffer block is just full */ if (!used_before) continue; /* skip if the entry which has different type */ if (cur->type != type) continue; ret = __erofs_battach(cur, NULL, size, alignsize, required_ext + inline_ext, true); if (ret < 0) continue; used = (ret + required_ext) % EROFS_BLKSIZ + inline_ext; /* should contain inline data in current block */ if (used > EROFS_BLKSIZ) continue; /* * remaining should be smaller than before or * larger than allocating a new buffer block */ if (used < used_before && used < used0) continue; if (usedmax < used) { bb = cur; usedmax = used; } } if (bb) { bh = malloc(sizeof(struct erofs_buffer_head)); if (!bh) return ERR_PTR(-ENOMEM); goto found; } /* allocate a new buffer block */ if (used0 > EROFS_BLKSIZ) return ERR_PTR(-ENOSPC); bb = malloc(sizeof(struct erofs_buffer_block)); if (!bb) return ERR_PTR(-ENOMEM); bb->type = type; bb->blkaddr = NULL_ADDR; bb->buffers.off = 0; init_list_head(&bb->buffers.list); list_add_tail(&bb->list, &blkh.list); bh = malloc(sizeof(struct erofs_buffer_head)); if (!bh) { free(bb); return ERR_PTR(-ENOMEM); } found: ret = __erofs_battach(bb, bh, size, alignsize, required_ext + inline_ext, false); if (ret < 0) return ERR_PTR(ret); return bh; } struct erofs_buffer_head *erofs_battach(struct erofs_buffer_head *bh, int type, unsigned int size) { struct erofs_buffer_block *const bb = bh->block; struct erofs_buffer_head *nbh; unsigned int alignsize; int ret = get_alignsize(type, &type); if (ret < 0) return ERR_PTR(ret); alignsize = ret; /* should be the tail bh in the corresponding buffer block */ if (bh->list.next != &bb->buffers.list) return ERR_PTR(-EINVAL); nbh = malloc(sizeof(*nbh)); if (!nbh) return ERR_PTR(-ENOMEM); ret = __erofs_battach(bb, nbh, size, alignsize, 0, false); if (ret < 0) { free(nbh); return ERR_PTR(ret); } return nbh; } static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb) { erofs_blk_t blkaddr; if (bb->blkaddr == NULL_ADDR) bb->blkaddr = tail_blkaddr; blkaddr = bb->blkaddr + BLK_ROUND_UP(bb->buffers.off); if (blkaddr > tail_blkaddr) tail_blkaddr = blkaddr; return blkaddr; } erofs_blk_t erofs_mapbh(struct erofs_buffer_block *bb, bool end) { struct erofs_buffer_block *t, *nt; if (!bb || bb->blkaddr == NULL_ADDR) { list_for_each_entry_safe(t, nt, &blkh.list, list) { if (!end && (t == bb || nt == &blkh)) break; (void)__erofs_mapbh(t); if (end && t == bb) break; } } return tail_blkaddr; } bool erofs_bflush(struct erofs_buffer_block *bb) { struct erofs_buffer_block *p, *n; erofs_blk_t blkaddr; list_for_each_entry_safe(p, n, &blkh.list, list) { struct erofs_buffer_head *bh, *nbh; unsigned int padding; bool skip = false; if (p == bb) break; /* check if the buffer block can flush */ list_for_each_entry(bh, &p->buffers.list, list) if (bh->op->preflush && !bh->op->preflush(bh)) return false; blkaddr = __erofs_mapbh(p); list_for_each_entry_safe(bh, nbh, &p->buffers.list, list) { /* flush and remove bh */ if (!bh->op->flush(bh)) skip = true; } if (skip) continue; padding = EROFS_BLKSIZ - p->buffers.off % EROFS_BLKSIZ; if (padding != EROFS_BLKSIZ) dev_fillzero(blknr_to_addr(blkaddr) - padding, padding, true); DBG_BUGON(!list_empty(&p->buffers.list)); erofs_dbg("block %u to %u flushed", p->blkaddr, blkaddr - 1); list_del(&p->list); free(p); } return true; } void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke) { struct erofs_buffer_block *const bb = bh->block; const erofs_blk_t blkaddr = bh->block->blkaddr; bool rollback = false; /* tail_blkaddr could be rolled 
back after revoking all bhs */ if (tryrevoke && blkaddr != NULL_ADDR && tail_blkaddr == blkaddr + BLK_ROUND_UP(bb->buffers.off)) rollback = true; bh->op = &erofs_drop_directly_bhops; erofs_bh_flush_generic_end(bh); if (!list_empty(&bb->buffers.list)) return; list_del(&bb->list); free(bb); if (rollback) tail_blkaddr = blkaddr; } erofs-utils-1.0/lib/compress.c000066400000000000000000000333461355416010500164040ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/compress.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Miao Xie * with heavy changes by Gao Xiang */ #define _LARGEFILE64_SOURCE #include #include #include #include "erofs/print.h" #include "erofs/io.h" #include "erofs/cache.h" #include "erofs/compress.h" #include "compressor.h" static struct erofs_compress compresshandle; static int compressionlevel; static struct z_erofs_map_header mapheader; struct z_erofs_vle_compress_ctx { u8 *metacur; u8 queue[EROFS_CONFIG_COMPR_MAX_SZ * 2]; unsigned int head, tail; erofs_blk_t blkaddr; /* pointing to the next blkaddr */ u16 clusterofs; }; #define Z_EROFS_LEGACY_MAP_HEADER_SIZE \ (sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) static unsigned int vle_compressmeta_capacity(erofs_off_t filesize) { const unsigned int indexsize = BLK_ROUND_UP(filesize) * sizeof(struct z_erofs_vle_decompressed_index); return Z_EROFS_LEGACY_MAP_HEADER_SIZE + indexsize; } static void vle_write_indexes_final(struct z_erofs_vle_compress_ctx *ctx) { const unsigned int type = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN; struct z_erofs_vle_decompressed_index di; if (!ctx->clusterofs) return; di.di_clusterofs = cpu_to_le16(ctx->clusterofs); di.di_u.blkaddr = 0; di.di_advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT); memcpy(ctx->metacur, &di, sizeof(di)); ctx->metacur += sizeof(di); } static void vle_write_indexes(struct z_erofs_vle_compress_ctx *ctx, unsigned int count, bool raw) { unsigned int clusterofs = ctx->clusterofs; unsigned int d0 = 0, d1 = (clusterofs + count) / EROFS_BLKSIZ; struct z_erofs_vle_decompressed_index di; unsigned int type; __le16 advise; di.di_clusterofs = cpu_to_le16(ctx->clusterofs); /* whether the tail-end (un)compressed block or not */ if (!d1) { type = raw ? Z_EROFS_VLE_CLUSTER_TYPE_PLAIN : Z_EROFS_VLE_CLUSTER_TYPE_HEAD; advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT); di.di_advise = advise; di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr); memcpy(ctx->metacur, &di, sizeof(di)); ctx->metacur += sizeof(di); /* don't add the final index if the tail-end block exists */ ctx->clusterofs = 0; return; } do { if (d0) { type = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD; di.di_u.delta[0] = cpu_to_le16(d0); di.di_u.delta[1] = cpu_to_le16(d1); } else { type = raw ? 
Z_EROFS_VLE_CLUSTER_TYPE_PLAIN : Z_EROFS_VLE_CLUSTER_TYPE_HEAD; di.di_u.blkaddr = cpu_to_le32(ctx->blkaddr); } advise = cpu_to_le16(type << Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT); di.di_advise = advise; memcpy(ctx->metacur, &di, sizeof(di)); ctx->metacur += sizeof(di); count -= EROFS_BLKSIZ - clusterofs; clusterofs = 0; ++d0; --d1; } while (clusterofs + count >= EROFS_BLKSIZ); ctx->clusterofs = clusterofs + count; } static int write_uncompressed_block(struct z_erofs_vle_compress_ctx *ctx, unsigned int *len, char *dst) { int ret; unsigned int count; if (!(sbi.feature_incompat & EROFS_FEATURE_INCOMPAT_LZ4_0PADDING)) { /* fix up clusterofs to 0 if possable */ if (ctx->head >= ctx->clusterofs) { ctx->head -= ctx->clusterofs; *len += ctx->clusterofs; ctx->clusterofs = 0; } } /* write uncompressed data */ count = min(EROFS_BLKSIZ, *len); memcpy(dst, ctx->queue + ctx->head, count); memset(dst + count, 0, EROFS_BLKSIZ - count); erofs_dbg("Writing %u uncompressed data to block %u", count, ctx->blkaddr); ret = blk_write(dst, ctx->blkaddr, 1); if (ret) return ret; return count; } static int vle_compress_one(struct erofs_inode *inode, struct z_erofs_vle_compress_ctx *ctx, bool final) { struct erofs_compress *const h = &compresshandle; unsigned int len = ctx->tail - ctx->head; unsigned int count; int ret; static char dstbuf[EROFS_BLKSIZ * 2]; char *const dst = dstbuf + EROFS_BLKSIZ; while (len) { bool raw; if (len <= EROFS_BLKSIZ) { if (final) goto nocompression; break; } count = len; ret = erofs_compress_destsize(h, compressionlevel, ctx->queue + ctx->head, &count, dst, EROFS_BLKSIZ); if (ret <= 0) { if (ret != -EAGAIN) { erofs_err("failed to compress %s: %s", inode->i_srcpath, erofs_strerror(ret)); } nocompression: ret = write_uncompressed_block(ctx, &len, dst); if (ret < 0) return ret; count = ret; raw = true; } else { /* write compressed data */ erofs_dbg("Writing %u compressed data to block %u", count, ctx->blkaddr); if (sbi.feature_incompat & EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) ret = blk_write(dst - (EROFS_BLKSIZ - ret), ctx->blkaddr, 1); else ret = blk_write(dst, ctx->blkaddr, 1); if (ret) return ret; raw = false; } ctx->head += count; /* write compression indexes for this blkaddr */ vle_write_indexes(ctx, count, raw); ++ctx->blkaddr; len -= count; if (!final && ctx->head >= EROFS_CONFIG_COMPR_MAX_SZ) { const uint qh_aligned = round_down(ctx->head, EROFS_BLKSIZ); const uint qh_after = ctx->head - qh_aligned; memmove(ctx->queue, ctx->queue + qh_aligned, len + qh_after); ctx->head = qh_after; ctx->tail = qh_after + len; break; } } return 0; } struct z_erofs_compressindex_vec { union { erofs_blk_t blkaddr; u16 delta[2]; } u; u16 clusterofs; u8 clustertype; }; static void *parse_legacy_indexes(struct z_erofs_compressindex_vec *cv, unsigned int nr, void *metacur) { struct z_erofs_vle_decompressed_index *const db = metacur; unsigned int i; for (i = 0; i < nr; ++i, ++cv) { struct z_erofs_vle_decompressed_index *const di = db + i; const unsigned int advise = le16_to_cpu(di->di_advise); cv->clustertype = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); cv->clusterofs = le16_to_cpu(di->di_clusterofs); if (cv->clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { cv->u.delta[0] = le16_to_cpu(di->di_u.delta[0]); cv->u.delta[1] = le16_to_cpu(di->di_u.delta[1]); } else { cv->u.blkaddr = le32_to_cpu(di->di_u.blkaddr); } } return db + nr; } static void *write_compacted_indexes(u8 *out, struct z_erofs_compressindex_vec *cv, erofs_blk_t *blkaddr_ret, unsigned int destsize, 
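			/* destsize: packed size per index in bytes,
			 * 4 for compacted_4b runs, 2 for compacted_2b runs */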
unsigned int logical_clusterbits, bool final) { unsigned int vcnt, encodebits, pos, i; erofs_blk_t blkaddr; if (destsize == 4) { vcnt = 2; } else if (destsize == 2 && logical_clusterbits == 12) { vcnt = 16; } else { return ERR_PTR(-EINVAL); } encodebits = (vcnt * destsize * 8 - 32) / vcnt; blkaddr = *blkaddr_ret; pos = 0; for (i = 0; i < vcnt; ++i) { unsigned int offset, v; u8 ch, rem; if (cv[i].clustertype == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { if (i + 1 == vcnt) offset = cv[i].u.delta[1]; else offset = cv[i].u.delta[0]; } else { offset = cv[i].clusterofs; ++blkaddr; if (cv[i].u.blkaddr != blkaddr) { if (i + 1 != vcnt) DBG_BUGON(!final); DBG_BUGON(cv[i].u.blkaddr); } } v = (cv[i].clustertype << logical_clusterbits) | offset; rem = pos & 7; ch = out[pos / 8] & ((1 << rem) - 1); out[pos / 8] = (v << rem) | ch; out[pos / 8 + 1] = v >> (8 - rem); out[pos / 8 + 2] = v >> (16 - rem); pos += encodebits; } DBG_BUGON(destsize * vcnt * 8 != pos + 32); *(__le32 *)(out + destsize * vcnt - 4) = cpu_to_le32(*blkaddr_ret); *blkaddr_ret = blkaddr; return out + destsize * vcnt; } int z_erofs_convert_to_compacted_format(struct erofs_inode *inode, erofs_blk_t blkaddr, unsigned int legacymetasize, unsigned int logical_clusterbits) { const uint headerpos = Z_EROFS_VLE_EXTENT_ALIGN(inode->inode_isize + inode->xattr_isize) + sizeof(struct z_erofs_map_header); const uint totalidx = (legacymetasize - Z_EROFS_LEGACY_MAP_HEADER_SIZE) / 8; u8 *out, *in; struct z_erofs_compressindex_vec cv[16]; /* # of 8-byte units so that it can be aligned with 32 bytes */ unsigned int compacted_4b_initial, compacted_4b_end; unsigned int compacted_2b; if (logical_clusterbits < LOG_BLOCK_SIZE || LOG_BLOCK_SIZE < 12) return -EINVAL; if (logical_clusterbits > 14) /* currently not supported */ return -ENOTSUP; if (logical_clusterbits == 12) { compacted_4b_initial = (32 - headerpos % 32) / 4; if (compacted_4b_initial == 32 / 4) compacted_4b_initial = 0; if (compacted_4b_initial > totalidx) { compacted_4b_initial = compacted_2b = 0; compacted_4b_end = totalidx; } else { compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); compacted_4b_end = totalidx - compacted_4b_initial - compacted_2b; } } else { compacted_2b = compacted_4b_initial = 0; compacted_4b_end = totalidx; } out = in = inode->compressmeta; /* write out compacted header */ memcpy(out, &mapheader, sizeof(mapheader)); out += sizeof(mapheader); in += Z_EROFS_LEGACY_MAP_HEADER_SIZE; /* generate compacted_4b_initial */ while (compacted_4b_initial) { in = parse_legacy_indexes(cv, 2, in); out = write_compacted_indexes(out, cv, &blkaddr, 4, logical_clusterbits, false); compacted_4b_initial -= 2; } DBG_BUGON(compacted_4b_initial); /* generate compacted_2b */ while (compacted_2b) { in = parse_legacy_indexes(cv, 16, in); out = write_compacted_indexes(out, cv, &blkaddr, 2, logical_clusterbits, false); compacted_2b -= 16; } DBG_BUGON(compacted_2b); /* generate compacted_4b_end */ while (compacted_4b_end > 1) { in = parse_legacy_indexes(cv, 2, in); out = write_compacted_indexes(out, cv, &blkaddr, 4, logical_clusterbits, false); compacted_4b_end -= 2; } /* generate final compacted_4b_end if needed */ if (compacted_4b_end) { memset(cv, 0, sizeof(cv)); in = parse_legacy_indexes(cv, 1, in); out = write_compacted_indexes(out, cv, &blkaddr, 4, logical_clusterbits, true); } inode->extent_isize = out - (u8 *)inode->compressmeta; inode->datalayout = EROFS_INODE_FLAT_COMPRESSION; return 0; } int erofs_write_compressed_file(struct erofs_inode *inode) { struct erofs_buffer_head *bh; struct 
z_erofs_vle_compress_ctx ctx; erofs_off_t remaining; erofs_blk_t blkaddr, compressed_blocks; unsigned int legacymetasize; int ret, fd; u8 *compressmeta = malloc(vle_compressmeta_capacity(inode->i_size)); if (!compressmeta) return -ENOMEM; fd = open(inode->i_srcpath, O_RDONLY | O_BINARY); if (fd < 0) { ret = -errno; goto err_free; } /* allocate main data buffer */ bh = erofs_balloc(DATA, 0, 0, 0); if (IS_ERR(bh)) { ret = PTR_ERR(bh); goto err_close; } memset(compressmeta, 0, Z_EROFS_LEGACY_MAP_HEADER_SIZE); blkaddr = erofs_mapbh(bh->block, true); /* start_blkaddr */ ctx.blkaddr = blkaddr; ctx.metacur = compressmeta + Z_EROFS_LEGACY_MAP_HEADER_SIZE; ctx.head = ctx.tail = 0; ctx.clusterofs = 0; remaining = inode->i_size; while (remaining) { const u64 readcount = min_t(u64, remaining, sizeof(ctx.queue) - ctx.tail); ret = read(fd, ctx.queue + ctx.tail, readcount); if (ret != readcount) { ret = -errno; goto err_bdrop; } remaining -= readcount; ctx.tail += readcount; /* do one compress round */ ret = vle_compress_one(inode, &ctx, false); if (ret) goto err_bdrop; } /* do the final round */ ret = vle_compress_one(inode, &ctx, true); if (ret) goto err_bdrop; /* fall back to no compression mode */ compressed_blocks = ctx.blkaddr - blkaddr; if (compressed_blocks >= BLK_ROUND_UP(inode->i_size)) { ret = -ENOSPC; goto err_bdrop; } vle_write_indexes_final(&ctx); close(fd); ret = erofs_bh_balloon(bh, blknr_to_addr(compressed_blocks)); DBG_BUGON(ret); erofs_info("compressed %s (%llu bytes) into %u blocks", inode->i_srcpath, (unsigned long long)inode->i_size, compressed_blocks); /* * TODO: need to move erofs_bdrop to erofs_write_tail_end * when both mkfs & kernel support compression inline. */ erofs_bdrop(bh, false); inode->compressmeta = compressmeta; inode->idata_size = 0; inode->u.i_blocks = compressed_blocks; legacymetasize = ctx.metacur - compressmeta; if (cfg.c_legacy_compress) { inode->extent_isize = legacymetasize; inode->datalayout = EROFS_INODE_FLAT_COMPRESSION_LEGACY; } else { ret = z_erofs_convert_to_compacted_format(inode, blkaddr - 1, legacymetasize, 12); DBG_BUGON(ret); } return 0; err_bdrop: erofs_bdrop(bh, true); /* revoke buffer */ err_close: close(fd); err_free: free(compressmeta); return ret; } static int erofs_get_compress_algorithm_id(const char *name) { if (!strcmp(name, "lz4") || !strcmp(name, "lz4hc")) return Z_EROFS_COMPRESSION_LZ4; return -ENOTSUP; } int z_erofs_compress_init(void) { unsigned int algorithmtype[2]; /* initialize for primary compression algorithm */ int ret = erofs_compressor_init(&compresshandle, cfg.c_compr_alg_master); if (ret) return ret; /* * if primary algorithm is not lz4* (e.g. compression off), * clear LZ4_0PADDING feature for old kernel compatibility. */ if (!cfg.c_compr_alg_master || strncmp(cfg.c_compr_alg_master, "lz4", 3)) sbi.feature_incompat &= ~EROFS_FEATURE_INCOMPAT_LZ4_0PADDING; if (!cfg.c_compr_alg_master) return 0; compressionlevel = cfg.c_compr_level_master < 0 ? 
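		/* level not configured (c_compr_level_master defaults to -1),
		 * so use the algorithm's own default level */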
compresshandle.alg->default_level : cfg.c_compr_level_master; /* figure out mapheader */ ret = erofs_get_compress_algorithm_id(cfg.c_compr_alg_master); if (ret < 0) return ret; algorithmtype[0] = ret; /* primary algorithm (head 0) */ algorithmtype[1] = 0; /* secondary algorithm (head 1) */ mapheader.h_advise |= Z_EROFS_ADVISE_COMPACTED_2B; mapheader.h_algorithmtype = algorithmtype[1] << 4 | algorithmtype[0]; mapheader.h_clusterbits = LOG_BLOCK_SIZE - 12; return 0; } int z_erofs_compress_exit(void) { return erofs_compressor_exit(&compresshandle); } erofs-utils-1.0/lib/compressor.c000066400000000000000000000036651355416010500167460ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs-utils/lib/compressor.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Gao Xiang */ #include "erofs/internal.h" #include "compressor.h" #include "erofs/print.h" #define EROFS_CONFIG_COMPR_DEF_BOUNDARY (128) static struct erofs_compressor *compressors[] = { #if LZ4_ENABLED #if LZ4HC_ENABLED &erofs_compressor_lz4hc, #endif &erofs_compressor_lz4, #endif }; int erofs_compress_destsize(struct erofs_compress *c, int compression_level, void *src, unsigned int *srcsize, void *dst, unsigned int dstsize) { int ret; DBG_BUGON(!c->alg); if (!c->alg->compress_destsize) return -ENOTSUP; ret = c->alg->compress_destsize(c, compression_level, src, srcsize, dst, dstsize); if (ret < 0) return ret; /* check if there is enough gains to compress */ if (*srcsize <= dstsize * c->compress_threshold / 100) return -EAGAIN; return ret; } const char *z_erofs_list_available_compressors(unsigned int i) { return i >= ARRAY_SIZE(compressors) ? NULL : compressors[i]->name; } int erofs_compressor_init(struct erofs_compress *c, char *alg_name) { int ret, i; /* should be written in "minimum compression ratio * 100" */ c->compress_threshold = 100; /* optimize for 4k size page */ c->destsize_alignsize = PAGE_SIZE; c->destsize_redzone_begin = PAGE_SIZE - 16; c->destsize_redzone_end = EROFS_CONFIG_COMPR_DEF_BOUNDARY; if (!alg_name) { c->alg = NULL; return 0; } ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(compressors); ++i) { if (alg_name && strcmp(alg_name, compressors[i]->name)) continue; ret = compressors[i]->init(c); if (!ret) { DBG_BUGON(!c->alg); return 0; } } erofs_err("Cannot find a valid compressor %s", alg_name); return ret; } int erofs_compressor_exit(struct erofs_compress *c) { if (c->alg && c->alg->exit) return c->alg->exit(c); return 0; } erofs-utils-1.0/lib/compressor.h000066400000000000000000000024711355416010500167450ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0+ */ /* * erofs-utils/lib/compressor.h * * Copyright (C) 2018-2019 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Gao Xiang */ #ifndef __EROFS_LIB_COMPRESSOR_H #define __EROFS_LIB_COMPRESSOR_H #include "erofs/defs.h" struct erofs_compress; struct erofs_compressor { const char *name; int default_level; int best_level; int (*init)(struct erofs_compress *c); int (*exit)(struct erofs_compress *c); int (*compress_destsize)(struct erofs_compress *c, int compress_level, void *src, unsigned int *srcsize, void *dst, unsigned int dstsize); }; struct erofs_compress { struct erofs_compressor *alg; unsigned int compress_threshold; /* *_destsize specific */ unsigned int destsize_alignsize; unsigned int destsize_redzone_begin; unsigned int destsize_redzone_end; void *private_data; }; /* list of compression algorithms */ extern struct erofs_compressor erofs_compressor_lz4; extern struct erofs_compressor erofs_compressor_lz4hc; int erofs_compress_destsize(struct erofs_compress *c, int compression_level, void *src, unsigned int *srcsize, void *dst, unsigned int dstsize); int erofs_compressor_init(struct erofs_compress *c, char *alg_name); int erofs_compressor_exit(struct erofs_compress *c); #endif erofs-utils-1.0/lib/compressor_lz4.c000066400000000000000000000017471355416010500175360ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs-utils/lib/compressor-lz4.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Gao Xiang */ #include #include "erofs/internal.h" #include "compressor.h" static int lz4_compress_destsize(struct erofs_compress *c, int compression_level, void *src, unsigned int *srcsize, void *dst, unsigned int dstsize) { int srcSize = (int)*srcsize; int rc = LZ4_compress_destSize(src, dst, &srcSize, (int)dstsize); if (!rc) return -EFAULT; *srcsize = srcSize; return rc; } static int compressor_lz4_exit(struct erofs_compress *c) { return 0; } static int compressor_lz4_init(struct erofs_compress *c) { c->alg = &erofs_compressor_lz4; return 0; } struct erofs_compressor erofs_compressor_lz4 = { .name = "lz4", .default_level = 0, .best_level = 0, .init = compressor_lz4_init, .exit = compressor_lz4_exit, .compress_destsize = lz4_compress_destsize, }; erofs-utils-1.0/lib/compressor_lz4hc.c000066400000000000000000000024571355416010500200500ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs-utils/lib/compressor-lz4hc.c * * Copyright (C) 2018-2019 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Gao Xiang */ #define LZ4_HC_STATIC_LINKING_ONLY (1) #include #include "erofs/internal.h" #include "compressor.h" static int lz4hc_compress_destsize(struct erofs_compress *c, int compression_level, void *src, unsigned int *srcsize, void *dst, unsigned int dstsize) { int srcSize = (int)*srcsize; int rc = LZ4_compress_HC_destSize(c->private_data, src, dst, &srcSize, (int)dstsize, compression_level); if (!rc) return -EFAULT; *srcsize = srcSize; return rc; } static int compressor_lz4hc_exit(struct erofs_compress *c) { if (!c->private_data) return -EINVAL; LZ4_freeStreamHC(c->private_data); return 0; } static int compressor_lz4hc_init(struct erofs_compress *c) { c->alg = &erofs_compressor_lz4hc; c->private_data = LZ4_createStreamHC(); if (!c->private_data) return -ENOMEM; return 0; } struct erofs_compressor erofs_compressor_lz4hc = { .name = "lz4hc", .default_level = LZ4HC_CLEVEL_DEFAULT, .best_level = LZ4HC_CLEVEL_MAX, .init = compressor_lz4hc_init, .exit = compressor_lz4hc_exit, .compress_destsize = lz4hc_compress_destsize, }; erofs-utils-1.0/lib/config.c000066400000000000000000000015571355416010500160150ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/config.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu */ #include #include "erofs/print.h" #include "erofs/internal.h" struct erofs_configure cfg; void erofs_init_configure(void) { memset(&cfg, 0, sizeof(cfg)); cfg.c_dbg_lvl = 0; cfg.c_version = PACKAGE_VERSION; cfg.c_dry_run = false; cfg.c_compr_level_master = -1; cfg.c_force_inodeversion = 0; cfg.c_inline_xattr_tolerance = 2; cfg.c_unix_timestamp = -1; } void erofs_show_config(void) { const struct erofs_configure *c = &cfg; erofs_dump("\tc_version: [%8s]\n", c->c_version); erofs_dump("\tc_dbg_lvl: [%8d]\n", c->c_dbg_lvl); erofs_dump("\tc_dry_run: [%8d]\n", c->c_dry_run); } void erofs_exit_configure(void) { } erofs-utils-1.0/lib/inode.c000066400000000000000000000521521355416010500156430ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/inode.c * * Copyright (C) 2018-2019 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Li Guifu * with heavy changes by Gao Xiang */ #define _GNU_SOURCE #include #include #include #include #include #include #include "erofs/print.h" #include "erofs/inode.h" #include "erofs/cache.h" #include "erofs/io.h" #include "erofs/compress.h" #include "erofs/xattr.h" struct erofs_sb_info sbi; #define S_SHIFT 12 static unsigned char erofs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EROFS_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EROFS_FT_DIR, [S_IFCHR >> S_SHIFT] = EROFS_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EROFS_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EROFS_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EROFS_FT_SOCK, [S_IFLNK >> S_SHIFT] = EROFS_FT_SYMLINK, }; #define NR_INODE_HASHTABLE 64 struct list_head inode_hashtable[NR_INODE_HASHTABLE]; void erofs_inode_manager_init(void) { unsigned int i; for (i = 0; i < NR_INODE_HASHTABLE; ++i) init_list_head(&inode_hashtable[i]); } static struct erofs_inode *erofs_igrab(struct erofs_inode *inode) { ++inode->i_count; return inode; } /* get the inode from the (source) inode # */ struct erofs_inode *erofs_iget(ino_t ino) { struct list_head *head = &inode_hashtable[ino % NR_INODE_HASHTABLE]; struct erofs_inode *inode; list_for_each_entry(inode, head, i_hash) if (inode->i_ino[1] == ino) return erofs_igrab(inode); return NULL; } struct erofs_inode *erofs_iget_by_nid(erofs_nid_t nid) { struct list_head *head = &inode_hashtable[nid % NR_INODE_HASHTABLE]; struct erofs_inode *inode; list_for_each_entry(inode, head, i_hash) if (inode->nid == nid) return erofs_igrab(inode); return NULL; } unsigned int erofs_iput(struct erofs_inode *inode) { struct erofs_dentry *d, *t; if (inode->i_count > 1) return --inode->i_count; list_for_each_entry_safe(d, t, &inode->i_subdirs, d_child) free(d); list_del(&inode->i_hash); free(inode); return 0; } static int dentry_add_sorted(struct erofs_dentry *d, struct list_head *head) { struct list_head *pos; list_for_each(pos, head) { struct erofs_dentry *d2 = container_of(pos, struct erofs_dentry, d_child); if (strcmp(d->name, d2->name) < 0) break; } list_add_tail(&d->d_child, pos); return 0; } struct erofs_dentry *erofs_d_alloc(struct erofs_inode *parent, const char *name) { struct erofs_dentry *d = malloc(sizeof(*d)); if (!d) return ERR_PTR(-ENOMEM); strncpy(d->name, name, EROFS_NAME_LEN - 1); d->name[EROFS_NAME_LEN - 1] = '\0'; dentry_add_sorted(d, &parent->i_subdirs); return d; } /* allocate main data for a inode */ static int __allocate_inode_bh_data(struct erofs_inode *inode, unsigned long nblocks) { struct erofs_buffer_head *bh; int ret; if (!nblocks) { /* it has only tail-end inlined data */ inode->u.i_blkaddr = NULL_ADDR; return 0; } /* allocate main data buffer */ bh = erofs_balloc(DATA, blknr_to_addr(nblocks), 0, 0); if (IS_ERR(bh)) return PTR_ERR(bh); bh->op = &erofs_skip_write_bhops; inode->bh_data = bh; /* get blkaddr of the bh */ ret = erofs_mapbh(bh->block, true); DBG_BUGON(ret < 0); /* write blocks except for the tail-end block */ inode->u.i_blkaddr = bh->block->blkaddr; return 0; } int erofs_prepare_dir_file(struct erofs_inode *dir) { struct erofs_dentry *d; unsigned int d_size; int ret; /* dot is pointed to the current dir inode */ d = erofs_d_alloc(dir, "."); d->inode = erofs_igrab(dir); d->type = EROFS_FT_DIR; /* dotdot is pointed to the parent dir */ d = erofs_d_alloc(dir, ".."); d->inode = erofs_igrab(dir->i_parent); d->type = EROFS_FT_DIR; /* let's calculate dir size */ d_size = 0; list_for_each_entry(d, &dir->i_subdirs, d_child) { int len = strlen(d->name) + sizeof(struct 
erofs_dirent); if (d_size % EROFS_BLKSIZ + len > EROFS_BLKSIZ) d_size = round_up(d_size, EROFS_BLKSIZ); d_size += len; } dir->i_size = d_size; /* no compression for all dirs */ dir->datalayout = EROFS_INODE_FLAT_INLINE; /* allocate dir main data */ ret = __allocate_inode_bh_data(dir, erofs_blknr(d_size)); if (ret) return ret; /* it will be used in erofs_prepare_inode_buffer */ dir->idata_size = d_size % EROFS_BLKSIZ; return 0; } static void fill_dirblock(char *buf, unsigned int size, unsigned int q, struct erofs_dentry *head, struct erofs_dentry *end) { unsigned int p = 0; /* write out all erofs_dirents + filenames */ while (head != end) { const unsigned int namelen = strlen(head->name); struct erofs_dirent d = { .nid = cpu_to_le64(head->nid), .nameoff = cpu_to_le16(q), .file_type = head->type, }; memcpy(buf + p, &d, sizeof(d)); memcpy(buf + q, head->name, namelen); p += sizeof(d); q += namelen; head = list_next_entry(head, d_child); } memset(buf + q, 0, size - q); } static int write_dirblock(unsigned int q, struct erofs_dentry *head, struct erofs_dentry *end, erofs_blk_t blkaddr) { char buf[EROFS_BLKSIZ]; fill_dirblock(buf, EROFS_BLKSIZ, q, head, end); return blk_write(buf, blkaddr, 1); } int erofs_write_dir_file(struct erofs_inode *dir) { struct erofs_dentry *head = list_first_entry(&dir->i_subdirs, struct erofs_dentry, d_child); struct erofs_dentry *d; int ret; unsigned int q, used, blkno; q = used = blkno = 0; list_for_each_entry(d, &dir->i_subdirs, d_child) { const unsigned int len = strlen(d->name) + sizeof(struct erofs_dirent); if (used + len > EROFS_BLKSIZ) { ret = write_dirblock(q, head, d, dir->u.i_blkaddr + blkno); if (ret) return ret; head = d; q = used = 0; ++blkno; } used += len; q += sizeof(struct erofs_dirent); } DBG_BUGON(used > EROFS_BLKSIZ); if (used == EROFS_BLKSIZ) { DBG_BUGON(dir->i_size % EROFS_BLKSIZ); DBG_BUGON(dir->idata_size); return write_dirblock(q, head, d, dir->u.i_blkaddr + blkno); } DBG_BUGON(used != dir->i_size % EROFS_BLKSIZ); if (used) { /* fill tail-end dir block */ dir->idata = malloc(used); DBG_BUGON(used != dir->idata_size); fill_dirblock(dir->idata, dir->idata_size, q, head, d); } return 0; } int erofs_write_file_from_buffer(struct erofs_inode *inode, char *buf) { const unsigned int nblocks = erofs_blknr(inode->i_size); int ret; inode->datalayout = EROFS_INODE_FLAT_INLINE; ret = __allocate_inode_bh_data(inode, nblocks); if (ret) return ret; if (nblocks) blk_write(buf, inode->u.i_blkaddr, nblocks); inode->idata_size = inode->i_size % EROFS_BLKSIZ; if (inode->idata_size) { inode->idata = malloc(inode->idata_size); memcpy(inode->idata, buf + blknr_to_addr(nblocks), inode->idata_size); } return 0; } /* rules to decide whether a file could be compressed or not */ static bool erofs_file_is_compressible(struct erofs_inode *inode) { return true; } int erofs_write_file(struct erofs_inode *inode) { unsigned int nblocks, i; int ret, fd; if (!inode->i_size) { inode->datalayout = EROFS_INODE_FLAT_PLAIN; return 0; } if (cfg.c_compr_alg_master && erofs_file_is_compressible(inode)) { ret = erofs_write_compressed_file(inode); if (!ret || ret != -ENOSPC) return ret; } /* fallback to all data uncompressed */ inode->datalayout = EROFS_INODE_FLAT_INLINE; nblocks = inode->i_size / EROFS_BLKSIZ; ret = __allocate_inode_bh_data(inode, nblocks); if (ret) return ret; fd = open(inode->i_srcpath, O_RDONLY | O_BINARY); if (fd < 0) return -errno; for (i = 0; i < nblocks; ++i) { char buf[EROFS_BLKSIZ]; ret = read(fd, buf, EROFS_BLKSIZ); if (ret != EROFS_BLKSIZ) { if (ret < 0) goto 
fail; close(fd); return -EAGAIN; } ret = blk_write(buf, inode->u.i_blkaddr + i, 1); if (ret) goto fail; } /* read the tail-end data */ inode->idata_size = inode->i_size % EROFS_BLKSIZ; if (inode->idata_size) { inode->idata = malloc(inode->idata_size); ret = read(fd, inode->idata, inode->idata_size); if (ret < inode->idata_size) { close(fd); return -EIO; } } close(fd); return 0; fail: ret = -errno; close(fd); return ret; } static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh) { struct erofs_inode *const inode = bh->fsprivate; const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize); erofs_off_t off = erofs_btell(bh, false); union { struct erofs_inode_compact dic; struct erofs_inode_extended die; } u = { {0}, }; int ret; switch (inode->inode_isize) { case sizeof(struct erofs_inode_compact): u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1)); u.dic.i_xattr_icount = cpu_to_le16(icount); u.dic.i_mode = cpu_to_le16(inode->i_mode); u.dic.i_nlink = cpu_to_le16(inode->i_nlink); u.dic.i_size = cpu_to_le32((u32)inode->i_size); u.dic.i_ino = cpu_to_le32(inode->i_ino[0]); u.dic.i_uid = cpu_to_le16((u16)inode->i_uid); u.dic.i_gid = cpu_to_le16((u16)inode->i_gid); switch ((inode->i_mode) >> S_SHIFT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: u.dic.i_u.rdev = cpu_to_le32(inode->u.i_rdev); break; default: if (is_inode_layout_compression(inode)) u.dic.i_u.compressed_blocks = cpu_to_le32(inode->u.i_blocks); else u.dic.i_u.raw_blkaddr = cpu_to_le32(inode->u.i_blkaddr); break; } break; case sizeof(struct erofs_inode_extended): u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1)); u.die.i_xattr_icount = cpu_to_le16(icount); u.die.i_mode = cpu_to_le16(inode->i_mode); u.die.i_nlink = cpu_to_le32(inode->i_nlink); u.die.i_size = cpu_to_le64(inode->i_size); u.die.i_ino = cpu_to_le32(inode->i_ino[0]); u.die.i_uid = cpu_to_le16(inode->i_uid); u.die.i_gid = cpu_to_le16(inode->i_gid); u.die.i_ctime = cpu_to_le64(inode->i_ctime); u.die.i_ctime_nsec = cpu_to_le32(inode->i_ctime_nsec); switch ((inode->i_mode) >> S_SHIFT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: u.die.i_u.rdev = cpu_to_le32(inode->u.i_rdev); break; default: if (is_inode_layout_compression(inode)) u.die.i_u.compressed_blocks = cpu_to_le32(inode->u.i_blocks); else u.die.i_u.raw_blkaddr = cpu_to_le32(inode->u.i_blkaddr); break; } break; default: erofs_err("unsupported on-disk inode version of nid %llu", (unsigned long long)inode->nid); BUG_ON(1); } ret = dev_write(&u, off, inode->inode_isize); if (ret) return false; off += inode->inode_isize; if (inode->xattr_isize) { char *xattrs = erofs_export_xattr_ibody(&inode->i_xattrs, inode->xattr_isize); if (IS_ERR(xattrs)) return false; ret = dev_write(xattrs, off, inode->xattr_isize); free(xattrs); if (ret) return false; off += inode->xattr_isize; } if (inode->extent_isize) { /* write compression metadata */ off = Z_EROFS_VLE_EXTENT_ALIGN(off); ret = dev_write(inode->compressmeta, off, inode->extent_isize); if (ret) return false; free(inode->compressmeta); } inode->bh = NULL; erofs_iput(inode); return erofs_bh_flush_generic_end(bh); } static struct erofs_bhops erofs_write_inode_bhops = { .flush = erofs_bh_flush_write_inode, }; int erofs_prepare_tail_block(struct erofs_inode *inode) { struct erofs_buffer_head *bh; int ret; if (!inode->idata_size) return 0; bh = inode->bh_data; if (!bh) { bh = erofs_balloc(DATA, EROFS_BLKSIZ, 0, 0); if (IS_ERR(bh)) return PTR_ERR(bh); bh->op = &erofs_skip_write_bhops; /* get blkaddr of bh */ ret = 
erofs_mapbh(bh->block, true); DBG_BUGON(ret < 0); inode->u.i_blkaddr = bh->block->blkaddr; inode->bh_data = bh; return 0; } /* expend a block as the tail block (should be successful) */ ret = erofs_bh_balloon(bh, EROFS_BLKSIZ); DBG_BUGON(ret); return 0; } int erofs_prepare_inode_buffer(struct erofs_inode *inode) { unsigned int inodesize; struct erofs_buffer_head *bh, *ibh; DBG_BUGON(inode->bh || inode->bh_inline); inodesize = inode->inode_isize + inode->xattr_isize; if (inode->extent_isize) inodesize = Z_EROFS_VLE_EXTENT_ALIGN(inodesize) + inode->extent_isize; if (is_inode_layout_compression(inode)) goto noinline; /* * if the file size is block-aligned for uncompressed files, * should use EROFS_INODE_FLAT_PLAIN data mapping mode. */ if (!inode->idata_size) inode->datalayout = EROFS_INODE_FLAT_PLAIN; bh = erofs_balloc(INODE, inodesize, 0, inode->idata_size); if (bh == ERR_PTR(-ENOSPC)) { int ret; inode->datalayout = EROFS_INODE_FLAT_PLAIN; noinline: /* expend an extra block for tail-end data */ ret = erofs_prepare_tail_block(inode); if (ret) return ret; bh = erofs_balloc(INODE, inodesize, 0, 0); if (IS_ERR(bh)) return PTR_ERR(bh); DBG_BUGON(inode->bh_inline); } else if (IS_ERR(bh)) { return PTR_ERR(bh); } else if (inode->idata_size) { inode->datalayout = EROFS_INODE_FLAT_INLINE; /* allocate inline buffer */ ibh = erofs_battach(bh, META, inode->idata_size); if (IS_ERR(ibh)) return PTR_ERR(ibh); ibh->op = &erofs_skip_write_bhops; inode->bh_inline = ibh; } bh->fsprivate = erofs_igrab(inode); bh->op = &erofs_write_inode_bhops; inode->bh = bh; return 0; } static bool erofs_bh_flush_write_inline(struct erofs_buffer_head *bh) { struct erofs_inode *const inode = bh->fsprivate; const erofs_off_t off = erofs_btell(bh, false); int ret; ret = dev_write(inode->idata, off, inode->idata_size); if (ret) return false; inode->idata_size = 0; free(inode->idata); inode->idata = NULL; erofs_iput(inode); return erofs_bh_flush_generic_end(bh); } static struct erofs_bhops erofs_write_inline_bhops = { .flush = erofs_bh_flush_write_inline, }; int erofs_write_tail_end(struct erofs_inode *inode) { struct erofs_buffer_head *bh, *ibh; bh = inode->bh_data; if (!inode->idata_size) goto out; /* have enough room to inline data */ if (inode->bh_inline) { ibh = inode->bh_inline; ibh->fsprivate = erofs_igrab(inode); ibh->op = &erofs_write_inline_bhops; } else { int ret; erofs_off_t pos; erofs_mapbh(bh->block, true); pos = erofs_btell(bh, true) - EROFS_BLKSIZ; ret = dev_write(inode->idata, pos, inode->idata_size); if (ret) return ret; if (inode->idata_size < EROFS_BLKSIZ) { ret = dev_fillzero(pos + inode->idata_size, EROFS_BLKSIZ - inode->idata_size, false); if (ret) return ret; } inode->idata_size = 0; free(inode->idata); inode->idata = NULL; } out: /* now bh_data can drop directly */ if (bh) { /* * Don't leave DATA buffers which were written in the global * buffer list. It will make balloc() slowly. 
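	 * (Fully-written DATA buffer blocks left on the global blkh list
	 * would still be scanned by every later erofs_balloc() call,
	 * slowing allocation down, hence they are dropped right away.)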
*/ #if 0 bh->op = &erofs_drop_directly_bhops; #else erofs_bdrop(bh, false); #endif inode->bh_data = NULL; } return 0; } static bool erofs_should_use_inode_extended(struct erofs_inode *inode) { if (cfg.c_force_inodeversion == FORCE_INODE_EXTENDED) return true; if (inode->i_size > UINT_MAX) return true; if (inode->i_uid > USHRT_MAX) return true; if (inode->i_gid > USHRT_MAX) return true; if (inode->i_nlink > USHRT_MAX) return true; return false; } static u32 erofs_new_encode_dev(dev_t dev) { const unsigned int major = major(dev); const unsigned int minor = minor(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } int erofs_fill_inode(struct erofs_inode *inode, struct stat64 *st, const char *path) { inode->i_mode = st->st_mode; inode->i_uid = st->st_uid; inode->i_gid = st->st_gid; inode->i_ctime = sbi.build_time; inode->i_ctime_nsec = sbi.build_time_nsec; inode->i_nlink = 1; /* fix up later if needed */ switch (inode->i_mode & S_IFMT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: inode->u.i_rdev = erofs_new_encode_dev(st->st_rdev); case S_IFDIR: inode->i_size = 0; break; case S_IFREG: case S_IFLNK: inode->i_size = st->st_size; break; default: return -EINVAL; } strncpy(inode->i_srcpath, path, sizeof(inode->i_srcpath) - 1); inode->i_srcpath[sizeof(inode->i_srcpath) - 1] = '\0'; inode->i_ino[1] = st->st_ino; if (erofs_should_use_inode_extended(inode)) { if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) { erofs_err("file %s cannot be in compact form", inode->i_srcpath); return -EINVAL; } inode->inode_isize = sizeof(struct erofs_inode_extended); } else { inode->inode_isize = sizeof(struct erofs_inode_compact); } list_add(&inode->i_hash, &inode_hashtable[st->st_ino % NR_INODE_HASHTABLE]); return 0; } struct erofs_inode *erofs_new_inode(void) { static unsigned int counter; struct erofs_inode *inode; inode = malloc(sizeof(struct erofs_inode)); if (!inode) return ERR_PTR(-ENOMEM); inode->i_parent = NULL; /* also used to indicate a new inode */ inode->i_ino[0] = counter++; /* inode serial number */ inode->i_count = 1; init_list_head(&inode->i_subdirs); init_list_head(&inode->i_xattrs); inode->idata_size = 0; inode->xattr_isize = 0; inode->extent_isize = 0; inode->bh = inode->bh_inline = inode->bh_data = NULL; inode->idata = NULL; return inode; } /* get the inode from the (source) path */ struct erofs_inode *erofs_iget_from_path(const char *path, bool is_src) { struct stat64 st; struct erofs_inode *inode; int ret; /* currently, only source path is supported */ if (!is_src) return ERR_PTR(-EINVAL); ret = lstat64(path, &st); if (ret) return ERR_PTR(-errno); inode = erofs_iget(st.st_ino); if (inode) return inode; /* cannot find in the inode cache */ inode = erofs_new_inode(); if (IS_ERR(inode)) return inode; ret = erofs_fill_inode(inode, &st, path); if (ret) return ERR_PTR(ret); return inode; } void erofs_fixup_meta_blkaddr(struct erofs_inode *rootdir) { const erofs_off_t rootnid_maxoffset = 0xffff << EROFS_ISLOTBITS; struct erofs_buffer_head *const bh = rootdir->bh; erofs_off_t off, meta_offset; erofs_mapbh(bh->block, true); off = erofs_btell(bh, false); if (off > rootnid_maxoffset) meta_offset = round_up(off - rootnid_maxoffset, EROFS_BLKSIZ); else meta_offset = 0; sbi.meta_blkaddr = erofs_blknr(meta_offset); rootdir->nid = (off - meta_offset) >> EROFS_ISLOTBITS; } erofs_nid_t erofs_lookupnid(struct erofs_inode *inode) { struct erofs_buffer_head *const bh = inode->bh; erofs_off_t off, meta_offset; if (!bh) return inode->nid; erofs_mapbh(bh->block, true); off = 
erofs_btell(bh, false); meta_offset = blknr_to_addr(sbi.meta_blkaddr); DBG_BUGON(off < meta_offset); return inode->nid = (off - meta_offset) >> EROFS_ISLOTBITS; } void erofs_d_invalidate(struct erofs_dentry *d) { struct erofs_inode *const inode = d->inode; d->nid = erofs_lookupnid(inode); erofs_iput(inode); } struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir) { int ret; DIR *_dir; struct dirent *dp; struct erofs_dentry *d; ret = erofs_prepare_xattr_ibody(dir->i_srcpath, &dir->i_xattrs); if (ret < 0) return ERR_PTR(ret); dir->xattr_isize = ret; if (!S_ISDIR(dir->i_mode)) { if (S_ISLNK(dir->i_mode)) { char *const symlink = malloc(dir->i_size); ret = readlink(dir->i_srcpath, symlink, dir->i_size); if (ret < 0) return ERR_PTR(-errno); erofs_write_file_from_buffer(dir, symlink); free(symlink); } else { erofs_write_file(dir); } erofs_prepare_inode_buffer(dir); erofs_write_tail_end(dir); return dir; } _dir = opendir(dir->i_srcpath); if (!_dir) { erofs_err("%s, failed to opendir at %s: %s", __func__, dir->i_srcpath, erofs_strerror(errno)); return ERR_PTR(-errno); } while (1) { /* * set errno to 0 before calling readdir() in order to * distinguish end of stream and from an error. */ errno = 0; dp = readdir(_dir); if (!dp) break; if (is_dot_dotdot(dp->d_name) || !strncmp(dp->d_name, "lost+found", strlen("lost+found"))) continue; d = erofs_d_alloc(dir, dp->d_name); if (IS_ERR(d)) { ret = PTR_ERR(d); goto err_closedir; } } if (errno) { ret = -errno; goto err_closedir; } closedir(_dir); ret = erofs_prepare_dir_file(dir); if (ret) goto err_closedir; ret = erofs_prepare_inode_buffer(dir); if (ret) goto err_closedir; if (IS_ROOT(dir)) erofs_fixup_meta_blkaddr(dir); list_for_each_entry(d, &dir->i_subdirs, d_child) { char buf[PATH_MAX]; if (is_dot_dotdot(d->name)) { erofs_d_invalidate(d); continue; } ret = snprintf(buf, PATH_MAX, "%s/%s", dir->i_srcpath, d->name); if (ret < 0 || ret >= PATH_MAX) { /* ignore the too long path */ goto fail; } d->inode = erofs_mkfs_build_tree_from_path(dir, buf); if (IS_ERR(d->inode)) { fail: d->inode = NULL; d->type = EROFS_FT_UNKNOWN; continue; } d->type = erofs_type_by_mode[d->inode->i_mode >> S_SHIFT]; erofs_d_invalidate(d); erofs_info("add file %s/%s (nid %llu, type %d)", dir->i_srcpath, d->name, (unsigned long long)d->nid, d->type); } erofs_write_dir_file(dir); erofs_write_tail_end(dir); return dir; err_closedir: closedir(_dir); return ERR_PTR(ret); } struct erofs_inode *erofs_mkfs_build_tree_from_path(struct erofs_inode *parent, const char *path) { struct erofs_inode *const inode = erofs_iget_from_path(path, true); if (IS_ERR(inode)) return inode; /* a hardlink to the existed inode */ if (inode->i_parent) { ++inode->i_nlink; return inode; } /* a completely new inode is found */ if (parent) inode->i_parent = parent; else inode->i_parent = inode; /* rootdir mark */ return erofs_mkfs_build_tree(inode); } erofs-utils-1.0/lib/io.c000066400000000000000000000077521355416010500151620ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/io.c * * Copyright (C) 2018 HUAWEI, Inc. 
* http://www.huawei.com/ * Created by Li Guifu */ #define _LARGEFILE64_SOURCE #define _GNU_SOURCE #include #include #include "erofs/io.h" #ifdef HAVE_LINUX_FS_H #include #endif #ifdef HAVE_LINUX_FALLOC_H #include #endif #define pr_fmt(fmt) "EROFS IO: " FUNC_LINE_FMT fmt "\n" #include "erofs/print.h" static const char *erofs_devname; static int erofs_devfd = -1; static u64 erofs_devsz; int dev_get_blkdev_size(int fd, u64 *bytes) { errno = ENOTSUP; #ifdef BLKGETSIZE64 if (ioctl(fd, BLKGETSIZE64, bytes) >= 0) return 0; #endif #ifdef BLKGETSIZE { unsigned long size; if (ioctl(fd, BLKGETSIZE, &size) >= 0) { *bytes = ((u64)size << 9); return 0; } } #endif return -errno; } void dev_close(void) { close(erofs_devfd); erofs_devname = NULL; erofs_devfd = -1; erofs_devsz = 0; } int dev_open(const char *dev) { struct stat st; int fd, ret; fd = open(dev, O_RDWR | O_CREAT | O_BINARY, 0644); if (fd < 0) { erofs_err("failed to open(%s).", dev); return -errno; } ret = fstat(fd, &st); if (ret) { erofs_err("failed to fstat(%s).", dev); close(fd); return -errno; } switch (st.st_mode & S_IFMT) { case S_IFBLK: ret = dev_get_blkdev_size(fd, &erofs_devsz); if (ret) { erofs_err("failed to get block device size(%s).", dev); close(fd); return ret; } erofs_devsz = round_down(erofs_devsz, EROFS_BLKSIZ); break; case S_IFREG: ret = ftruncate(fd, 0); if (ret) { erofs_err("failed to ftruncate(%s).", dev); close(fd); return -errno; } /* INT64_MAX is the limit of kernel vfs */ erofs_devsz = INT64_MAX; break; default: erofs_err("bad file type (%s, %o).", dev, st.st_mode); close(fd); return -EINVAL; } erofs_devname = dev; erofs_devfd = fd; erofs_info("successfully to open %s", dev); return 0; } u64 dev_length(void) { return erofs_devsz; } int dev_write(const void *buf, u64 offset, size_t len) { int ret; if (cfg.c_dry_run) return 0; if (!buf) { erofs_err("buf is NULL"); return -EINVAL; } if (offset >= erofs_devsz || len > erofs_devsz || offset > erofs_devsz - len) { erofs_err("Write posion[%" PRIu64 ", %zd] is too large beyond the end of device(%" PRIu64 ").", offset, len, erofs_devsz); return -EINVAL; } ret = pwrite64(erofs_devfd, buf, len, (off64_t)offset); if (ret != (int)len) { if (ret < 0) { erofs_err("Failed to write data into device - %s:[%" PRIu64 ", %zd].", erofs_devname, offset, len); return -errno; } erofs_err("Writing data into device - %s:[%" PRIu64 ", %zd] - was truncated.", erofs_devname, offset, len); return -ERANGE; } return 0; } int dev_fillzero(u64 offset, size_t len, bool padding) { static const char zero[EROFS_BLKSIZ] = {0}; int ret; if (cfg.c_dry_run) return 0; #if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE) if (!padding && fallocate(erofs_devfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len) >= 0) return 0; #endif while (len > EROFS_BLKSIZ) { ret = dev_write(zero, offset, EROFS_BLKSIZ); if (ret) return ret; len -= EROFS_BLKSIZ; offset += EROFS_BLKSIZ; } return dev_write(zero, offset, len); } int dev_fsync(void) { int ret; ret = fsync(erofs_devfd); if (ret) { erofs_err("Could not fsync device!!!"); return -EIO; } return 0; } int dev_resize(unsigned int blocks) { int ret; struct stat st; u64 length; if (cfg.c_dry_run || erofs_devsz != INT64_MAX) return 0; ret = fstat(erofs_devfd, &st); if (ret) { erofs_err("failed to fstat."); return -errno; } length = (u64)blocks * EROFS_BLKSIZ; if (st.st_size == length) return 0; if (st.st_size > length) return ftruncate(erofs_devfd, length); length = length - st.st_size; #if defined(HAVE_FALLOCATE) if (fallocate(erofs_devfd, 0, st.st_size, length) 
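	    /* preallocate the appended tail of the image when fallocate() works */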
>= 0) return 0; #endif return dev_fillzero(st.st_size, length, true); } erofs-utils-1.0/lib/xattr.c000066400000000000000000000260531355416010500157100ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * erofs_utils/lib/xattr.c * * Originally contributed by an anonymous person, * heavily changed by Li Guifu * and Gao Xiang */ #define _GNU_SOURCE #include #include #ifdef HAVE_LINUX_XATTR_H #include #endif #include #include #include "erofs/print.h" #include "erofs/hashtable.h" #include "erofs/xattr.h" #include "erofs/cache.h" #define EA_HASHTABLE_BITS 16 struct xattr_item { const char *kvbuf; unsigned int hash[2], len[2], count; int shared_xattr_id; u8 prefix; struct hlist_node node; }; struct inode_xattr_node { struct list_head list; struct xattr_item *item; }; static DECLARE_HASHTABLE(ea_hashtable, EA_HASHTABLE_BITS); static LIST_HEAD(shared_xattrs_list); static unsigned int shared_xattrs_count, shared_xattrs_size; static struct xattr_prefix { const char *prefix; u16 prefix_len; } xattr_types[] = { [EROFS_XATTR_INDEX_USER] = { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN }, [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = { XATTR_NAME_POSIX_ACL_ACCESS, sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 }, [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = { XATTR_NAME_POSIX_ACL_DEFAULT, sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 }, [EROFS_XATTR_INDEX_TRUSTED] = { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }, [EROFS_XATTR_INDEX_SECURITY] = { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN } }; static unsigned int BKDRHash(char *str, unsigned int len) { const unsigned int seed = 131313; unsigned int hash = 0; while (len) { hash = hash * seed + (*str++); --len; } return hash; } static unsigned int xattr_item_hash(u8 prefix, char *buf, unsigned int len[2], unsigned int hash[2]) { hash[0] = BKDRHash(buf, len[0]); /* key */ hash[1] = BKDRHash(buf + len[0], len[1]); /* value */ return prefix ^ hash[0] ^ hash[1]; } static unsigned int put_xattritem(struct xattr_item *item) { if (item->count > 1) return --item->count; free(item); return 0; } static struct xattr_item *get_xattritem(u8 prefix, char *kvbuf, unsigned int len[2]) { struct xattr_item *item; unsigned int hash[2], hkey; hkey = xattr_item_hash(prefix, kvbuf, len, hash); hash_for_each_possible(ea_hashtable, item, node, hkey) { if (prefix == item->prefix && item->len[0] == len[0] && item->len[1] == len[1] && item->hash[0] == hash[0] && item->hash[1] == hash[1] && !memcmp(kvbuf, item->kvbuf, len[0] + len[1])) { free(kvbuf); ++item->count; return item; } } item = malloc(sizeof(*item)); if (!item) { free(kvbuf); return ERR_PTR(-ENOMEM); } INIT_HLIST_NODE(&item->node); item->count = 1; item->kvbuf = kvbuf; item->len[0] = len[0]; item->len[1] = len[1]; item->hash[0] = hash[0]; item->hash[1] = hash[1]; item->shared_xattr_id = -1; item->prefix = prefix; hash_add(ea_hashtable, &item->node, hkey); return item; } static bool match_prefix(const char *key, u8 *index, u16 *len) { struct xattr_prefix *p; for (p = xattr_types; p < xattr_types + ARRAY_SIZE(xattr_types); ++p) { if (p->prefix && !strncmp(p->prefix, key, p->prefix_len)) { *len = p->prefix_len; *index = p - xattr_types; return true; } } return false; } static struct xattr_item *parse_one_xattr(const char *path, const char *key, unsigned int keylen) { ssize_t ret; u8 prefix; u16 prefixlen; unsigned int len[2]; char *kvbuf; erofs_dbg("parse xattr [%s] of %s", path, key); if (!match_prefix(key, &prefix, &prefixlen)) return ERR_PTR(-ENODATA); DBG_BUGON(keylen < prefixlen); /* determine length of the value */ ret = 
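	      /* a NULL buffer makes lgetxattr() report only the size of the value */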
lgetxattr(path, key, NULL, 0); if (ret < 0) return ERR_PTR(-errno); len[1] = ret; /* allocate key-value buffer */ len[0] = keylen - prefixlen; kvbuf = malloc(len[0] + len[1]); if (!kvbuf) return ERR_PTR(-ENOMEM); memcpy(kvbuf, key + prefixlen, len[0]); if (len[1]) { /* copy value to buffer */ ret = lgetxattr(path, key, kvbuf + len[0], len[1]); if (ret < 0) { free(kvbuf); return ERR_PTR(-errno); } if (len[1] != ret) { erofs_err("size of xattr value got changed just now (%u-> %ld)", len[1], (long)ret); len[1] = ret; } } return get_xattritem(prefix, kvbuf, len); } static int inode_xattr_add(struct list_head *hlist, struct xattr_item *item) { struct inode_xattr_node *node = malloc(sizeof(*node)); if (!node) return -ENOMEM; init_list_head(&node->list); node->item = item; list_add(&node->list, hlist); return 0; } static int shared_xattr_add(struct xattr_item *item) { struct inode_xattr_node *node = malloc(sizeof(*node)); if (!node) return -ENOMEM; init_list_head(&node->list); node->item = item; list_add(&node->list, &shared_xattrs_list); shared_xattrs_size += sizeof(struct erofs_xattr_entry); shared_xattrs_size = EROFS_XATTR_ALIGN(shared_xattrs_size + item->len[0] + item->len[1]); return ++shared_xattrs_count; } static int read_xattrs_from_file(const char *path, struct list_head *ixattrs) { int ret = 0; char *keylst, *key; ssize_t kllen = llistxattr(path, NULL, 0); if (kllen < 0 && errno != ENODATA) { erofs_err("llistxattr to get the size of names for %s failed", path); return -errno; } if (kllen <= 1) return 0; keylst = malloc(kllen); if (!keylst) return -ENOMEM; /* copy the list of attribute keys to the buffer.*/ kllen = llistxattr(path, keylst, kllen); if (kllen < 0) { erofs_err("llistxattr to get names for %s failed", path); ret = -errno; goto err; } /* * loop over the list of zero terminated strings with the * attribute keys. Use the remaining buffer length to determine * the end of the list. */ key = keylst; while (kllen > 0) { unsigned int keylen = strlen(key); struct xattr_item *item = parse_one_xattr(path, key, keylen); if (IS_ERR(item)) { ret = PTR_ERR(item); goto err; } if (ixattrs) { ret = inode_xattr_add(ixattrs, item); if (ret < 0) goto err; } else if (item->count == cfg.c_inline_xattr_tolerance + 1) { ret = shared_xattr_add(item); if (ret < 0) goto err; ret = 0; } kllen -= keylen + 1; key += keylen + 1; } err: free(keylst); return ret; } int erofs_prepare_xattr_ibody(const char *path, struct list_head *ixattrs) { int ret; struct inode_xattr_node *node; /* check if xattr is disabled */ if (cfg.c_inline_xattr_tolerance < 0) return 0; ret = read_xattrs_from_file(path, ixattrs); if (ret < 0) return ret; if (list_empty(ixattrs)) return 0; /* get xattr ibody size */ ret = sizeof(struct erofs_xattr_ibody_header); list_for_each_entry(node, ixattrs, list) { const struct xattr_item *item = node->item; if (item->shared_xattr_id >= 0) { ret += sizeof(__le32); continue; } ret += sizeof(struct erofs_xattr_entry); ret = EROFS_XATTR_ALIGN(ret + item->len[0] + item->len[1]); } return ret; } static int erofs_count_all_xattrs_from_path(const char *path) { int ret; DIR *_dir; struct stat64 st; _dir = opendir(path); if (!_dir) { erofs_err("%s, failed to opendir at %s: %s", __func__, path, erofs_strerror(errno)); return -errno; } ret = 0; while (1) { struct dirent *dp; char buf[PATH_MAX]; /* * set errno to 0 before calling readdir() in order to * distinguish end of stream and from an error. 
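	 * readdir() returns NULL in both cases, so errno is rechecked once
	 * the loop terminates.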
*/ errno = 0; dp = readdir(_dir); if (!dp) break; if (is_dot_dotdot(dp->d_name) || !strncmp(dp->d_name, "lost+found", strlen("lost+found"))) continue; ret = snprintf(buf, PATH_MAX, "%s/%s", path, dp->d_name); if (ret < 0 || ret >= PATH_MAX) { /* ignore the too long path */ ret = -ENOMEM; goto fail; } ret = read_xattrs_from_file(buf, NULL); if (ret) goto fail; ret = lstat64(buf, &st); if (ret) { ret = -errno; goto fail; } if (!S_ISDIR(st.st_mode)) continue; ret = erofs_count_all_xattrs_from_path(buf); if (ret) goto fail; } if (errno) ret = -errno; fail: closedir(_dir); return ret; } static void erofs_cleanxattrs(bool sharedxattrs) { unsigned int i; struct xattr_item *item; hash_for_each(ea_hashtable, i, item, node) { if (sharedxattrs && item->shared_xattr_id >= 0) continue; hash_del(&item->node); free(item); } if (sharedxattrs) return; shared_xattrs_size = shared_xattrs_count = 0; } int erofs_build_shared_xattrs_from_path(const char *path) { int ret; struct erofs_buffer_head *bh; struct inode_xattr_node *node, *n; char *buf; unsigned int p; erofs_off_t off; /* check if xattr or shared xattr is disabled */ if (cfg.c_inline_xattr_tolerance < 0 || cfg.c_inline_xattr_tolerance == INT_MAX) return 0; if (shared_xattrs_size || shared_xattrs_count) { DBG_BUGON(1); return -EINVAL; } ret = erofs_count_all_xattrs_from_path(path); if (ret) return ret; if (!shared_xattrs_size) goto out; buf = malloc(shared_xattrs_size); if (!buf) return -ENOMEM; bh = erofs_balloc(XATTR, shared_xattrs_size, 0, 0); if (IS_ERR(bh)) { free(buf); return PTR_ERR(bh); } bh->op = &erofs_skip_write_bhops; erofs_mapbh(bh->block, true); off = erofs_btell(bh, false); sbi.xattr_blkaddr = off / EROFS_BLKSIZ; off %= EROFS_BLKSIZ; p = 0; list_for_each_entry_safe(node, n, &shared_xattrs_list, list) { struct xattr_item *const item = node->item; const struct erofs_xattr_entry entry = { .e_name_index = item->prefix, .e_name_len = item->len[0], .e_value_size = cpu_to_le16(item->len[1]) }; list_del(&node->list); item->shared_xattr_id = (off + p) / sizeof(struct erofs_xattr_entry); memcpy(buf + p, &entry, sizeof(entry)); p += sizeof(struct erofs_xattr_entry); memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]); p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]); free(node); } bh->fsprivate = buf; bh->op = &erofs_buf_write_bhops; out: erofs_cleanxattrs(true); return 0; } char *erofs_export_xattr_ibody(struct list_head *ixattrs, unsigned int size) { struct inode_xattr_node *node, *n; struct erofs_xattr_ibody_header *header; LIST_HEAD(ilst); unsigned int p; char *buf = calloc(1, size); if (!buf) return ERR_PTR(-ENOMEM); header = (struct erofs_xattr_ibody_header *)buf; header->h_shared_count = 0; p = sizeof(struct erofs_xattr_ibody_header); list_for_each_entry_safe(node, n, ixattrs, list) { struct xattr_item *const item = node->item; list_del(&node->list); /* move inline xattrs to the onstack list */ if (item->shared_xattr_id < 0) { list_add(&node->list, &ilst); continue; } *(__le32 *)(buf + p) = cpu_to_le32(item->shared_xattr_id); p += sizeof(__le32); ++header->h_shared_count; free(node); put_xattritem(item); } list_for_each_entry_safe(node, n, &ilst, list) { struct xattr_item *const item = node->item; const struct erofs_xattr_entry entry = { .e_name_index = item->prefix, .e_name_len = item->len[0], .e_value_size = cpu_to_le16(item->len[1]) }; memcpy(buf + p, &entry, sizeof(entry)); p += sizeof(struct erofs_xattr_entry); memcpy(buf + p, item->kvbuf, item->len[0] + item->len[1]); p = EROFS_XATTR_ALIGN(p + item->len[0] + item->len[1]); 
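		/* the inline entry and its payload are in place; drop the node */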
list_del(&node->list); free(node); put_xattritem(item); } DBG_BUGON(p > size); return buf; } erofs-utils-1.0/mkfs/000077500000000000000000000000001355416010500145665ustar00rootroot00000000000000erofs-utils-1.0/mkfs/Makefile.am000066400000000000000000000003661355416010500166270ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0+ # Makefile.am AUTOMAKE_OPTIONS = foreign bin_PROGRAMS = mkfs.erofs mkfs_erofs_SOURCES = main.c mkfs_erofs_CFLAGS = -Wall -Werror -I$(top_srcdir)/include mkfs_erofs_LDADD = $(top_builddir)/lib/liberofs.la erofs-utils-1.0/mkfs/main.c000066400000000000000000000173631355416010500156700ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0+ /* * mkfs/main.c * * Copyright (C) 2018-2019 HUAWEI, Inc. * http://www.huawei.com/ * Created by Li Guifu */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include "erofs/config.h" #include "erofs/print.h" #include "erofs/cache.h" #include "erofs/inode.h" #include "erofs/io.h" #include "erofs/compress.h" #include "erofs/xattr.h" #define EROFS_SUPER_END (EROFS_SUPER_OFFSET + sizeof(struct erofs_super_block)) static struct option long_options[] = { {"help", no_argument, 0, 1}, {0, 0, 0, 0}, }; static void print_available_compressors(FILE *f, const char *delim) { unsigned int i = 0; const char *s; while ((s = z_erofs_list_available_compressors(i)) != NULL) { if (i++) fputs(delim, f); fputs(s, f); } fputc('\n', f); } static void usage(void) { fputs("usage: [options] FILE DIRECTORY\n\n" "Generate erofs image from DIRECTORY to FILE, and [options] are:\n" " -zX[,Y] X=compressor (Y=compression level, optional)\n" " -d# set output message level to # (maximum 9)\n" " -x# set xattr tolerance to # (< 0, disable xattrs; default 2)\n" " -EX[,...] X=extended options\n" " -T# set a fixed UNIX timestamp # to all files\n" " --help display this help and exit\n" "\nAvailable compressors are: ", stderr); print_available_compressors(stderr, ", "); } static int parse_extended_opts(const char *opts) { #define MATCH_EXTENTED_OPT(opt, token, keylen) \ (keylen == sizeof(opt) - 1 && !memcmp(token, opt, sizeof(opt) - 1)) const char *token, *next, *tokenend, *value __maybe_unused; unsigned int keylen, vallen; value = NULL; for (token = opts; *token != '\0'; token = next) { const char *p = strchr(token, ','); next = NULL; if (p) next = p + 1; else { p = token + strlen(token); next = p; } tokenend = memchr(token, '=', p - token); if (tokenend) { keylen = tokenend - token; vallen = p - tokenend - 1; if (!vallen) return -EINVAL; value = tokenend + 1; } else { keylen = p - token; vallen = 0; } if (MATCH_EXTENTED_OPT("legacy-compress", token, keylen)) { if (vallen) return -EINVAL; /* disable compacted indexes and 0padding */ cfg.c_legacy_compress = true; sbi.feature_incompat &= ~EROFS_FEATURE_INCOMPAT_LZ4_0PADDING; } if (MATCH_EXTENTED_OPT("force-inode-compact", token, keylen)) { if (vallen) return -EINVAL; cfg.c_force_inodeversion = FORCE_INODE_COMPACT; } if (MATCH_EXTENTED_OPT("force-inode-extended", token, keylen)) { if (vallen) return -EINVAL; cfg.c_force_inodeversion = FORCE_INODE_EXTENDED; } } return 0; } static int mkfs_parse_options_cfg(int argc, char *argv[]) { char *endptr; int opt, i; while((opt = getopt_long(argc, argv, "d:x:z:E:T:", long_options, NULL)) != -1) { switch (opt) { case 'z': if (!optarg) { cfg.c_compr_alg_master = "(default)"; break; } /* get specified compression level */ for (i = 0; optarg[i] != '\0'; ++i) { if (optarg[i] == ',') { cfg.c_compr_level_master = atoi(optarg + i + 1); 
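					/* terminate the string at ',' so only the algorithm name is kept */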
optarg[i] = '\0'; break; } } cfg.c_compr_alg_master = strndup(optarg, i); break; case 'd': i = atoi(optarg); if (i < EROFS_MSG_MIN || i > EROFS_MSG_MAX) { erofs_err("invalid debug level %d", i); return -EINVAL; } cfg.c_dbg_lvl = i; break; case 'x': i = strtol(optarg, &endptr, 0); if (*endptr != '\0') { erofs_err("invalid xattr tolerance %s", optarg); return -EINVAL; } cfg.c_inline_xattr_tolerance = i; break; case 'E': opt = parse_extended_opts(optarg); if (opt) return opt; break; case 'T': cfg.c_unix_timestamp = strtoull(optarg, &endptr, 0); if (cfg.c_unix_timestamp == -1 || *endptr != '\0') { erofs_err("invalid UNIX timestamp %s", optarg); return -EINVAL; } break; case 1: usage(); exit(0); default: /* '?' */ return -EINVAL; } } if (optind >= argc) return -EINVAL; cfg.c_img_path = strdup(argv[optind++]); if (!cfg.c_img_path) return -ENOMEM; if (optind >= argc) { erofs_err("Source directory is missing"); return -EINVAL; } cfg.c_src_path = realpath(argv[optind++], NULL); if (!cfg.c_src_path) { erofs_err("Failed to parse source directory: %s", erofs_strerror(-errno)); return -ENOENT; } if (optind < argc) { erofs_err("Unexpected argument: %s\n", argv[optind]); return -EINVAL; } return 0; } int erofs_mkfs_update_super_block(struct erofs_buffer_head *bh, erofs_nid_t root_nid, erofs_blk_t *blocks) { struct erofs_super_block sb = { .magic = cpu_to_le32(EROFS_SUPER_MAGIC_V1), .blkszbits = LOG_BLOCK_SIZE, .inos = 0, .build_time = cpu_to_le64(sbi.build_time), .build_time_nsec = cpu_to_le32(sbi.build_time_nsec), .blocks = 0, .meta_blkaddr = sbi.meta_blkaddr, .xattr_blkaddr = sbi.xattr_blkaddr, .feature_incompat = cpu_to_le32(sbi.feature_incompat), }; const unsigned int sb_blksize = round_up(EROFS_SUPER_END, EROFS_BLKSIZ); char *buf; *blocks = erofs_mapbh(NULL, true); sb.blocks = cpu_to_le32(*blocks); sb.root_nid = cpu_to_le16(root_nid); buf = calloc(sb_blksize, 1); if (!buf) { erofs_err("Failed to allocate memory for sb: %s", erofs_strerror(-errno)); return -ENOMEM; } memcpy(buf + EROFS_SUPER_OFFSET, &sb, sizeof(sb)); bh->fsprivate = buf; bh->op = &erofs_buf_write_bhops; return 0; } int main(int argc, char **argv) { int err = 0; struct erofs_buffer_head *sb_bh; struct erofs_inode *root_inode; erofs_nid_t root_nid; struct stat64 st; erofs_blk_t nblocks; struct timeval t; erofs_init_configure(); fprintf(stderr, "%s %s\n", basename(argv[0]), cfg.c_version); cfg.c_legacy_compress = false; sbi.feature_incompat = EROFS_FEATURE_INCOMPAT_LZ4_0PADDING; err = mkfs_parse_options_cfg(argc, argv); if (err) { if (err == -EINVAL) usage(); return 1; } err = lstat64(cfg.c_src_path, &st); if (err) return 1; if ((st.st_mode & S_IFMT) != S_IFDIR) { erofs_err("root of the filesystem is not a directory - %s", cfg.c_src_path); usage(); return 1; } if (cfg.c_unix_timestamp != -1) { sbi.build_time = cfg.c_unix_timestamp; sbi.build_time_nsec = 0; } else if (!gettimeofday(&t, NULL)) { sbi.build_time = t.tv_sec; sbi.build_time_nsec = t.tv_usec; } err = dev_open(cfg.c_img_path); if (err) { usage(); return 1; } erofs_show_config(); sb_bh = erofs_buffer_init(); if (IS_ERR(sb_bh)) { err = PTR_ERR(sb_bh); erofs_err("Failed to initialize buffers: %s", erofs_strerror(err)); goto exit; } err = erofs_bh_balloon(sb_bh, EROFS_SUPER_END); if (err < 0) { erofs_err("Failed to balloon erofs_super_block: %s", erofs_strerror(err)); goto exit; } err = z_erofs_compress_init(); if (err) { erofs_err("Failed to initialize compressor: %s", erofs_strerror(err)); goto exit; } erofs_inode_manager_init(); err = 
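	      /* scan the source tree and reserve the shared xattr area before building inodes */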
erofs_build_shared_xattrs_from_path(cfg.c_src_path); if (err) { erofs_err("Failed to build shared xattrs: %s", erofs_strerror(err)); goto exit; } root_inode = erofs_mkfs_build_tree_from_path(NULL, cfg.c_src_path); if (IS_ERR(root_inode)) { err = PTR_ERR(root_inode); goto exit; } root_nid = erofs_lookupnid(root_inode); erofs_iput(root_inode); err = erofs_mkfs_update_super_block(sb_bh, root_nid, &nblocks); if (err) goto exit; /* flush all remaining buffers */ if (!erofs_bflush(NULL)) err = -EIO; else err = dev_resize(nblocks); exit: z_erofs_compress_exit(); dev_close(); erofs_exit_configure(); if (err) { erofs_err("\tCould not format the device : %s\n", erofs_strerror(err)); return 1; } return 0; }
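A minimal usage sketch, derived only from the usage() text and option parsing in mkfs/main.c above; the image name and source directory are placeholders, and the lz4 compressor name assumes LZ4 support was enabled at build time:

    mkfs.erofs -zlz4 -d3 erofs.img /path/to/rootdir

This writes an EROFS image of /path/to/rootdir to erofs.img, compressing with lz4 at its default level and printing messages up to debug level 3.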